diff --git a/dev/reference/sleap_io/model/camera/index.html b/dev/reference/sleap_io/model/camera/index.html
index 252ba335..76306ef2 100644
--- a/dev/reference/sleap_io/model/camera/index.html
+++ b/dev/reference/sleap_io/model/camera/index.html
@@ -1397,175 +1397,165 @@
sleap_io/model/camera.py
def __attrs_post_init__(self):
- """Initialize extrinsic matrix from rotation and translation vectors."""
-
- # Initialize extrinsic matrix
- self._extrinsic_matrix = np.eye(4, dtype="float64")
- self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]
- self._extrinsic_matrix[:3, 3] = self._tvec
+ def __attrs_post_init__(self):
+ """Initialize extrinsic matrix from rotation and translation vectors."""
+ # Initialize extrinsic matrix
+ self._extrinsic_matrix = np.eye(4, dtype="float64")
+ self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]
+ self._extrinsic_matrix[:3, 3] = self._tvec
sleap_io/model/camera.py
def __getattr__(self, name: str):
- """Get attribute by name.
-
- Args:
- name: Name of attribute to get.
+ def __getattr__(self, name: str):
+ """Get attribute by name.
+
+ Args:
+ name: Name of attribute to get.
+
+ Returns:
+ Value of attribute.
+
+ Raises:
+ AttributeError: If attribute does not exist.
+ """
+ if name in self.__attrs_attrs__:
+ return getattr(self, name)
- Returns:
- Value of attribute.
-
- Raises:
- AttributeError: If attribute does not exist.
- """
-
- if name in self.__attrs_attrs__:
- return getattr(self, name)
-
- # The aliases for methods called when triangulate with sleap_anipose
- method_aliases = {
- "get_name": self.name,
- "get_extrinsic_matrix": self.extrinsic_matrix,
- }
-
- def return_callable_method_alias():
- return method_aliases[name]
-
- if name in method_aliases:
- return return_callable_method_alias
-
- raise AttributeError(f"'Camera' object has no attribute or method '{name}'")
+ # The aliases for methods called when triangulate with sleap_anipose
+ method_aliases = {
+ "get_name": self.name,
+ "get_extrinsic_matrix": self.extrinsic_matrix,
+ }
+
+ def return_callable_method_alias():
+ return method_aliases[name]
+
+ if name in method_aliases:
+ return return_callable_method_alias
+
+ raise AttributeError(f"'Camera' object has no attribute or method '{name}'")
sleap_io/model/camera.py
def project(self, points: np.ndarray) -> np.ndarray:
- """Project 3D points to 2D using camera matrix and distortion coefficients.
-
- Args:
- points: 3D points to project of shape (N, 3) or (N, 1, 3).
-
- Returns:
- Projected 2D points of shape (N, 1, 2).
- """
-
- points = points.reshape(-1, 1, 3)
- out, _ = cv2.projectPoints(
- points,
- self.rvec,
- self.tvec,
- self.matrix,
- self.dist,
- )
- return out
+ def project(self, points: np.ndarray) -> np.ndarray:
+ """Project 3D points to 2D using camera matrix and distortion coefficients.
+
+ Args:
+ points: 3D points to project of shape (N, 3) or (N, 1, 3).
+
+ Returns:
+ Projected 2D points of shape (N, 1, 2).
+ """
+ points = points.reshape(-1, 1, 3)
+ out, _ = cv2.projectPoints(
+ points,
+ self.rvec,
+ self.tvec,
+ self.matrix,
+ self.dist,
+ )
+ return out
sleap_io/model/camera.py
def undistort_points(self, points: np.ndarray) -> np.ndarray:
- """Undistort points using camera matrix and distortion coefficients.
-
- Args:
- points: Points to undistort of shape (N, 2).
-
- Returns:
- Undistorted points of shape (N, 2).
- """
-
- shape = points.shape
- points = points.reshape(-1, 1, 2)
- out = cv2.undistortPoints(points, self.matrix, self.dist)
- return out.reshape(shape)
+ def undistort_points(self, points: np.ndarray) -> np.ndarray:
+ """Undistort points using camera matrix and distortion coefficients.
+
+ Args:
+ points: Points to undistort of shape (N, 2).
+
+ Returns:
+ Undistorted points of shape (N, 2).
+ """
+ shape = points.shape
+ points = points.reshape(-1, 1, 2)
+ out = cv2.undistortPoints(points, self.matrix, self.dist)
+ return out.reshape(shape)
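For context on the methods shown in this hunk: `project` and `undistort_points` are thin wrappers around OpenCV's `cv2.projectPoints` and `cv2.undistortPoints`. A minimal standalone sketch of those calls with placeholder camera parameters (the values below are illustrative assumptions, not taken from the library):

```python
import cv2
import numpy as np

# Placeholder camera parameters (assumptions for illustration only).
rvec = np.zeros(3)            # rotation vector (Rodrigues form)
tvec = np.zeros(3)            # translation vector
camera_matrix = np.array(
    [[1000.0, 0.0, 320.0],
     [0.0, 1000.0, 240.0],
     [0.0, 0.0, 1.0]]
)                             # intrinsic matrix
dist = np.zeros(5)            # distortion coefficients

# Project 3D points to 2D, mirroring the (N, 1, 3) reshape done by Camera.project.
points_3d = np.array([[0.1, -0.2, 2.0], [0.0, 0.0, 1.5]]).reshape(-1, 1, 3)
points_2d, _ = cv2.projectPoints(points_3d, rvec, tvec, camera_matrix, dist)
print(points_2d.shape)        # (2, 1, 2)

# Undistort 2D points, mirroring Camera.undistort_points.
undistorted = cv2.undistortPoints(points_2d.reshape(-1, 1, 2), camera_matrix, dist)
print(undistorted.reshape(-1, 2).shape)  # (2, 2)
```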
Standalone utilities for working with animal pose tracking data.
This is intended to be a complement to the core SLEAP package that aims to provide functionality for interacting with pose tracking-related data structures and file formats with minimal dependencies. This package does not have any functionality related to labeling, training, or inference.
"},{"location":"#features","title":"Features","text":"The main purpose of this library is to provide utilities to load/save from different formats for pose data and standardize them into our common Data Model.
This enables ease-of-use through format-agnostic operations that make it easy to work with pose data, including utilities for common tasks. Some of these include:
See Examples for more usage examples and recipes.
"},{"location":"#installation","title":"Installation","text":"pip install sleap-io\n
or
conda install -c conda-forge sleap-io\n
For development, use one of the following syntaxes:
conda env create -f environment.yml\n
pip install -e .[dev]\n
"},{"location":"#support","title":"Support","text":"For technical inquiries specific to this package, please open an Issue with a description of your problem or request.
For general SLEAP usage, see the main website.
Other questions? Reach out to talmo@salk.edu.
This package is distributed under a BSD 3-Clause License and can be used without restrictions. See LICENSE for details.
What's Changed (v0.2.0):
- `sio.VideoWriter`: basic imageio-ffmpeg video writer with sensible H264 presets. This can be used as a context manager:
  with sio.VideoWriter("video.mp4") as vw:
      for frame in video:
          vw(frame)
- `sio.save_video`: high-level video writing. This can be used to quickly write a set of frames or even a whole `Video` for easy (if inefficient) re-encoding:
  bad_video = sio.load_video("unseekable.avi")
  sio.save_video(bad_video, "seekable.mp4")
- Raise `IndexError` in `VideoBackend` to enable the sequence protocol for iteration over `Video`s:
  for frame in video:
      pass
- Renamed `sio.io.video` to `sio.io.video_reading`.

`Skeleton`:
- `__contains__(node: NodeOrIndex)`: Returns `True` if a node exists in the skeleton.
- `rebuild_cache()`: Method allowing explicit regeneration of the caching attributes from the nodes.
- `_name_to_node_cache` and `_node_to_ind_cache`: better reflecting the mapping directionality.
- `require_node(node: NodeOrIndex, add_missing: bool = True)`: Returns a `Node` given a `Node`, `int` or `str`. If `add_missing` is `True`, the node is added or created, otherwise an `IndexError` is raised. This is helpful for flexibly converting between node representations with convenient existence handling.
- `add_nodes(list[Node | str])`: Convenience method to add a list of nodes.
- `add_edges(edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]])`: Convenience method to add a list of edges.
- `rename_nodes(name_map: dict[NodeOrIndex, str] | list[str])`: Method to rename nodes either by specifying a potentially partial mapping from node(s) to new name(s), or a list of new names. Handles updating both the `Node.name` attributes and the cache.
- `rename_node(old_name: NodeOrIndex, new_name: str)`: Shorter syntax for renaming a single node.
- `remove_nodes(nodes: list[NodeOrIndex])`: Method for removing nodes from the skeleton and updating caches. Does NOT update corresponding instances.
- `remove_node(node: NodeOrIndex)`: Shorter syntax for removing a single node.
- `reorder_nodes(new_order: list[NodeOrIndex])`: Method for setting the order of the nodes within the skeleton with cache updating. Does NOT update corresponding instances.

`Instance`/`PredictedInstance`:
- `update_skeleton()`: Updates the `points` attribute on the instance to reflect changes in the associated skeleton (removed nodes and reordering). This is called internally after updating the skeleton from the `Labels` level, but also exposed for more complex data manipulation workflows.
- `replace_skeleton(new_skeleton: Skeleton, node_map: dict[NodeOrIndex, NodeOrIndex] | None = None, rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None)`: Method to replace the skeleton on the instance with optional capability to specify a node mapping so that data stored in the `points` attribute is retained and associated with the right nodes in the new skeleton. Mapping is specified in `node_map` from old to new nodes and defaults to mapping between node objects with the same name. `rev_node_map` maps new nodes to old nodes and is used internally when calling from the `Labels` level as it bypasses validation.

`Labels` (see the usage sketch after this list):
- `instances`: Convenience property that returns a generator that loops over all labeled frames and returns all instances. This can be lazily iterated over without having to construct a huge list of all the instances.
- `rename_nodes(name_map: dict[NodeOrIndex, str] | list[str], skeleton: Skeleton | None = None)`: Method to rename nodes in a specified skeleton within the labels.
- `remove_nodes(nodes: list[NodeOrIndex], skeleton: Skeleton | None = None)`: Method to remove nodes in a specified skeleton within the labels. This also updates all instances associated with the skeleton, removing point data for the removed nodes.
- `reorder_nodes(new_order: list[NodeOrIndex], skeleton: Skeleton | None = None)`: Method to reorder nodes in a specified skeleton within the labels. This also updates all instances associated with the skeleton, reordering point data for the nodes.
- `replace_skeleton(new_skeleton: Skeleton, old_skeleton: Skeleton | None = None, node_map: dict[NodeOrIndex, NodeOrIndex] | None = None)`: Method to replace a skeleton entirely within the labels, updating all instances associated with the old skeleton to use the new skeleton, optionally with node remapping to retain previous point data.

Fixes and other changes:
- `HDF5Video` edge cases by @talmo in #137
- `Labels.extract`, `Labels.trim` and `Video.save` by @talmo in #140
- `LabeledFrame.frame_idx`: Now always converted to `int` type.
- `Video.close()`: Now caches backend metadata to `Video.backend_metadata` to persist metadata on close.
- `copy.deepcopy()` now works on `Video` objects even if backend is open.
- `Video.save(save_path: str | Path, frame_inds: list[int] | np.ndarray | None = None, video_kwargs: dict[str, Any] | None = None)`: Method to save a video file to an MP4 using `VideoWriter` with an optional subset of frames.
- `Labels.extract(inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True)`: Add method to extract a subset of frames from the labels, optionally making a copy, and return a new `Labels` object.
- `Labels.trim(save_path: str | Path, frame_inds: list[int] | np.ndarray, video: Video | int | None = None, video_kwargs: dict[str, Any] | None = None)`: Add method to extract a subset of the labels, write a video clip with the extracted frames, and adjust frame indices to match the clip.

Full Changelog: v0.1.10...v0.2.0
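A minimal sketch of how the `Labels`-level node editing methods listed above might be used together (the file path and node names here are hypothetical):

```python
import sleap_io as sio

# Hypothetical project whose skeleton has nodes: head, thorax, abdomen.
labels = sio.load_file("labels.v001.slp")

# Rename a subset of nodes via a partial mapping (old name -> new name).
labels.rename_nodes({"head": "HEAD", "thorax": "THORAX"})

# Remove a node; point data for it is dropped from all associated instances.
labels.remove_nodes(["abdomen"])

# Reorder the remaining nodes; instances are updated to match.
labels.reorder_nodes(["THORAX", "HEAD"])

labels.save("labels.v002.slp")
```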
"},{"location":"changelog/#v0110","title":"v0.1.10What's Changed","text":"Full Changelog: v0.1.9...v0.1.10
"},{"location":"changelog/#v019","title":"v0.1.9What's Changed","text":"av
as a dependency since it's still a little buggy and doesn't have broad enough platform compatibility.ndx-pose
< 0.2.0 until #104 is merged in.sio.io.utils.is_file_accessible
to check for readability by actually reading a byte. This catches permission and other esoteric filesystem errors (addresses #116).sio.load_slp(..., open_videos=False)
Video
objects with Video(..., open_backend=False)
.1.0
after taking out the train split.Labels.make_training_splits(..., embed=False)
. Previously, the function would always embed the images, which could be slow for large projects. With this change, the embed
parameter is introduced, allowing the user to choose whether to embed the images or save the labels with references to the source video files.Full Changelog: v0.1.8...v0.1.9
"},{"location":"changelog/#v018","title":"v0.1.8What's ChangedNew Contributors","text":"Full Changelog: v0.1.7...v0.1.8
"},{"location":"changelog/#v017","title":"v0.1.7What's Changed","text":"Full Changelog: v0.1.6...v0.1.7
"},{"location":"changelog/#v016","title":"v0.1.6What's Changed","text":"Full Changelog: v0.1.5...v0.1.6
"},{"location":"changelog/#v015","title":"v0.1.5What's Changed","text":"Labels.split
and Labels.make_training_splits
by @talmo in #98Full Changelog: v0.1.4...v0.1.5
"},{"location":"changelog/#v014","title":"v0.1.4What's Changed","text":"labels.save(\"labels.pkg.slp\", embed=\"user\")
to embed frames with user-labeled instances (Instance
)labels.save(\"labels.pkg.slp\", embed=\"user+suggestion\")
to embed frames with user-labeled instances and suggestion frames (useful for inference after training)labels.save(\"labels.pkg.slp\", embed=\"source\")
to restore the source video (\"unembed\")__repr__
s for Skeleton
, LabeledFrame
, Labels
, Instance
, PredictedInstance
Labels.append()
and Labels.extend()
to add LabeledFrame
s now will update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents.Labels.update()
to manually update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents of Labels.labeled_frames
and Labels.suggestions
.Labels.replace_filenames()
: multiple methods for replacing all video filenames across the project (#85).Skeleton.edge_names
to return list of edges as tuples of string namessio.load_video
and related high level Video
APIs to clarify supported file formats.Video(filename)
construction (#94)Note: This is a re-release of v0.1.3 which had a borked deployment.
Full Changelog: v0.1.2...v0.1.4
"},{"location":"changelog/#v013","title":"v0.1.3What's Changed","text":"labels.save(\"labels.pkg.slp\", embed=\"user\")
to embed frames with user-labeled instances (Instance
)labels.save(\"labels.pkg.slp\", embed=\"user+suggestion\")
to embed frames with user-labeled instances and suggestion frames (useful for inference after training)labels.save(\"labels.pkg.slp\", embed=\"source\")
to restore the source video (\"unembed\")__repr__
s for Skeleton
, LabeledFrame
, Labels
, Instance
, PredictedInstance
Labels.append()
and Labels.extend()
to add LabeledFrame
s now will update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents.Labels.update()
to manually update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents of Labels.labeled_frames
and Labels.suggestions
.Labels.replace_filenames()
: multiple methods for replacing all video filenames across the project (#85).Skeleton.edge_names
to return list of edges as tuples of string namessio.load_video
and related high level Video
APIs to clarify supported file formats.Video(filename)
construction (#94)Full Changelog: v0.1.2...v0.1.3
"},{"location":"changelog/#v012","title":"v0.1.2What's Changed","text":"Full Changelog: v0.1.1...v0.1.2
"},{"location":"changelog/#v011","title":"v0.1.1What's Changed","text":"ImageVideo
backend by @talmo in #88SuggestionFrame
by @talmo in #89ImageVideo
support in SLP by @talmo in #90Full Changelog: v0.1.0...v0.1.1
"},{"location":"changelog/#v010","title":"v0.1.0What's ChangedNotes on dependency pins","text":"Add skeleton utilities by @talmo in #76
Skeleton.add_node
: Add a node by name or object.Skeleton.add_edge
: Add an edge by lists of names or objects.Skeleton.add_symmetry
: Add a symmetry edge by lists of names or objects.Update CI and versions by @talmo in #77
Bump to v0.1.0 by @talmo in #78
Fix multi-skeleton loading by @talmo in #79
Add high level APIs by @talmo in #80
load_video
and load_file
high level APIs (#48)Labels QOL enhancements by @talmo in #81
LabeledFrame.remove_predictions
: Remove predicted instances from a labeled frame.LabeledFrame.remove_empty_instances
: Remove instances with no visible points from a labeled frame.Labels.save
: Instance-level convenience wrapper for sio.save_file
.Labels.clean
: Remove unused or empty frames, instances, videos, skeletons and tracks.Labels.remove_predictions
: Remove predicted instances from all labeled frames (#69).Labels.__getitem__
: Now supports lists, slices, numpy arrays, tuples of (Video, frame_idx)
and Video
.Video QOL enhancements by @talmo in #82
Video.is_open
: Checks if the video exists and the backend is set.Video.open
: Opens or restarts the backend for reading.Video.close
: Closes the backend for reading.Video.exists
: Check if the filename for the video exists.Video.replace_filename
: Replace the filename and restart the backend.ffmpeg < 6.1
due to imageio/imageio-ffmpeg#99h5py >= 3.8.0
due to h5py/h5py#2118python >= 3.8
due to h5py >= 3.8.0
(we still support python==3.7
via pip but this is not longer in CI)Full Changelog: v0.0.14...v0.1.0
"},{"location":"changelog/#v0014","title":"v0.0.14What's Changed","text":"Full Changelog: v0.0.13...v0.0.14
"},{"location":"changelog/#v0013","title":"v0.0.13What's Changed","text":"Full Changelog: v0.0.12...v0.0.13
"},{"location":"changelog/#v0012","title":"v0.0.12What's ChangedNew Contributors","text":"Full Changelog: v0.0.11...v0.0.12
"},{"location":"changelog/#v0011","title":"v0.0.11What's Changed","text":"Full Changelog: v0.0.10...v0.0.11
"},{"location":"changelog/#v0010","title":"v0.0.10What's Changed","text":"This is a hotfix to get around installing in older environments with numpy <1.20.
Full Changelog: v0.0.9...v0.0.10
"},{"location":"changelog/#v009","title":"v0.0.9What's Changed","text":"Full Changelog: v0.0.8...v0.0.9
"},{"location":"changelog/#v008","title":"v0.0.8What's Changed","text":"Full Changelog: v0.0.7...v0.0.8
"},{"location":"changelog/#v007","title":"v0.0.7What's Changed","text":"Full Changelog: v0.0.6...v0.0.7
"},{"location":"changelog/#v006","title":"v0.0.6What's Changed","text":"Full Changelog: v0.0.5...v0.0.6
"},{"location":"changelog/#v005","title":"v0.0.5What's Changed","text":"Full Changelog: v0.0.4...v0.0.5
"},{"location":"changelog/#v004","title":"v0.0.4What's Changed","text":"Full Changelog: v0.0.3...v0.0.4
"},{"location":"changelog/#v003","title":"v0.0.3What's Changed","text":"pyproject.toml
alone instead of setup.cfg
.mypy
type enforcement -- this is too strict for a library intended to be this flexible.Full Changelog: v0.0.2...v0.0.3
"},{"location":"changelog/#v002","title":"v0.0.2What's ChangedNew Contributors","text":"load_nwb
, save_nwb
, load_labelstudio
, save_labelstudio
Full Changelog: v0.0.1...v0.0.2
"},{"location":"changelog/#v001","title":"v0.0.1What's ChangedNew Contributors","text":"Initial stable release of the package.
- `__repr__` to labels object by @h-mayorquin in #8
Full Changelog: https://github.com/talmolab/sleap-io/commits/v0.0.1
"},{"location":"examples/","title":"Examples","text":""},{"location":"examples/#load-and-save-in-different-formats","title":"Load and save in different formats","text":"import sleap_io as sio\n\n# Load from SLEAP file.\nlabels = sio.load_file(\"predictions.slp\")\n\n# Save to NWB file.\nlabels.save(\"predictions.nwb\")\n
See also: Labels.save
and Formats
import sleap_io as sio\n\nlabels = sio.load_slp(\"tests/data/slp/centered_pair_predictions.slp\")\n\n# Convert predictions to point coordinates in a single array.\ntrx = labels.numpy()\nn_frames, n_tracks, n_nodes, xy = trx.shape\nassert xy == 2\n\n# Convert to array with confidence scores appended.\ntrx_with_scores = labels.numpy(return_confidence=True)\nn_frames, n_tracks, n_nodes, xy_score = trx_with_scores.shape\nassert xy_score == 3\n
See also: Labels.numpy
import sleap_io as sio\n\nvideo = sio.load_video(\"test.mp4\")\nn_frames, height, width, channels = video.shape\n\nframe = video[0]\nheight, width, channels = frame.shape\n
See also: sio.load_video
and Video
import sleap_io as sio\nimport numpy as np\n\n# Create skeleton.\nskeleton = sio.Skeleton(\n nodes=[\"head\", \"thorax\", \"abdomen\"],\n edges=[(\"head\", \"thorax\"), (\"thorax\", \"abdomen\")]\n)\n\n# Create video.\nvideo = sio.load_video(\"test.mp4\")\n\n# Create instance.\ninstance = sio.Instance.from_numpy(\n points=np.array([\n [10.2, 20.4],\n [5.8, 15.1],\n [0.3, 10.6],\n ]),\n skeleton=skeleton\n)\n\n# Create labeled frame.\nlf = sio.LabeledFrame(video=video, frame_idx=0, instances=[instance])\n\n# Create labels.\nlabels = sio.Labels(videos=[video], skeletons=[skeleton], labeled_frames=[lf])\n\n# Save.\nlabels.save(\"labels.slp\")\n
See also: Model, Labels
, LabeledFrame
, Instance
, PredictedInstance
, Skeleton
, Video
, Track
, SuggestionFrame
import sleap_io as sio\n\n# Load labels without trying to open the video files.\nlabels = sio.load_file(\"labels.v001.slp\", open_videos=False)\n\n# Fix paths using prefix replacement.\nlabels.replace_filenames(prefix_map={\n \"D:/data/sleap_projects\": \"/home/user/sleap_projects\",\n \"C:/Users/sleaper/Desktop/test\": \"/home/user/sleap_projects\",\n})\n\n# Save labels with updated paths.\nlabels.save(\"labels.v002.slp\")\n
See also: Labels.replace_filenames
import sleap_io as sio\n\n# Load source labels.\nlabels = sio.load_file(\"labels.v001.slp\")\n\n# Save with embedded images for frames with user labeled data and suggested frames.\nlabels.save(\"labels.v001.pkg.slp\", embed=\"user+suggestions\")\n
See also: Labels.save
import sleap_io as sio\n\n# Load source labels.\nlabels = sio.load_file(\"labels.v001.slp\")\n\n# Make splits and export with embedded images.\nlabels.make_training_splits(n_train=0.8, n_val=0.1, n_test=0.1, save_dir=\"split1\", seed=42)\n\n# Splits will be saved as self-contained SLP package files with images and labels.\nlabels_train = sio.load_file(\"split1/train.pkg.slp\")\nlabels_val = sio.load_file(\"split1/val.pkg.slp\")\nlabels_test = sio.load_file(\"split1/test.pkg.slp\")\n
See also: Labels.make_training_splits
Some video formats are not readily seekable at frame-level accuracy. Re-encoding them with the default settings in our video writer makes them reliably seekable with minimal loss of quality, and this can be done in a single line:
import sleap_io as sio\n\nsio.save_video(sio.load_video(\"input.mp4\"), \"output.mp4\")\n
See also: save_video
It can sometimes be useful to pull out a short clip of frames, either for sharing or for generating data on only a subset of the video. We can do this with the following recipe:
import sleap_io as sio\n\n# Load existing data.\nlabels = sio.load_file(\"labels.slp\")\n\n# Create a new labels file with data from frames 1000-2000 in video 0.\n# Note: a new video will be saved with filename \"clip.mp4\" and frame indices adjusted in\n# the labels.\nclip = labels.trim(\"clip.slp\", list(range(1_000, 2_000)), video=0)\n
See also: Labels.trim
Skeleton
objects hold metadata about the keypoints, their ordering, names and connections. When converting between different annotation formats, it can be useful to change skeletons while retaining as much information as possible. We can do this as follows:
import sleap_io as sio\n\n# Load existing labels with skeleton with nodes: \"head\", \"trunk\", \"tti\"\nlabels = sio.load_file(\"labels.slp\")\n\n# Create a new skeleton with different nodes.\nnew_skeleton = sio.Skeleton([\"HEAD\", \"CENTROID\", \"TAIL_BASE\", \"TAIL_TIP\"])\n\n# Replace the skeleton with correspondences where possible.\nlabels.replace_skeleton(\n    new_skeleton,\n    node_map={\n        \"head\": \"HEAD\",\n        \"trunk\": \"CENTROID\",\n        \"tti\": \"TAIL_BASE\"\n    }\n)\n\n# Save with the new skeleton format.\nlabels.save(\"labels_with_new_skeleton.slp\")\n
See also: Labels.replace_skeleton
sleap_io.load_file(filename, format=None, **kwargs)
","text":"Load a file and return the appropriate object.
Parameters:
- `filename` (`str | Path`, required): Path to a file.
- `format` (`Optional[str]`, default `None`): Optional format to load as. If not provided, will be inferred from the file extension. Available formats are: "slp", "nwb", "labelstudio", "jabs" and "video".
Returns:
- `Union[Labels, Video]`: A `Labels` or `Video` object.
sleap_io/io/main.py
def load_file(\n filename: str | Path, format: Optional[str] = None, **kwargs\n) -> Union[Labels, Video]:\n \"\"\"Load a file and return the appropriate object.\n\n Args:\n filename: Path to a file.\n format: Optional format to load as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\"\n and \"video\".\n\n Returns:\n A `Labels` or `Video` object.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"json\"\n elif filename.endswith(\".h5\"):\n format = \"jabs\"\n else:\n for vid_ext in Video.EXTS:\n if filename.endswith(vid_ext):\n format = \"video\"\n break\n if format is None:\n raise ValueError(f\"Could not infer format from filename: '{filename}'.\")\n\n if filename.endswith(\".slp\"):\n return load_slp(filename, **kwargs)\n elif filename.endswith(\".nwb\"):\n return load_nwb(filename, **kwargs)\n elif filename.endswith(\".json\"):\n return load_labelstudio(filename, **kwargs)\n elif filename.endswith(\".h5\"):\n return load_jabs(filename, **kwargs)\n elif format == \"video\":\n return load_video(filename, **kwargs)\n
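A short usage sketch of the format handling described above (file names are hypothetical):

```python
import sleap_io as sio

# Format inferred from the extension.
labels = sio.load_file("predictions.slp")

# Explicit format override (".h5" is inferred as JABS by default).
jabs_labels = sio.load_file("poses.h5", format="jabs")
```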
"},{"location":"formats/#sleap_io.save_file","title":"sleap_io.save_file(labels, filename, format=None, **kwargs)
","text":"Save a file based on the extension.
Parameters:
- `labels` (`Labels`, required): A SLEAP `Labels` object (see `load_slp`).
- `filename` (`str | Path`, required): Path to save labels to.
- `format` (`Optional[str]`, default `None`): Optional format to save as. If not provided, will be inferred from the file extension. Available formats are: "slp", "nwb", "labelstudio" and "jabs".
Source code in sleap_io/io/main.py
def save_file(\n labels: Labels, filename: str | Path, format: Optional[str] = None, **kwargs\n):\n \"\"\"Save a file based on the extension.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to.\n format: Optional format to save as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and\n \"jabs\".\n \"\"\"\n if isinstance(filename, Path):\n filename = str(filename)\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"labelstudio\"\n elif \"pose_version\" in kwargs:\n format = \"jabs\"\n\n if format == \"slp\":\n save_slp(labels, filename, **kwargs)\n elif format == \"nwb\":\n save_nwb(labels, filename, **kwargs)\n elif format == \"labelstudio\":\n save_labelstudio(labels, filename, **kwargs)\n elif format == \"jabs\":\n pose_version = kwargs.pop(\"pose_version\", 5)\n root_folder = kwargs.pop(\"root_folder\", filename)\n save_jabs(labels, pose_version=pose_version, root_folder=root_folder)\n else:\n raise ValueError(f\"Unknown format '{format}' for filename: '{filename}'.\")\n
"},{"location":"formats/#sleap_io.load_video","title":"sleap_io.load_video(filename, **kwargs)
","text":"Load a video file.
Parameters:
- `filename` (`str`, required): The filename(s) of the video. Supported extensions: "mp4", "avi", "mov", "mj2", "mkv", "h5", "hdf5", "slp", "png", "jpg", "jpeg", "tif", "tiff", "bmp". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
Returns:
- `Video`: A `Video` object.
sleap_io/io/main.py
def load_video(filename: str, **kwargs) -> Video:\n \"\"\"Load a video file.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n\n Returns:\n A `Video` object.\n \"\"\"\n return Video.from_filename(filename, **kwargs)\n
"},{"location":"formats/#sleap_io.save_video","title":"sleap_io.save_video(frames, filename, fps=30, pixelformat='yuv420p', codec='libx264', crf=25, preset='superfast', output_params=None)
","text":"Write a list of frames to a video file.
Parameters:
- `frames` (`ndarray | Video`, required): Sequence of frames to write to video. Each frame should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
- `filename` (`str | Path`, required): Path to output video file.
- `fps` (`float`, default `30`): Frames per second. Defaults to 30.
- `pixelformat` (`str`, default `'yuv420p'`): Pixel format for video. Defaults to "yuv420p".
- `codec` (`str`, default `'libx264'`): Codec to use for encoding. Defaults to "libx264".
- `crf` (`int`, default `25`): Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not "libx264".
- `preset` (`str`, default `'superfast'`): H264 encoding preset. Defaults to "superfast". No effect if codec is not "libx264".
- `output_params` (`list | None`, default `None`): Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.
See also: sio.VideoWriter
sleap_io/io/main.py
def save_video(\n frames: np.ndarray | Video,\n filename: str | Path,\n fps: float = 30,\n pixelformat: str = \"yuv420p\",\n codec: str = \"libx264\",\n crf: int = 25,\n preset: str = \"superfast\",\n output_params: list | None = None,\n):\n \"\"\"Write a list of frames to a video file.\n\n Args:\n frames: Sequence of frames to write to video. Each frame should be a 2D or 3D\n numpy array with dimensions (height, width) or (height, width, channels).\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n See also: `sio.VideoWriter`\n \"\"\"\n if output_params is None:\n output_params = []\n\n with video_writing.VideoWriter(\n filename,\n fps=fps,\n pixelformat=pixelformat,\n codec=codec,\n crf=crf,\n preset=preset,\n output_params=output_params,\n ) as writer:\n for frame in frames:\n writer(frame)\n
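A usage sketch of `save_video` with a few of the encoding parameters documented above (paths are hypothetical):

```python
import sleap_io as sio

video = sio.load_video("input.mp4")

# Re-encode with a lower CRF and a slower preset for higher quality output.
sio.save_video(video, "output_hq.mp4", fps=30, crf=18, preset="medium")
```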
"},{"location":"formats/#sleap_io.load_slp","title":"sleap_io.load_slp(filename, open_videos=True)
","text":"Load a SLEAP dataset.
Parameters:
- `filename` (`str`, required): Path to a SLEAP labels file (`.slp`).
- `open_videos` (`bool`, default `True`): If `True` (the default), attempt to open the video backend for I/O. If `False`, the backend will not be opened (useful for reading metadata when the video files are not available).
Returns:
- `Labels`: The dataset as a `Labels` object.
sleap_io/io/main.py
def load_slp(filename: str, open_videos: bool = True) -> Labels:\n \"\"\"Load a SLEAP dataset.\n\n Args:\n filename: Path to a SLEAP labels file (`.slp`).\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return slp.read_labels(filename, open_videos=open_videos)\n
"},{"location":"formats/#sleap_io.save_slp","title":"sleap_io.save_slp(labels, filename, embed=None)
","text":"Save a SLEAP dataset to a .slp
file.
Parameters:
- `labels` (`Labels`, required): A SLEAP `Labels` object (see `load_slp`).
- `filename` (`str`, required): Path to save labels to ending with `.slp`.
- `embed` (`bool | str | list[tuple[Video, int]] | None`, default `None`): Frames to embed in the saved labels file. One of `None`, `True`, `"all"`, `"user"`, `"suggestions"`, `"user+suggestions"`, `"source"` or list of tuples of `(video, frame_idx)`. If `None` is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file. If `True` or `"all"`, all labeled frames and suggested frames will be embedded. If `"source"` is specified, no images will be embedded and the source video will be restored if available. This argument is only valid for the SLP backend.
Source code in sleap_io/io/main.py
def save_slp(\n labels: Labels,\n filename: str,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Save a SLEAP dataset to a `.slp` file.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.slp`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n return slp.write_labels(filename, labels, embed=embed)\n
"},{"location":"formats/#sleap_io.load_nwb","title":"sleap_io.load_nwb(filename)
","text":"Load an NWB dataset as a SLEAP Labels
object.
Parameters:
- `filename` (`str`, required): Path to a NWB file (`.nwb`).
Returns:
- `Labels`: The dataset as a `Labels` object.
sleap_io/io/main.py
def load_nwb(filename: str) -> Labels:\n \"\"\"Load an NWB dataset as a SLEAP `Labels` object.\n\n Args:\n filename: Path to a NWB file (`.nwb`).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return nwb.read_nwb(filename)\n
"},{"location":"formats/#sleap_io.save_nwb","title":"sleap_io.save_nwb(labels, filename, append=True)
","text":"Save a SLEAP dataset to NWB format.
Parameters:
- `labels` (`Labels`, required): A SLEAP `Labels` object (see `load_slp`).
- `filename` (`str`, required): Path to NWB file to save to. Must end in `.nwb`.
- `append` (`bool`, default `True`): If `True` (the default), append to existing NWB file. File will be created if it does not exist.
See also: nwb.write_nwb, nwb.append_nwb
Source code insleap_io/io/main.py
def save_nwb(labels: Labels, filename: str, append: bool = True):\n \"\"\"Save a SLEAP dataset to NWB format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to NWB file to save to. Must end in `.nwb`.\n append: If `True` (the default), append to existing NWB file. File will be\n created if it does not exist.\n\n See also: nwb.write_nwb, nwb.append_nwb\n \"\"\"\n if append and Path(filename).exists():\n nwb.append_nwb(labels, filename)\n else:\n nwb.write_nwb(labels, filename)\n
"},{"location":"formats/#sleap_io.load_jabs","title":"sleap_io.load_jabs(filename, skeleton=None)
","text":"Read JABS-style predictions from a file and return a Labels
object.
Parameters:
- `filename` (`str`, required): Path to the jabs h5 pose file.
- `skeleton` (`Optional[Skeleton]`, default `None`): An optional `Skeleton` object.
Returns:
- `Labels`: Parsed labels as a `Labels` instance.
sleap_io/io/main.py
def load_jabs(filename: str, skeleton: Optional[Skeleton] = None) -> Labels:\n \"\"\"Read JABS-style predictions from a file and return a `Labels` object.\n\n Args:\n filename: Path to the jabs h5 pose file.\n skeleton: An optional `Skeleton` object.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return jabs.read_labels(filename, skeleton=skeleton)\n
"},{"location":"formats/#sleap_io.save_jabs","title":"sleap_io.save_jabs(labels, pose_version, root_folder=None)
","text":"Save a SLEAP dataset to JABS pose file format.
Parameters:
- `labels` (`Labels`, required): SLEAP `Labels` object.
- `pose_version` (`int`, required): The JABS pose version to write data out.
- `root_folder` (`Optional[str]`, default `None`): Optional root folder where the files should be saved.
Note: Filenames for JABS poses are based on video filenames.
Source code insleap_io/io/main.py
def save_jabs(labels: Labels, pose_version: int, root_folder: Optional[str] = None):\n \"\"\"Save a SLEAP dataset to JABS pose file format.\n\n Args:\n labels: SLEAP `Labels` object.\n pose_version: The JABS pose version to write data out.\n root_folder: Optional root folder where the files should be saved.\n\n Note:\n Filenames for JABS poses are based on video filenames.\n \"\"\"\n jabs.write_labels(labels, pose_version, root_folder)\n
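A minimal sketch of writing JABS pose files with the arguments documented above (paths are hypothetical):

```python
import sleap_io as sio

labels = sio.load_file("labels.slp")

# Output filenames are derived from the video filenames, written under root_folder.
sio.save_jabs(labels, pose_version=5, root_folder="jabs_out")
```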
"},{"location":"formats/#sleap_io.load_labelstudio","title":"sleap_io.load_labelstudio(filename, skeleton=None)
","text":"Read Label Studio-style annotations from a file and return a Labels
object.
Parameters:
- `filename` (`str`, required): Path to the label-studio annotation file in JSON format.
- `skeleton` (`Optional[Union[Skeleton, list[str]]]`, default `None`): An optional `Skeleton` object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
Returns:
- `Labels`: Parsed labels as a `Labels` instance.
sleap_io/io/main.py
def load_labelstudio(\n filename: str, skeleton: Optional[Union[Skeleton, list[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio-style annotations from a file and return a `Labels` object.\n\n Args:\n filename: Path to the label-studio annotation file in JSON format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return labelstudio.read_labels(filename, skeleton=skeleton)\n
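A small sketch of loading Label Studio annotations with an explicit node list, as documented above (the file name and node names are hypothetical):

```python
import sleap_io as sio

labels = sio.load_labelstudio("annotations.json", skeleton=["head", "thorax", "abdomen"])
```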
"},{"location":"formats/#sleap_io.save_labelstudio","title":"sleap_io.save_labelstudio(labels, filename)
","text":"Save a SLEAP dataset to Label Studio format.
Parameters:
- `labels` (`Labels`, required): A SLEAP `Labels` object (see `load_slp`).
- `filename` (`str`, required): Path to save labels to ending with `.json`.
sleap_io/io/main.py
def save_labelstudio(labels: Labels, filename: str):\n \"\"\"Save a SLEAP dataset to Label Studio format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.json`.\n \"\"\"\n labelstudio.write_labels(labels, filename)\n
"},{"location":"model/","title":"Data model","text":"sleap-io
implements the core data structures used in SLEAP for storing data related to multi-instance pose tracking, including for annotation, training and inference.
sleap_io.Labels
","text":"Pose data for a set of videos that have user labels and/or predictions.
Attributes:
- `labeled_frames` (`list[LabeledFrame]`): A list of `LabeledFrame`s that are associated with this dataset.
- `videos` (`list[Video]`): A list of `Video`s that are associated with this dataset. Videos do not need to have corresponding `LabeledFrame`s if they do not have any labels or predictions yet.
- `skeletons` (`list[Skeleton]`): A list of `Skeleton`s that are associated with this dataset. This should generally only contain a single skeleton.
- `tracks` (`list[Track]`): A list of `Track`s that are associated with this dataset.
- `suggestions` (`list[SuggestionFrame]`): A list of `SuggestionFrame`s that are associated with this dataset.
- `provenance` (`dict[str, Any]`): Dictionary of arbitrary metadata providing additional information about where the dataset came from.
Notes: `Video`s in contained `LabeledFrame`s, and `Skeleton`s and `Track`s in contained `Instance`s are added to the respective lists automatically.
Methods:
- `__attrs_post_init__`: Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.
- `__getitem__`: Return one or more labeled frames based on indexing criteria.
- `__iter__`: Iterate over `labeled_frames` list when calling iter method on `Labels`.
- `__len__`: Return number of labeled frames.
- `__repr__`: Return a readable representation of the labels.
- `__str__`: Return a readable representation of the labels.
- `append`: Append a labeled frame to the labels.
- `clean`: Remove empty frames, unused skeletons, tracks and videos.
- `extend`: Append a list of labeled frames to the labels.
- `extract`: Extract a set of frames into a new Labels object.
- `find`: Search for labeled frames given video and/or frame index.
- `make_training_splits`: Make splits for training with embedded images.
- `numpy`: Construct a numpy array from instance points.
- `remove_nodes`: Remove nodes from the skeleton.
- `remove_predictions`: Remove all predicted instances from the labels.
- `rename_nodes`: Rename nodes in the skeleton.
- `reorder_nodes`: Reorder nodes in the skeleton.
- `replace_filenames`: Replace video filenames.
- `replace_skeleton`: Replace the skeleton in the labels.
- `replace_videos`: Replace videos and update all references.
- `save`: Save labels to file in specified format.
- `split`: Separate the labels into random splits.
- `trim`: Trim the labels to a subset of frames and videos accordingly.
- `update`: Update data structures based on contents.
Attributes:
- `instances` (`Iterator[Instance]`): Return an iterator over all instances within all labeled frames.
- `skeleton` (`Skeleton`): Return the skeleton if there is only a single skeleton in the labels.
- `user_labeled_frames` (`list[LabeledFrame]`): Return all labeled frames with user (non-predicted) instances.
- `video` (`Video`): Return the video if there is only a single video in the labels.
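A short sketch of common `Labels` access patterns based on the methods and properties listed above (the file path is hypothetical):

```python
import sleap_io as sio

labels = sio.load_file("labels.slp")

print(len(labels))        # number of labeled frames
lf = labels[0]            # a single LabeledFrame by index
first_ten = labels[:10]   # a list of LabeledFrames from a slice

# Iterate lazily over every instance in every labeled frame.
for instance in labels.instances:
    pass

# All frames that contain user (non-predicted) instances.
user_lfs = labels.user_labeled_frames
```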
Source code insleap_io/model/labels.py
@define\nclass Labels:\n \"\"\"Pose data for a set of videos that have user labels and/or predictions.\n\n Attributes:\n labeled_frames: A list of `LabeledFrame`s that are associated with this dataset.\n videos: A list of `Video`s that are associated with this dataset. Videos do not\n need to have corresponding `LabeledFrame`s if they do not have any\n labels or predictions yet.\n skeletons: A list of `Skeleton`s that are associated with this dataset. This\n should generally only contain a single skeleton.\n tracks: A list of `Track`s that are associated with this dataset.\n suggestions: A list of `SuggestionFrame`s that are associated with this dataset.\n provenance: Dictionary of arbitrary metadata providing additional information\n about where the dataset came from.\n\n Notes:\n `Video`s in contain `LabeledFrame`s, and `Skeleton`s and `Track`s in contained\n `Instance`s are added to the respective lists automatically.\n \"\"\"\n\n labeled_frames: list[LabeledFrame] = field(factory=list)\n videos: list[Video] = field(factory=list)\n skeletons: list[Skeleton] = field(factory=list)\n tracks: list[Track] = field(factory=list)\n suggestions: list[SuggestionFrame] = field(factory=list)\n provenance: dict[str, Any] = field(factory=dict)\n\n def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n\n def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n\n def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n ) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n\n def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n\n def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n 
f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n\n def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n ) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. 
Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n\n @property\n def video(self) -> Video:\n \"\"\"Return the video if there is only a single video in the labels.\"\"\"\n if len(self.videos) == 0:\n raise ValueError(\"There are no videos in the labels.\")\n elif len(self.videos) == 1:\n return self.videos[0]\n else:\n raise ValueError(\n \"Labels.video can only be used when there is only a single video saved \"\n \"in the labels. Use Labels.videos instead.\"\n )\n\n @property\n def skeleton(self) -> Skeleton:\n \"\"\"Return the skeleton if there is only a single skeleton in the labels.\"\"\"\n if len(self.skeletons) == 0:\n raise ValueError(\"There are no skeletons in the labels.\")\n elif len(self.skeletons) == 1:\n return self.skeletons[0]\n else:\n raise ValueError(\n \"Labels.skeleton can only be used when there is only a single skeleton \"\n \"saved in the labels. Use Labels.skeletons instead.\"\n )\n\n def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n ) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. 
If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n\n def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n ):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n\n def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n ):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n 
if frames:\n self.labeled_frames = kept_frames\n\n def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n\n @property\n def user_labeled_frames(self) -> list[LabeledFrame]:\n \"\"\"Return all labeled frames with user (non-predicted) instances.\"\"\"\n return [lf for lf in self.labeled_frames if lf.has_user_instances]\n\n @property\n def instances(self) -> Iterator[Instance]:\n \"\"\"Return an iterator over all instances within all labeled frames.\"\"\"\n return (instance for lf in self.labeled_frames for instance in lf.instances)\n\n def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n ):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n\n def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. 
If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n ):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. 
**All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n\n def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n ):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n\n def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n ):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. 
Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n\n def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n ) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n\n def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. 
If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n\n def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n ) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. 
If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n\n def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. 
Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
"},{"location":"model/#sleap_io.Labels.instances","title":"instances: Iterator[Instance]
property
","text":"Return an iterator over all instances within all labeled frames.
"},{"location":"model/#sleap_io.Labels.skeleton","title":"skeleton: Skeleton
property
","text":"Return the skeleton if there is only a single skeleton in the labels.
"},{"location":"model/#sleap_io.Labels.user_labeled_frames","title":"user_labeled_frames: list[LabeledFrame]
property
","text":"Return all labeled frames with user (non-predicted) instances.
"},{"location":"model/#sleap_io.Labels.video","title":"video: Video
property
","text":"Return the video if there is only a single video in the labels.
"},{"location":"model/#sleap_io.Labels.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Append videos, skeletons, and tracks seen in labeled_frames
to Labels
.
sleap_io/model/labels.py
def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n
"},{"location":"model/#sleap_io.Labels.__getitem__","title":"__getitem__(key)
","text":"Return one or more labeled frames based on indexing criteria.
Source code in sleap_io/model/labels.py
def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n
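As a usage sketch (assuming labels is a populated, single-video Labels object; indices are arbitrary), the supported indexing forms look like this:
lf = labels[0]                      # single labeled frame by integer index
lfs = labels[:10]                   # list of the first 10 labeled frames
lfs = labels[[0, 2, 4]]             # list of frames at specific indices
lf = labels[(labels.video, 0)]      # the labeled frame at frame index 0 of a video
lfs = labels[labels.video]          # all labeled frames in a video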
"},{"location":"model/#sleap_io.Labels.__iter__","title":"__iter__()
","text":"Iterate over labeled_frames
list when calling iter method on Labels
.
sleap_io/model/labels.py
def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n
"},{"location":"model/#sleap_io.Labels.__len__","title":"__len__()
","text":"Return number of labeled frames.
Source code in sleap_io/model/labels.py
def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n
"},{"location":"model/#sleap_io.Labels.__repr__","title":"__repr__()
","text":"Return a readable representation of the labels.
Source code in sleap_io/model/labels.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n
"},{"location":"model/#sleap_io.Labels.__str__","title":"__str__()
","text":"Return a readable representation of the labels.
Source code in sleap_io/model/labels.py
def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n
"},{"location":"model/#sleap_io.Labels.append","title":"append(lf, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlf
LabeledFrame
A labeled frame to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
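A minimal sketch (assuming a single-video project; the frame index is arbitrary):
lf = LabeledFrame(video=labels.video, frame_idx=100)
labels.append(lf)  # with update=True, new videos, skeletons, and tracks are also registered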
"},{"location":"model/#sleap_io.Labels.clean","title":"clean(frames=True, empty_instances=False, skeletons=True, tracks=True, videos=False)
","text":"Remove empty frames, unused skeletons, tracks and videos.
Parameters:
Name Type Description Defaultframes
bool
If True
(the default), remove empty frames.
True
empty_instances
bool
If True
(NOT default), remove instances that have no visible points.
False
skeletons
bool
If True
(the default), remove unused skeletons.
True
tracks
bool
If True
(the default), remove unused tracks.
True
videos
bool
If True
(NOT default), remove videos that have no labeled frames.
False
Source code in sleap_io/model/labels.py
def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n if frames:\n self.labeled_frames = kept_frames\n
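For example, after heavy editing you might run (a sketch assuming labels is an existing Labels object):
labels.clean()                      # drop empty frames and unused skeletons/tracks
labels.clean(videos=True)           # additionally drop videos with no labeled frames
labels.clean(empty_instances=True)  # also drop instances with no visible points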
"},{"location":"model/#sleap_io.Labels.extend","title":"extend(lfs, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlfs
list[LabeledFrame]
A list of labeled frames to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
"},{"location":"model/#sleap_io.Labels.extract","title":"extract(inds, copy=True)
","text":"Extract a set of frames into a new Labels object.
Parameters:
Name Type Description Defaultinds
list[int] | list[tuple[Video, int]] | ndarray
Indices of labeled frames. Can be specified as a list or array of integer indices of labeled frames, or as tuples of Video and frame indices.
requiredcopy
bool
If True
(the default), return a copy of the frames and containing objects. Otherwise, return a reference to the data.
True
Returns:
Type DescriptionLabels
A new Labels
object containing the selected labels.
This copies the labeled frames and their associated data, including skeletons and tracks, and tries to maintain the relative ordering.
This also copies the provenance and inserts an extra key: \"source_labels\"
with the path to the current labels, if available.
It does NOT copy suggested frames.
Source code in sleap_io/model/labels.py
def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n
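For instance (a sketch assuming the project has at least 10 labeled frames):
subset = labels.extract(list(range(10)))   # copy of the first 10 labeled frames
subset.provenance["source_labels"]         # path to the source labels file, if available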
"},{"location":"model/#sleap_io.Labels.find","title":"find(video, frame_idx=None, return_new=False)
","text":"Search for labeled frames given video and/or frame index.
Parameters:
Name Type Description Defaultvideo
Video
A Video
that is associated with the project.
frame_idx
int | list[int] | None
The frame index (or indices) which we want to find in the video. If a range is specified, we'll return all frames with indices in that range. If not specified, we'll return all labeled frames for the video.
None
return_new
bool
Whether to return a singleton list containing a new, empty LabeledFrame
if none are found in the project.
False
Returns:
Type Descriptionlist[LabeledFrame]
List of LabeledFrame
objects that match the criteria.
The list will be empty if no matches are found, unless return_new is True, in which case it contains new (empty) LabeledFrame
objects with video
and frame_idx
set.
sleap_io/model/labels.py
def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n
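For example (a sketch assuming a single-video project; frame indices are arbitrary):
lfs = labels.find(labels.video)                 # all labeled frames in the video
lfs = labels.find(labels.video, frame_idx=100)  # may be an empty list
lfs = labels.find(labels.video, frame_idx=[0, 1, 2], return_new=True)  # new empty frames where missing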
"},{"location":"model/#sleap_io.Labels.make_training_splits","title":"make_training_splits(n_train, n_val=None, n_test=None, save_dir=None, seed=None, embed=True)
","text":"Make splits for training with embedded images.
Parameters:
Name Type Description Defaultn_train
int | float
Size of the training split as integer or fraction.
requiredn_val
int | float | None
Size of the validation split as integer or fraction. If None
, this will be inferred based on the values of n_train
and n_test
. If n_test
is None
, this will be the remainder of the data after the training split.
None
n_test
int | float | None
Size of the testing split as integer or fraction. If None
, the test split will not be saved.
None
save_dir
str | Path | None
If specified, save splits to SLP files with embedded images.
None
seed
int | None
Optional integer seed to use for reproducibility.
None
embed
bool
If True
(the default), embed user labeled frame images in the saved files, which is useful for portability but can be slow for large projects. If False
, labels are saved with references to the source video files.
True
Returns:
Type Descriptiontuple[Labels, Labels] | tuple[Labels, Labels, Labels]
A tuple of labels_train, labels_val
or labels_train, labels_val, labels_test
if n_test
was specified.
Predictions and suggestions will be removed before saving, leaving only frames with user labeled data (the source labels are not affected).
Frames with user labeled data will be embedded in the resulting files.
If save_dir
is specified, this will save the randomly sampled splits to:
{save_dir}/train.pkg.slp
{save_dir}/val.pkg.slp
{save_dir}/test.pkg.slp
(if n_test
is specified)If embed
is False
, the files will be saved without embedded images to:
{save_dir}/train.slp
{save_dir}/val.slp
{save_dir}/test.slp
(if n_test
is specified)See also: Labels.split
sleap_io/model/labels.py
def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n
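A typical call might look like this sketch (the save directory and split sizes are hypothetical):
labels_train, labels_val = labels.make_training_splits(n_train=0.8, save_dir="splits", seed=42)
labels_train, labels_val, labels_test = labels.make_training_splits(
    n_train=0.8, n_val=0.1, n_test=0.1, save_dir="splits", seed=42
)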
"},{"location":"model/#sleap_io.Labels.numpy","title":"numpy(video=None, all_frames=True, untracked=False, return_confidence=False)
","text":"Construct a numpy array from instance points.
Parameters:
Name Type Description Defaultvideo
Optional[Union[Video, int]]
Video or video index to convert to numpy arrays. If None
(the default), uses the first video.
None
untracked
bool
If False
(the default), include only instances that have a track assignment. If True
, includes all instances in each frame in arbitrary order.
False
return_confidence
bool
If False
(the default), only return points of nodes. If True
, return the points and scores of nodes.
False
Returns:
Type Descriptionndarray
An array of tracks of shape (n_frames, n_tracks, n_nodes, 2)
if return_confidence
is False
. Otherwise the returned shape is (n_frames, n_tracks, n_nodes, 3)
if return_confidence
is True
.
Missing data will be replaced with np.nan
.
If this is a single instance project, a track does not need to be assigned.
Only predicted instances (NOT user instances) will be returned.
Notes
This method assumes that instances have tracks assigned and is intended to function primarily for single-video prediction results.
Source code in sleap_io/model/labels.py
def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n
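For example, to pull tracked predictions into an array (a sketch assuming the project contains predicted instances with track assignments):
tracks = labels.numpy(video=0, return_confidence=True)
tracks.shape  # (n_frames, n_tracks, n_nodes, 3); missing data is filled with np.nan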
"},{"location":"model/#sleap_io.Labels.remove_nodes","title":"remove_nodes(nodes, skeleton=None)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Defaultnodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the nodes are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method should always be used when removing nodes from the skeleton as it handles updating the lookup caches necessary for indexing nodes by name, and updating instances to reflect the changes made to the skeleton.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Source code in sleap_io/model/labels.py
def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
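For instance (a sketch; the node name is hypothetical and assumed to exist in the single skeleton):
labels.remove_nodes(["tail_tip"])
# For multi-skeleton projects, pass the skeleton explicitly:
# labels.remove_nodes(["tail_tip"], skeleton=labels.skeletons[0])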
"},{"location":"model/#sleap_io.Labels.remove_predictions","title":"remove_predictions(clean=True)
","text":"Remove all predicted instances from the labels.
Parameters:
Name Type Description Defaultclean
bool
If True
(the default), also remove any empty frames and unused tracks and skeletons. It does NOT remove videos that have no labeled frames or instances with no visible points.
True
See also: Labels.clean
sleap_io/model/labels.py
def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n
"},{"location":"model/#sleap_io.Labels.rename_nodes","title":"rename_nodes(name_map, skeleton=None)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Defaultname_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
requiredskeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new node names exist in the skeleton, if the old node names are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method is recommended over Skeleton.rename_nodes
as it will update all instances in the labels to reflect the new node names.
Example:
>>> labels = Labels(skeletons=[Skeleton(["A", "B", "C"])])
>>> labels.rename_nodes({"A": "X", "B": "Y", "C": "Z"})
>>> labels.skeleton.node_names
["X", "Y", "Z"]
>>> labels.rename_nodes(["a", "b", "c"])
>>> labels.skeleton.node_names
["a", "b", "c"]
Source code in sleap_io/model/labels.py
def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n
"},{"location":"model/#sleap_io.Labels.reorder_nodes","title":"reorder_nodes(new_order, skeleton=None)
","text":"Reorder nodes in the skeleton.
Parameters:
Name Type Description Defaultnew_order
list[NodeOrIndex]
A list of node names, indices, or Node
objects specifying the new order of the nodes.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new order of nodes is not the same length as the current nodes, or if there is more than one skeleton in the Labels
but it is not specified.
This method handles updating the lookup caches necessary for indexing nodes by name, as well as updating instances to reflect the changes made to the skeleton.
Source code in sleap_io/model/labels.py
def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
"},{"location":"model/#sleap_io.Labels.replace_filenames","title":"replace_filenames(new_filenames=None, filename_map=None, prefix_map=None)
","text":"Replace video filenames.
Parameters:
Name Type Description Defaultnew_filenames
list[str | Path] | None
List of new filenames. Must have the same length as the number of videos in the labels.
None
filename_map
dict[str | Path, str | Path] | None
Dictionary mapping old filenames (keys) to new filenames (values).
None
prefix_map
dict[str | Path, str | Path] | None
Dictionary mapping old prefixes (keys) to new prefixes (values).
None
Notes
Only one of the argument types can be provided.
Source code in sleap_io/model/labels.py
def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n
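The three input modes might be used like this (all paths are hypothetical; pass exactly one mode per call):
labels.replace_filenames(new_filenames=["/data/session1.mp4"])                     # one per video, in order
labels.replace_filenames(filename_map={"/old/session1.mp4": "/data/session1.mp4"})
labels.replace_filenames(prefix_map={"/old": "/data"})                             # rewrite common path prefixes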
"},{"location":"model/#sleap_io.Labels.replace_skeleton","title":"replace_skeleton(new_skeleton, old_skeleton=None, node_map=None)
","text":"Replace the skeleton in the labels.
Parameters:
Name Type Description Defaultnew_skeleton
Skeleton
The new Skeleton
to replace the old skeleton with.
old_skeleton
Skeleton | None
The old Skeleton
to replace. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node
objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed.
None
Raises:
Type DescriptionValueError
If there is more than one skeleton in the Labels
but it is not specified.
This method will replace the skeleton in all instances in the labels that have the old skeleton. All point data associated with nodes not in the node_map
will be lost.
sleap_io/model/labels.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. **All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n
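For example (a sketch with hypothetical node names; Skeleton is assumed to be imported from sleap_io):
new_skel = Skeleton(["head", "thorax", "tail"])
labels.replace_skeleton(new_skel, node_map={"head": "head", "thorax": "thorax", "abdomen": "tail"})
Any old nodes left out of node_map lose their point data.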
"},{"location":"model/#sleap_io.Labels.replace_videos","title":"replace_videos(old_videos=None, new_videos=None, video_map=None)
","text":"Replace videos and update all references.
Parameters:
Name Type Description Defaultold_videos
list[Video] | None
List of videos to be replaced.
None
new_videos
list[Video] | None
List of videos to replace with.
None
video_map
dict[Video, Video] | None
Alternative input of dictionary where keys are the old videos and values are the new videos.
None
Source code in sleap_io/model/labels.py
def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n
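For instance (a sketch assuming import sleap_io as sio and a hypothetical replacement path):
new_video = sio.load_video("/data/session1_copy.mp4")
labels.replace_videos(video_map={labels.videos[0]: new_video})
labels.replace_videos(new_videos=[new_video])  # alternative for a single-video project: matched by order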
"},{"location":"model/#sleap_io.Labels.save","title":"save(filename, format=None, embed=None, **kwargs)
","text":"Save labels to file in specified format.
Parameters:
Name Type Description Defaultfilename
str
Path to save labels to.
requiredformat
Optional[str]
The format to save the labels in. If None
, the format will be inferred from the file extension. Available formats are \"slp\"
, \"nwb\"
, \"labelstudio\"
, and \"jabs\"
.
None
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/model/labels.py
def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n
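For example (output paths are hypothetical):
labels.save("labels.slp")                    # format inferred from the extension
labels.save("labels.pkg.slp", embed="user")  # embed user-labeled frame images
labels.save("labels.nwb", format="nwb")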
"},{"location":"model/#sleap_io.Labels.split","title":"split(n, seed=None)
","text":"Separate the labels into random splits.
Parameters:
Name Type Description Defaultn
int | float
Size of the first split. If integer >= 1, assumes that this is the number of labeled frames in the first split. If < 1.0, this will be treated as a fraction of the total labeled frames.
requiredseed
int | None
Optional integer seed to use for reproducibility.
None
Returns:
Type Descriptiontuple[Labels, Labels]
A tuple of split1, split2
.
If an integer was specified, len(split1) == n
.
If a fraction was specified, len(split1) == int(n * len(labels))
.
The second split contains the remainder, i.e., len(split2) == len(labels) - len(split1)
.
If there are too few frames, a minimum of 1 frame will be kept in the second split.
If there is exactly 1 labeled frame in the labels, the same frame will be assigned to both splits.
Source code in sleap_io/model/labels.py
def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n
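A short sketch (assuming the project has at least 100 labeled frames):
labels_train, labels_rest = labels.split(0.8, seed=42)  # 80% / 20% split by labeled frame
first_100, remainder = labels.split(100)                # first split gets exactly 100 frames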
"},{"location":"model/#sleap_io.Labels.trim","title":"trim(save_path, frame_inds, video=None, video_kwargs=None)
","text":"Trim the labels to a subset of frames and videos accordingly.
Parameters:
Name Type Description Defaultsave_path
str | Path
Path to the trimmed labels SLP file. Video will be saved with the same base name but with .mp4 extension.
requiredframe_inds
list[int] | ndarray
Frame indices to save. Can be specified as a list or array of frame integers.
requiredvideo
Video | int | None
Video or integer index of the video to trim. Does not need to be specified for single-video projects.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type DescriptionLabels
The resulting labels object referencing the trimmed data.
NotesThis will remove any data outside of the trimmed frames, save new videos, and adjust the frame indices to match the newly trimmed videos.
Source code insleap_io/model/labels.py
def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
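Example (illustrative only): trimming a single-video project to a frame range. The input path and the fps keyword forwarded via video_kwargs are assumptions, not part of the documented signature.
import numpy as np
import sleap_io as sio

labels = sio.load_file("labels.v001.slp")  # hypothetical input project

# Keep frames 100-199; writes clip.mp4 next to clip.slp and shifts frame indices to start at 0.
trimmed = labels.trim(
    "clip.slp",
    frame_inds=np.arange(100, 200),
    video_kwargs={"fps": 30},  # assumed to be accepted by sio.save_video
)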
"},{"location":"model/#sleap_io.Labels.update","title":"update()
","text":"Update data structures based on contents.
This function will update the list of skeletons, videos and tracks from the labeled frames, instances and suggestions.
Source code insleap_io/model/labels.py
def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n
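Example (a minimal sketch, assuming labeled_frames is mutated directly and that the video path is a placeholder): update() re-syncs the videos, skeletons, and tracks lists after manual edits.
import numpy as np
import sleap_io as sio

skeleton = sio.Skeleton(["head", "tail"])
video = sio.Video.from_filename("clip.mp4")  # placeholder video path
inst = sio.Instance.from_numpy(np.array([[10.0, 20.0], [30.0, 40.0]]), skeleton=skeleton)

labels = sio.Labels()
labels.labeled_frames.append(sio.LabeledFrame(video=video, frame_idx=0, instances=[inst]))

labels.update()  # registers the new video and skeleton found in the labeled frames
assert video in labels.videos and skeleton in labels.skeletons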
"},{"location":"model/#sleap_io.LabeledFrame","title":"sleap_io.LabeledFrame
","text":"Labeled data for a single frame of a video.
Attributes:
Name Type Descriptionvideo
Video
The Video
associated with this LabeledFrame
.
frame_idx
int
The index of the LabeledFrame
in the Video
.
instances
list[Union[Instance, PredictedInstance]]
List of Instance
objects associated with this LabeledFrame
.
Instances of this class are hashed by identity, not by value. This means that two LabeledFrame
instances with the same attributes will NOT be considered equal in a set or dict.
Methods:
Name Description__getitem__
Return the Instance
at key
index in the instances
list.
__iter__
Iterate over Instance
s in instances
list.
__len__
Return the number of instances in the frame.
numpy
Return all instances in the frame as a numpy array.
remove_empty_instances
Remove all instances with no visible points.
remove_predictions
Remove all PredictedInstance
objects from the frame.
Attributes:
Name Type Descriptionhas_predicted_instances
bool
Return True if the frame has any predicted instances.
has_user_instances
bool
Return True if the frame has any user-labeled instances.
image
ndarray
Return the image of the frame as a numpy array.
predicted_instances
list[Instance]
Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions
list[Instance]
Return a list of \"unused\" PredictedInstance
objects in frame.
user_instances
list[Instance]
Frame instances that are user-labeled (Instance
objects).
sleap_io/model/labeled_frame.py
@define(eq=False)\nclass LabeledFrame:\n \"\"\"Labeled data for a single frame of a video.\n\n Attributes:\n video: The `Video` associated with this `LabeledFrame`.\n frame_idx: The index of the `LabeledFrame` in the `Video`.\n instances: List of `Instance` objects associated with this `LabeledFrame`.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `LabeledFrame` instances with the same attributes will NOT be considered\n equal in a set or dict.\n \"\"\"\n\n video: Video\n frame_idx: int = field(converter=int)\n instances: list[Union[Instance, PredictedInstance]] = field(factory=list)\n\n def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n\n def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n\n def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n\n @property\n def user_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are user-labeled (`Instance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == Instance]\n\n @property\n def has_user_instances(self) -> bool:\n \"\"\"Return True if the frame has any user-labeled instances.\"\"\"\n for inst in self.instances:\n if type(inst) == Instance:\n return True\n return False\n\n @property\n def predicted_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are predicted by a model (`PredictedInstance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == PredictedInstance]\n\n @property\n def has_predicted_instances(self) -> bool:\n \"\"\"Return True if the frame has any predicted instances.\"\"\"\n for inst in self.instances:\n if type(inst) == PredictedInstance:\n return True\n return False\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n\n @property\n def image(self) -> np.ndarray:\n \"\"\"Return the image of the frame as a numpy array.\"\"\"\n return self.video[self.frame_idx]\n\n @property\n def unused_predictions(self) -> list[Instance]:\n \"\"\"Return a list of \"unused\" `PredictedInstance` objects in frame.\n\n This is all of the `PredictedInstance` objects which do not have a corresponding\n `Instance` in the same track in the same frame.\n \"\"\"\n unused_predictions = []\n any_tracks = [inst.track for inst in self.instances if inst.track is not None]\n if len(any_tracks):\n # Use tracks to determine which predicted instances have been used\n used_tracks = [\n inst.track\n for inst in self.instances\n if type(inst) == Instance and inst.track is not None\n ]\n unused_predictions = [\n inst\n for inst in self.instances\n if inst.track not in used_tracks and type(inst) == PredictedInstance\n ]\n\n else:\n # Use from_predicted to determine which predicted instances have been used\n # TODO: should we always do this instead of using tracks?\n used_instances = [\n inst.from_predicted\n for inst in self.instances\n if inst.from_predicted is not None\n ]\n 
unused_predictions = [\n inst\n for inst in self.instances\n if type(inst) == PredictedInstance and inst not in used_instances\n ]\n\n return unused_predictions\n\n def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n\n def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
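Example (an illustrative sketch; the video path is a placeholder): constructing a LabeledFrame and reading it back as an array.
import numpy as np
import sleap_io as sio

skeleton = sio.Skeleton(["head", "thorax", "abdomen"])
video = sio.Video.from_filename("clip.mp4")  # placeholder path

inst = sio.Instance.from_numpy(
    np.array([[5.0, 5.0], [10.0, 12.0], [np.nan, np.nan]]), skeleton=skeleton
)
lf = sio.LabeledFrame(video=video, frame_idx=3, instances=[inst])

print(len(lf))                 # 1 instance in the frame
print(lf.has_user_instances)   # True
print(lf.numpy().shape)        # (1, 3, 2); instance order is arbitrary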
"},{"location":"model/#sleap_io.LabeledFrame.has_predicted_instances","title":"has_predicted_instances: bool
property
","text":"Return True if the frame has any predicted instances.
"},{"location":"model/#sleap_io.LabeledFrame.has_user_instances","title":"has_user_instances: bool
property
","text":"Return True if the frame has any user-labeled instances.
"},{"location":"model/#sleap_io.LabeledFrame.image","title":"image: np.ndarray
property
","text":"Return the image of the frame as a numpy array.
"},{"location":"model/#sleap_io.LabeledFrame.predicted_instances","title":"predicted_instances: list[Instance]
property
","text":"Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions: list[Instance]
property
","text":"Return a list of \"unused\" PredictedInstance
objects in frame.
This is all of the PredictedInstance
objects which do not have a corresponding Instance
in the same track in the same frame.
user_instances: list[Instance]
property
","text":"Frame instances that are user-labeled (Instance
objects).
__getitem__(key)
","text":"Return the Instance
at key
index in the instances
list.
sleap_io/model/labeled_frame.py
def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n
"},{"location":"model/#sleap_io.LabeledFrame.__iter__","title":"__iter__()
","text":"Iterate over Instance
s in instances
list.
sleap_io/model/labeled_frame.py
def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n
"},{"location":"model/#sleap_io.LabeledFrame.__len__","title":"__len__()
","text":"Return the number of instances in the frame.
Source code insleap_io/model/labeled_frame.py
def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n
"},{"location":"model/#sleap_io.LabeledFrame.numpy","title":"numpy()
","text":"Return all instances in the frame as a numpy array.
Returns:
Type Descriptionndarray
Points as a numpy array of shape (n_instances, n_nodes, 2)
.
Note that the order of the instances is arbitrary.
Source code insleap_io/model/labeled_frame.py
def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n
"},{"location":"model/#sleap_io.LabeledFrame.remove_empty_instances","title":"remove_empty_instances()
","text":"Remove all instances with no visible points.
Source code insleap_io/model/labeled_frame.py
def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
"},{"location":"model/#sleap_io.LabeledFrame.remove_predictions","title":"remove_predictions()
","text":"Remove all PredictedInstance
objects from the frame.
sleap_io/model/labeled_frame.py
def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n
"},{"location":"model/#sleap_io.Instance","title":"sleap_io.Instance
","text":"This class represents a ground truth instance such as an animal.
An Instance
has a set of landmarks (Point
s) that correspond to the nodes defined in its Skeleton
.
It may also be associated with a Track
which links multiple instances together across frames or videos.
Attributes:
Name Type Descriptionpoints
Union[dict[Node, Point], dict[Node, PredictedPoint]]
A dictionary with keys as Node
s and values as Point
s containing all of the landmarks of the instance. This can also be specified as a dictionary with node names, a list of length n_nodes
, or a numpy array of shape (n_nodes, 2)
.
skeleton
Skeleton
The Skeleton
that describes the Node
s and Edge
s associated with this instance.
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
from_predicted
Optional[PredictedInstance]
The PredictedInstance
(if any) that this instance was initialized from. This is used with human-in-the-loop workflows.
Methods:
Name Description__attrs_post_init__
Maintain point mappings between node and points after initialization.
__getitem__
Return the point associated with a node or None
if not set.
__len__
Return the number of points in the instance.
__repr__
Return a readable representation of the instance.
from_numpy
Create an instance object from a numpy array.
numpy
Return the instance points as a numpy array.
replace_skeleton
Replace the skeleton associated with the instance.
update_skeleton
Update the points dictionary to match the skeleton.
Attributes:
Name Type Descriptionis_empty
bool
Return True
if no points are visible on the instance.
n_visible
int
Return the number of visible points in the instance.
Source code insleap_io/model/instance.py
@define(auto_attribs=True, slots=True, eq=True)\nclass Instance:\n \"\"\"This class represents a ground truth instance such as an animal.\n\n An `Instance` has a set of landmarks (`Point`s) that correspond to the nodes defined\n in its `Skeleton`.\n\n It may also be associated with a `Track` which links multiple instances together\n across frames or videos.\n\n Attributes:\n points: A dictionary with keys as `Node`s and values as `Point`s containing all\n of the landmarks of the instance. This can also be specified as a dictionary\n with node names, a list of length `n_nodes`, or a numpy array of shape\n `(n_nodes, 2)`.\n skeleton: The `Skeleton` that describes the `Node`s and `Edge`s associated with\n this instance.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: The `PredictedInstance` (if any) that this instance was\n initialized from. This is used with human-in-the-loop workflows.\n \"\"\"\n\n _POINT_TYPE = Point\n\n def _make_default_point(self, x, y):\n return self._POINT_TYPE(x, y, visible=not (math.isnan(x) or math.isnan(y)))\n\n def _convert_points(self, attr, points):\n \"\"\"Maintain points mappings between nodes and points.\"\"\"\n if type(points) == np.ndarray:\n points = points.tolist()\n\n if type(points) == list:\n if len(points) != len(self.skeleton):\n raise ValueError(\n \"If specifying points as a list, must provide as many points as \"\n \"nodes in the skeleton.\"\n )\n points = {node: pt for node, pt in zip(self.skeleton.nodes, points)}\n\n if type(points) == dict:\n keys = [\n node if type(node) == Node else self.skeleton[node]\n for node in points.keys()\n ]\n vals = [\n (\n point\n if type(point) == self._POINT_TYPE\n else self._make_default_point(*point)\n )\n for point in points.values()\n ]\n points = {k: v for k, v in zip(keys, vals)}\n\n missing_nodes = list(set(self.skeleton.nodes) - set(points.keys()))\n for node in missing_nodes:\n points[node] = self._make_default_point(x=np.nan, y=np.nan)\n\n return points\n\n points: Union[dict[Node, Point], dict[Node, PredictedPoint]] = field(\n on_setattr=_convert_points, eq=cmp_using(eq=_compare_points) # type: ignore\n )\n skeleton: Skeleton\n track: Optional[Track] = None\n from_predicted: Optional[PredictedInstance] = None\n\n def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n\n def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n\n def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n\n @property\n def n_visible(self) -> int:\n \"\"\"Return the number of visible points in the instance.\"\"\"\n return sum(pt.visible for pt in self.points.values())\n\n @property\n def is_empty(self) -> bool:\n \"\"\"Return `True` if no points are visible on the instance.\"\"\"\n return self.n_visible == 
0\n\n @classmethod\n def from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n ) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n\n def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. 
No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
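Example (a minimal sketch of the documented constructors; node names are made up): points can be given as a name-keyed dict or as an (n_nodes, 2) array, and missing nodes come back as NaN from numpy().
import numpy as np
import sleap_io as sio

skeleton = sio.Skeleton(["head", "tail"])

# Dictionary keyed by node name.
inst = sio.Instance(points={"head": [1.0, 2.0], "tail": [3.0, 4.0]}, skeleton=skeleton)

# Array of shape (n_nodes, 2); NaN marks a missing node.
inst2 = sio.Instance.from_numpy(np.array([[1.0, 2.0], [np.nan, np.nan]]), skeleton=skeleton)

print(inst["head"])     # Point for the "head" node
print(inst2.numpy())    # NaN row for the missing "tail" node
print(inst2.n_visible)  # 1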
"},{"location":"model/#sleap_io.Instance.is_empty","title":"is_empty: bool
property
","text":"Return True
if no points are visible on the instance.
n_visible: int
property
","text":"Return the number of visible points in the instance.
"},{"location":"model/#sleap_io.Instance.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Maintain point mappings between node and points after initialization.
Source code insleap_io/model/instance.py
def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n
"},{"location":"model/#sleap_io.Instance.__getitem__","title":"__getitem__(node)
","text":"Return the point associated with a node or None
if not set.
sleap_io/model/instance.py
def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n
"},{"location":"model/#sleap_io.Instance.__len__","title":"__len__()
","text":"Return the number of points in the instance.
Source code insleap_io/model/instance.py
def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n
"},{"location":"model/#sleap_io.Instance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code insleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n
"},{"location":"model/#sleap_io.Instance.from_numpy","title":"from_numpy(points, skeleton, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
Name Type Description Defaultpoints
ndarray
A numpy array of shape (n_nodes, 2)
corresponding to the points of the skeleton. Values of np.nan
indicate \"missing\" nodes.
skeleton
Skeleton
The Skeleton
that this Instance
is associated with. It should have n_nodes
nodes.
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
None
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n
"},{"location":"model/#sleap_io.Instance.numpy","title":"numpy()
","text":"Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n
"},{"location":"model/#sleap_io.Instance.replace_skeleton","title":"replace_skeleton(new_skeleton, node_map=None, rev_node_map=None)
","text":"Replace the skeleton associated with the instance.
The points dictionary will be updated to match the new skeleton.
Parameters:
Name Type Description Defaultnew_skeleton
Skeleton
The new Skeleton
to associate with the instance.
node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node
objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed.
None
rev_node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the new skeleton to nodes in the old skeleton. This is used internally when calling from Labels.replace_skeleton()
as it is more efficient to compute this mapping once and pass it to all instances. No validation is done on this mapping, so nodes are expected to be Node
objects.
None
Source code in sleap_io/model/instance.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
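Example (illustrative; node names are made up): mapping a renamed node explicitly when swapping skeletons. When node_map is given, unmapped nodes are dropped, so identically named nodes must be listed too.
import numpy as np
import sleap_io as sio

old_skel = sio.Skeleton(["head", "tail"])
new_skel = sio.Skeleton(["snout", "tail"])

inst = sio.Instance.from_numpy(np.array([[1.0, 2.0], [3.0, 4.0]]), skeleton=old_skel)

# "head" was renamed to "snout"; include "tail" so its point is kept as well.
inst.replace_skeleton(new_skel, node_map={"head": "snout", "tail": "tail"})
print(inst.numpy())  # points carried over in the new skeleton's node order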
"},{"location":"model/#sleap_io.Instance.update_skeleton","title":"update_skeleton()
","text":"Update the points dictionary to match the skeleton.
Points associated with nodes that are no longer in the skeleton will be removed.
Additionally, the keys of the points dictionary will be ordered to match the order of the nodes in the skeleton.
NotesThis method is useful when the skeleton has been updated (e.g., nodes removed or reordered).
However, it is recommended to use Labels
-level methods (e.g., Labels.remove_nodes()
) when manipulating the skeleton as these will automatically call this method on every instance.
sleap_io/model/instance.py
def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n
"},{"location":"model/#sleap_io.PredictedInstance","title":"sleap_io.PredictedInstance
","text":" Bases: Instance
A PredictedInstance
is an Instance
that was predicted using a model.
Attributes:
Name Type Descriptionskeleton
The Skeleton
that this Instance
is associated with.
points
A dictionary where keys are Skeleton
nodes and values are Point
s.
track
An optional Track
associated with a unique animal/object across frames or videos.
from_predicted
Optional[PredictedInstance]
Not applicable in PredictedInstance
s (must be set to None
).
score
float
The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
tracking_score
Optional[float]
The score associated with the Track
assignment. This is typically the value from the score matrix used in an identity assignment.
Methods:
Name Description__repr__
Return a readable representation of the instance.
from_numpy
Create an instance object from a numpy array.
numpy
Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
@define\nclass PredictedInstance(Instance):\n \"\"\"A `PredictedInstance` is an `Instance` that was predicted using a model.\n\n Attributes:\n skeleton: The `Skeleton` that this `Instance` is associated with.\n points: A dictionary where keys are `Skeleton` nodes and values are `Point`s.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: Not applicable in `PredictedInstance`s (must be set to `None`).\n score: The instance detection or part grouping prediction score. This is a\n scalar that represents the confidence with which this entire instance was\n predicted. This may not always be applicable depending on the model type.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity assignment.\n \"\"\"\n\n _POINT_TYPE = PredictedPoint\n\n from_predicted: Optional[PredictedInstance] = field(\n default=None, validator=validators.instance_of(type(None))\n )\n score: float = 0.0\n tracking_score: Optional[float] = 0\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n\n @classmethod\n def from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n ) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n\n def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
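Example (a sketch using the documented from_numpy signature; the coordinates and scores are made-up values):
import numpy as np
import sleap_io as sio

skeleton = sio.Skeleton(["head", "tail"])

pred = sio.PredictedInstance.from_numpy(
    points=np.array([[1.0, 2.0], [3.0, 4.0]]),
    point_scores=np.array([0.9, 0.8]),
    instance_score=0.85,
    skeleton=skeleton,
)

print(pred.numpy().shape)             # (2, 2): x, y per node
print(pred.numpy(scores=True).shape)  # (2, 3): x, y, point score per node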
"},{"location":"model/#sleap_io.PredictedInstance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code insleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n
"},{"location":"model/#sleap_io.PredictedInstance.from_numpy","title":"from_numpy(points, point_scores, instance_score, skeleton, tracking_score=None, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
Name Type Description Defaultpoints
ndarray
A numpy array of shape (n_nodes, 2)
corresponding to the points of the skeleton. Values of np.nan
indicate \"missing\" nodes.
point_scores
ndarray
The points-level prediction score. This is an array that represents the confidence with which each point in the instance was predicted. This may not always be applicable depending on the model type.
requiredinstance_score
float
The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
requiredskeleton
Skeleton
The Skeleton
that this Instance
is associated with. It should have n_nodes
nodes.
tracking_score
Optional[float]
The score associated with the Track
assignment. This is typically the value from the score matrix used in an identity assignment.
None
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
None
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n
"},{"location":"model/#sleap_io.PredictedInstance.numpy","title":"numpy(scores=False)
","text":"Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"model/#sleap_io.Point","title":"sleap_io.Point
","text":"A 2D spatial landmark and metadata associated with annotation.
Attributes:
Name Type Descriptionx
float
The horizontal pixel location of point in image coordinates.
y
float
The vertical pixel location of point in image coordinates.
visible
bool
Whether point is visible in the image or not.
complete
bool
Has the point been verified by the user labeler.
Class variableseq_atol: Controls absolute tolerence allowed in x
and y
when comparing two Point
s for equality. eq_rtol: Controls relative tolerence allowed in x
and y
when comparing two Point
s for equality.
Methods:
Name Description__eq__
Compare self
and other
for equality.
numpy
Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
@define\nclass Point:\n \"\"\"A 2D spatial landmark and metadata associated with annotation.\n\n Attributes:\n x: The horizontal pixel location of point in image coordinates.\n y: The vertical pixel location of point in image coordinates.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n\n Class variables:\n eq_atol: Controls absolute tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n eq_rtol: Controls relative tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n\n \"\"\"\n\n eq_atol: ClassVar[float] = 1e-08\n eq_rtol: ClassVar[float] = 0\n\n x: float\n y: float\n visible: bool = True\n complete: bool = False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
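Example (illustrative of the eq_atol/eq_rtol class variables; the tolerance shown is the documented default):
import sleap_io as sio

a = sio.Point(x=1.0, y=2.0)
b = sio.Point(x=1.0 + 1e-9, y=2.0)

assert a == b  # default eq_atol=1e-08 absorbs the tiny difference

sio.Point.eq_atol = 0.0  # class-wide: make comparisons exact
assert a != b
sio.Point.eq_atol = 1e-08  # restore the default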
"},{"location":"model/#sleap_io.Point.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
Precision error between the respective x
and y
properties of two instances may be allowed or controlled via the Point.eq_atol
and Point.eq_rtol
class variables. Set to zero to disable their effect. Internally, numpy.isclose()
is used for the comparison: https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
Parameters:
Name Type Description Defaultother
object
Instance of Point
to compare to.
Returns:
Type Descriptionbool
Returns True if all attributes of self
and other
are the identical (possibly allowing precision error for x
and y
attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n
"},{"location":"model/#sleap_io.Point.numpy","title":"numpy()
","text":"Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"model/#sleap_io.PredictedPoint","title":"sleap_io.PredictedPoint
","text":" Bases: Point
A predicted point with associated score generated by a prediction model.
It has all the properties of a labeled Point
, plus a score
.
Attributes:
Name Type Descriptionx
The horizontal pixel location of point within image frame.
y
The vertical pixel location of point within image frame.
visible
Whether point is visible in the image or not.
complete
Has the point been verified by the user labeler.
score
float
The point-level prediction score. This is typically the confidence and set to a value between 0 and 1.
Methods:
Name Description__eq__
Compare self
and other
for equality.
numpy
Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
@define\nclass PredictedPoint(Point):\n \"\"\"A predicted point with associated score generated by a prediction model.\n\n It has all the properties of a labeled `Point`, plus a `score`.\n\n Attributes:\n x: The horizontal pixel location of point within image frame.\n y: The vertical pixel location of point within image frame.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n score: The point-level prediction score. This is typically the confidence and\n set to a value between 0 and 1.\n \"\"\"\n\n score: float = 0.0\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
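Example (a small sketch of the score-aware numpy() behavior; the values are arbitrary):
import sleap_io as sio

pt = sio.PredictedPoint(x=5.0, y=7.5, score=0.92)
print(pt.numpy())  # [5.0, 7.5, 0.92] -- coordinates plus the point score

hidden = sio.PredictedPoint(x=5.0, y=7.5, visible=False, score=0.92)
print(hidden.numpy())  # all-NaN array of shape (3,) when the point is not visible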
"},{"location":"model/#sleap_io.PredictedPoint.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
See Point.__eq__()
for important notes about point equality semantics!
Parameters:
Name Type Description Defaultother
object
Instance of PredictedPoint
to compare
Returns:
Type Descriptionbool
Returns True if all attributes of self
and other
are the identical (possibly allowing precision error for x
and y
attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
"},{"location":"model/#sleap_io.PredictedPoint.numpy","title":"numpy()
","text":"Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n
"},{"location":"model/#sleap_io.Skeleton","title":"sleap_io.Skeleton
","text":"A description of a set of landmark types and connections between them.
Skeletons are represented by a directed graph composed of a set of Node
s (landmark types such as body parts) and Edge
s (connections between parts).
Attributes:
Name Type Descriptionnodes
list[Node]
A list of Node
s. May be specified as a list of strings to create new nodes from their names.
edges
list[Edge]
A list of Edge
s. May be specified as a list of 2-tuples of string names or integer indices of nodes
. Each edge corresponds to a pair of source and destination nodes forming a directed edge.
symmetries
list[Symmetry]
A list of Symmetry
s. Each symmetry corresponds to symmetric body parts, such as \"left eye\", \"right eye\"
. This is used when applying flip (reflection) augmentation to images in order to appropriately swap the indices of symmetric landmarks.
name
str | None
A descriptive name for the Skeleton
.
Methods:
Name Description__attrs_post_init__
Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
__contains__
Check if a node is in the skeleton.
__getitem__
Return a Node
when indexing by name or integer.
__len__
Return the number of nodes in the skeleton.
__repr__
Return a readable representation of the skeleton.
add_edge
Add an Edge
to the skeleton.
add_edges
Add multiple Edge
s to the skeleton.
add_node
Add a Node
to the skeleton.
add_nodes
Add multiple Node
s to the skeleton.
add_symmetry
Add a symmetry relationship to the skeleton.
get_flipped_node_inds
Returns node indices that should be switched when horizontally flipping.
index
Return the index of a node specified as a Node
or string name.
rebuild_cache
Rebuild the node name/index to Node
map caches.
remove_node
Remove a single node from the skeleton.
remove_nodes
Remove nodes from the skeleton.
rename_node
Rename a single node in the skeleton.
rename_nodes
Rename nodes in the skeleton.
reorder_nodes
Reorder nodes in the skeleton.
require_node
Return a Node
object, handling indexing and adding missing nodes.
Attributes:
Name Type Descriptionedge_inds
list[tuple[int, int]]
Edges indices as a list of 2-tuples.
edge_names
list[str, str]
Edge names as a list of 2-tuples with string node names.
node_names
list[str]
Names of the nodes associated with this skeleton as a list of strings.
symmetry_inds
list[tuple[int, int]]
Symmetry indices as a list of 2-tuples.
symmetry_names
list[str, str]
Symmetry names as a list of 2-tuples with string node names.
Source code insleap_io/model/skeleton.py
@define(eq=False)\nclass Skeleton:\n \"\"\"A description of a set of landmark types and connections between them.\n\n Skeletons are represented by a directed graph composed of a set of `Node`s (landmark\n types such as body parts) and `Edge`s (connections between parts).\n\n Attributes:\n nodes: A list of `Node`s. May be specified as a list of strings to create new\n nodes from their names.\n edges: A list of `Edge`s. May be specified as a list of 2-tuples of string names\n or integer indices of `nodes`. Each edge corresponds to a pair of source and\n destination nodes forming a directed edge.\n symmetries: A list of `Symmetry`s. Each symmetry corresponds to symmetric body\n parts, such as `\"left eye\", \"right eye\"`. This is used when applying flip\n (reflection) augmentation to images in order to appropriately swap the\n indices of symmetric landmarks.\n name: A descriptive name for the `Skeleton`.\n \"\"\"\n\n def _nodes_on_setattr(self, attr, new_nodes):\n \"\"\"Callback to update caches when nodes are set.\"\"\"\n self.rebuild_cache(nodes=new_nodes)\n return new_nodes\n\n nodes: list[Node] = field(\n factory=list,\n on_setattr=_nodes_on_setattr,\n )\n edges: list[Edge] = field(factory=list)\n symmetries: list[Symmetry] = field(factory=list)\n name: str | None = None\n _name_to_node_cache: dict[str, Node] = field(init=False, repr=False, eq=False)\n _node_to_ind_cache: dict[Node, int] = field(init=False, repr=False, eq=False)\n\n def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n\n def _convert_nodes(self):\n \"\"\"Convert nodes to `Node` objects if needed.\"\"\"\n if isinstance(self.nodes, np.ndarray):\n object.__setattr__(self, \"nodes\", self.nodes.tolist())\n for i, node in enumerate(self.nodes):\n if type(node) == str:\n self.nodes[i] = Node(node)\n\n def _convert_edges(self):\n \"\"\"Convert list of edge names or integers to `Edge` objects if needed.\"\"\"\n if isinstance(self.edges, np.ndarray):\n self.edges = self.edges.tolist()\n node_names = self.node_names\n for i, edge in enumerate(self.edges):\n if type(edge) == Edge:\n continue\n src, dst = edge\n if type(src) == str:\n try:\n src = node_names.index(src)\n except ValueError:\n raise ValueError(\n f\"Node '{src}' specified in the edge list is not in the nodes.\"\n )\n if type(src) == int or (\n np.isscalar(src) and np.issubdtype(src.dtype, np.integer)\n ):\n src = self.nodes[src]\n\n if type(dst) == str:\n try:\n dst = node_names.index(dst)\n except ValueError:\n raise ValueError(\n f\"Node '{dst}' specified in the edge list is not in the nodes.\"\n )\n if type(dst) == int or (\n np.isscalar(dst) and np.issubdtype(dst.dtype, np.integer)\n ):\n dst = self.nodes[dst]\n\n self.edges[i] = Edge(src, dst)\n\n def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. 
Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n\n @property\n def node_names(self) -> list[str]:\n \"\"\"Names of the nodes associated with this skeleton as a list of strings.\"\"\"\n return [node.name for node in self.nodes]\n\n @property\n def edge_inds(self) -> list[tuple[int, int]]:\n \"\"\"Edges indices as a list of 2-tuples.\"\"\"\n return [\n (self.nodes.index(edge.source), self.nodes.index(edge.destination))\n for edge in self.edges\n ]\n\n @property\n def edge_names(self) -> list[str, str]:\n \"\"\"Edge names as a list of 2-tuples with string node names.\"\"\"\n return [(edge.source.name, edge.destination.name) for edge in self.edges]\n\n @property\n def symmetry_inds(self) -> list[tuple[int, int]]:\n \"\"\"Symmetry indices as a list of 2-tuples.\"\"\"\n return [\n tuple(sorted((self.index(symmetry[0]), self.index(symmetry[1]))))\n for symmetry in self.symmetries\n ]\n\n @property\n def symmetry_names(self) -> list[str, str]:\n \"\"\"Symmetry names as a list of 2-tuples with string node names.\"\"\"\n return [\n (self.nodes[i].name, self.nodes[j].name) for (i, j) in self.symmetry_inds\n ]\n\n def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n\n def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n\n def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n\n def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return 
self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n\n def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n\n def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n\n def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n\n def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n\n def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n ):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n\n def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n\n def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n ):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. 
If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n\n def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n\n def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. 
Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n\n def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n\n def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n\n def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. 
This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
"},{"location":"model/#sleap_io.Skeleton.edge_inds","title":"edge_inds: list[tuple[int, int]]
property
","text":"Edges indices as a list of 2-tuples.
"},{"location":"model/#sleap_io.Skeleton.edge_names","title":"edge_names: list[str, str]
property
","text":"Edge names as a list of 2-tuples with string node names.
"},{"location":"model/#sleap_io.Skeleton.node_names","title":"node_names: list[str]
property
","text":"Names of the nodes associated with this skeleton as a list of strings.
"},{"location":"model/#sleap_io.Skeleton.symmetry_inds","title":"symmetry_inds: list[tuple[int, int]]
property
","text":"Symmetry indices as a list of 2-tuples.
"},{"location":"model/#sleap_io.Skeleton.symmetry_names","title":"symmetry_names: list[str, str]
property
","text":"Symmetry names as a list of 2-tuples with string node names.
"},{"location":"model/#sleap_io.Skeleton.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
sleap_io/model/skeleton.py
def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n
"},{"location":"model/#sleap_io.Skeleton.__contains__","title":"__contains__(node)
","text":"Check if a node is in the skeleton.
Source code in sleap_io/model/skeleton.py
def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n
"},{"location":"model/#sleap_io.Skeleton.__getitem__","title":"__getitem__(idx)
","text":"Return a Node
when indexing by name or integer.
sleap_io/model/skeleton.py
def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n
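A small sketch of the indexing behavior (node names are illustrative):

```python
from sleap_io import Skeleton

skel = Skeleton(["A", "B", "C"])
skel[0]          # Node with name 'A' (index by position)
skel["B"]        # Node with name 'B' (index by name)
"C" in skel      # True (membership check)
skel.index("B")  # 1 (name or Node -> integer index)
```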
"},{"location":"model/#sleap_io.Skeleton.__len__","title":"__len__()
","text":"Return the number of nodes in the skeleton.
Source code in sleap_io/model/skeleton.py
def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n
"},{"location":"model/#sleap_io.Skeleton.__repr__","title":"__repr__()
","text":"Return a readable representation of the skeleton.
Source code in sleap_io/model/skeleton.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n
"},{"location":"model/#sleap_io.Skeleton.add_edge","title":"add_edge(src, dst=None)
","text":"Add an Edge
to the skeleton.
Parameters:
Name Type Description Defaultsrc
NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex]
The source node specified as a Node
, name or index.
dst
NodeOrIndex | None
The destination node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n
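A quick sketch of the accepted call forms (names are illustrative):

```python
from sleap_io import Skeleton

skel = Skeleton(["A", "B"])
skel.add_edge("A", "B")    # source and destination by name
skel.add_edge(("B", "C"))  # as a 2-tuple; missing nodes are created via require_node()
print(skel.edge_inds)      # [(0, 1), (1, 2)]
```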
"},{"location":"model/#sleap_io.Skeleton.add_edges","title":"add_edges(edges)
","text":"Add multiple Edge
s to the skeleton.
Parameters:
Name Type Description Defaultedges
list[Edge | tuple[NodeOrIndex, NodeOrIndex]]
A list of Edge
objects or 2-tuples of source and destination nodes.
sleap_io/model/skeleton.py
def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n
"},{"location":"model/#sleap_io.Skeleton.add_node","title":"add_node(node)
","text":"Add a Node
to the skeleton.
Parameters:
Name Type Description Defaultnode
Node | str
A Node
object or a string name to create a new node.
Raises:
Type DescriptionValueError
If the node already exists in the skeleton or if the node is not specified as a Node
or string.
sleap_io/model/skeleton.py
def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n
"},{"location":"model/#sleap_io.Skeleton.add_nodes","title":"add_nodes(nodes)
","text":"Add multiple Node
s to the skeleton.
Parameters:
Name Type Description Defaultnodes
list[Node | str]
A list of Node
objects or string names to create new nodes.
sleap_io/model/skeleton.py
def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n
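For example (a minimal sketch; node names are illustrative):

```python
from sleap_io import Node, Skeleton

skel = Skeleton(["head"])
skel.add_node("thorax")                    # from a string name
skel.add_nodes(["abdomen", Node("tail")])  # strings and Node objects can be mixed
print(skel.node_names)  # ['head', 'thorax', 'abdomen', 'tail']
```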
"},{"location":"model/#sleap_io.Skeleton.add_symmetry","title":"add_symmetry(node1=None, node2=None)
","text":"Add a symmetry relationship to the skeleton.
Parameters:
Name Type Description Defaultnode1
Symmetry | NodeOrIndex
The first node specified as a Node
, name or index. If a Symmetry
object is provided, it will be added directly to the skeleton.
None
node2
NodeOrIndex | None
The second node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n
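For example (node names are illustrative):

```python
from sleap_io import Skeleton

skel = Skeleton(["left_ear", "right_ear", "nose"])
skel.add_symmetry("left_ear", "right_ear")
print(skel.symmetry_inds)   # [(0, 1)]
print(skel.symmetry_names)  # [('left_ear', 'right_ear')]
```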
"},{"location":"model/#sleap_io.Skeleton.get_flipped_node_inds","title":"get_flipped_node_inds()
","text":"Returns node indices that should be switched when horizontally flipping.
This is useful as a lookup table for flipping the landmark coordinates when doing data augmentation.
Exampleskel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"]) skel.add_symmetry(\"B_left\", \"B_right\") skel.add_symmetry(\"D_left\", \"D_right\") skel.flipped_node_inds [0, 2, 1, 3, 5, 4] pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) pose[skel.flipped_node_inds] array([[0, 0], [2, 2], [1, 1], [3, 3], [5, 5], [4, 4]])
Source code insleap_io/model/skeleton.py
def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n
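A typical augmentation use, sketched with the method as defined above (node names are illustrative):

```python
import numpy as np
from sleap_io import Skeleton

skel = Skeleton(["nose", "left_wing", "right_wing"])
skel.add_symmetry("left_wing", "right_wing")

flip_idx = skel.get_flipped_node_inds()  # [0, 2, 1]
pose = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
flipped_pose = pose[flip_idx]  # rows for the left/right wings are swapped
```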
"},{"location":"model/#sleap_io.Skeleton.index","title":"index(node)
","text":"Return the index of a node specified as a Node
or string name.
sleap_io/model/skeleton.py
def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n
"},{"location":"model/#sleap_io.Skeleton.rebuild_cache","title":"rebuild_cache(nodes=None)
","text":"Rebuild the node name/index to Node
map caches.
Parameters:
Name Type Description Defaultnodes
list[Node] | None
A list of Node
objects to update the cache with. If not provided, the cache will be updated with the current nodes in the skeleton. If nodes are provided, the cache will be updated with the provided nodes, but the current nodes in the skeleton will not be updated. Default is None
.
None
Notes This function should be called when nodes or node list is mutated to update the lookup caches for indexing nodes by name or Node object.
This is done automatically when nodes are added or removed from the skeleton using the convenience methods in this class.
This method only needs to be used when manually mutating nodes or the node list directly.
Source code in sleap_io/model/skeleton.py
def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n
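For example, a sketch of when a manual rebuild is needed (names are illustrative):

```python
from sleap_io import Node, Skeleton

skel = Skeleton(["A", "B"])
# Appending to the node list directly bypasses the convenience methods,
# so the name/index caches must be rebuilt by hand afterwards.
skel.nodes.append(Node("C"))
skel.rebuild_cache()
print(skel.index("C"))  # 2
```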
"},{"location":"model/#sleap_io.Skeleton.remove_node","title":"remove_node(node)
","text":"Remove a single node from the skeleton.
Parameters:
Name Type Description Defaultnode
NodeOrIndex
The node to remove. Can be specified as a string name, integer index, or Node
object.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed node will also be removed.
Warning This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n
"},{"location":"model/#sleap_io.Skeleton.remove_nodes","title":"remove_nodes(nodes)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Defaultnodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Warning This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call instance.update_nodes()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n
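For example (names are illustrative; note the warning above about instances):

```python
from sleap_io import Skeleton

skel = Skeleton(["A", "B", "C"], edges=[("A", "B"), ("B", "C")])
skel.remove_nodes(["B"])
print(skel.node_names)  # ['A', 'C']
print(skel.edge_inds)   # [] -- both edges touched the removed node
```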
"},{"location":"model/#sleap_io.Skeleton.rename_node","title":"rename_node(old_name, new_name)
","text":"Rename a single node in the skeleton.
Parameters:
Name Type Description Defaultold_name
NodeOrIndex
The name of the node to rename. Can also be specified as an integer index or Node
object.
new_name
str
The new name for the node.
required Source code in sleap_io/model/skeleton.py
def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n
"},{"location":"model/#sleap_io.Skeleton.rename_nodes","title":"rename_nodes(name_map)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Defaultname_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
requiredRaises:
Type DescriptionValueError
If the new node names exist in the skeleton or if the old node names are not found in the skeleton.
Notes This method should always be used when renaming nodes in the skeleton as it handles updating the lookup caches necessary for indexing nodes by name.
After renaming, instances using this skeleton do NOT need to be updated as the nodes are stored by reference in the skeleton, so changes are reflected automatically.
Exampleskel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")]) skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"}) skel.node_names [\"X\", \"Y\", \"Z\"] skel.rename_nodes([\"a\", \"b\", \"c\"]) skel.node_names [\"a\", \"b\", \"c\"]
Source code insleap_io/model/skeleton.py
def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n
"},{"location":"model/#sleap_io.Skeleton.reorder_nodes","title":"reorder_nodes(new_order)
","text":"Reorder nodes in the skeleton.
Parameters:
Name Type Description Defaultnew_order
list[NodeOrIndex]
A list of node names, indices, or Node
objects specifying the new order of the nodes.
Raises:
Type DescriptionValueError
If the new order of nodes is not the same length as the current nodes.
Notes This method handles updating the lookup caches necessary for indexing nodes by name.
Warning After reordering, instances using this skeleton do not need to be updated as the nodes are stored by reference in the skeleton.
However, the order that points are stored in the instances will not be updated to match the new order of the nodes in the skeleton. This should not matter unless the ordering of the keys in the Instance.points
dictionary is used instead of relying on the skeleton node order.
To make sure these are aligned, it is recommended to use the Labels.reorder_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
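For example (names are illustrative; see the warning above about point ordering in instances):

```python
from sleap_io import Skeleton

skel = Skeleton(["A", "B", "C"])
skel.reorder_nodes(["C", "A", "B"])
print(skel.node_names)  # ['C', 'A', 'B']
```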
"},{"location":"model/#sleap_io.Skeleton.require_node","title":"require_node(node, add_missing=True)
","text":"Return a Node
object, handling indexing and adding missing nodes.
Parameters:
Name Type Description Defaultnode
NodeOrIndex
A Node
object, name or index.
add_missing
bool
If True
, missing nodes will be added to the skeleton. If False
, an error will be raised if the node is not found. Default is True
.
True
Returns:
Type DescriptionNode
The Node
object.
Raises:
Type DescriptionIndexError
If the node is not found in the skeleton and add_missing
is False
.
sleap_io/model/skeleton.py
def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n
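For example (names are illustrative):

```python
from sleap_io import Skeleton

skel = Skeleton(["A"])
node_a = skel.require_node("A")  # existing node is returned as a Node object
node_b = skel.require_node("B")  # missing node is created and added by default

try:
    skel.require_node("C", add_missing=False)
except IndexError:
    print("'C' is not in the skeleton")
```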
"},{"location":"model/#sleap_io.Node","title":"sleap_io.Node
","text":"A landmark type within a Skeleton
.
This typically corresponds to a unique landmark within a skeleton, such as the \"left eye\".
Attributes:
Name Type Descriptionname
str
Descriptive label for the landmark.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Node:\n \"\"\"A landmark type within a `Skeleton`.\n\n This typically corresponds to a unique landmark within a skeleton, such as the \"left\n eye\".\n\n Attributes:\n name: Descriptive label for the landmark.\n \"\"\"\n\n name: str\n
"},{"location":"model/#sleap_io.Edge","title":"sleap_io.Edge
","text":"A connection between two Node
objects within a Skeleton
.
This is a directed edge, representing the ordering of Node
s in the Skeleton
tree.
Attributes:
Name Type Descriptionsource
Node
The origin Node
.
destination
Node
The destination Node
.
Methods:
Name Description__getitem__
Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
sleap_io/model/skeleton.py
@define(frozen=True)\nclass Edge:\n \"\"\"A connection between two `Node` objects within a `Skeleton`.\n\n This is a directed edge, representing the ordering of `Node`s in the `Skeleton`\n tree.\n\n Attributes:\n source: The origin `Node`.\n destination: The destination `Node`.\n \"\"\"\n\n source: Node\n destination: Node\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
"},{"location":"model/#sleap_io.Edge.__getitem__","title":"__getitem__(idx)
","text":"Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
"},{"location":"model/#sleap_io.Symmetry","title":"sleap_io.Symmetry
","text":"A relationship between a pair of nodes denoting their left/right pairing.
Attributes:
Name Type Descriptionnodes
set[Node]
A set of two Node
s.
Methods:
Name Description__getitem__
Return the first node.
__iter__
Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
@define\nclass Symmetry:\n \"\"\"A relationship between a pair of nodes denoting their left/right pairing.\n\n Attributes:\n nodes: A set of two `Node`s.\n \"\"\"\n\n nodes: set[Node] = field(converter=set, validator=lambda _, __, val: len(val) == 2)\n\n def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
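A small sketch tying Node, Edge, and Symmetry together (names are illustrative):

```python
from sleap_io import Edge, Node, Symmetry

left = Node("left_wing")
right = Node("right_wing")

edge = Edge(source=left, destination=right)
edge[0]  # source Node ("left_wing")
edge[1]  # destination Node ("right_wing")

sym = Symmetry({left, right})
list(sym)  # the two paired nodes (stored as a set, so order is not guaranteed)
```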
"},{"location":"model/#sleap_io.Symmetry.__getitem__","title":"__getitem__(idx)
","text":"Return the first node.
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
"},{"location":"model/#sleap_io.Symmetry.__iter__","title":"__iter__()
","text":"Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n
"},{"location":"model/#sleap_io.Track","title":"sleap_io.Track
","text":"An object that represents the same animal/object across multiple detections.
This allows tracking of unique entities in the video over time and space.
A Track may also be used to refer to unique identity classes that span multiple videos, such as \"female mouse\".
Attributes:
Name Type Descriptionname
str
A name given to this track for identification purposes.
Notes Tracks are compared by identity. This means that unique track objects with the same name are considered to be different.
sleap_io/model/instance.py
@define(eq=False)\nclass Track:\n \"\"\"An object that represents the same animal/object across multiple detections.\n\n This allows tracking of unique entities in the video over time and space.\n\n A `Track` may also be used to refer to unique identity classes that span multiple\n videos, such as `\"female mouse\"`.\n\n Attributes:\n name: A name given to this track for identification purposes.\n\n Notes:\n `Track`s are compared by identity. This means that unique track objects with the\n same name are considered to be different.\n \"\"\"\n\n name: str = \"\"\n
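For example, a sketch of the identity-based comparison noted above:

```python
from sleap_io import Track

track_a = Track(name="female")
track_b = Track(name="female")

print(track_a == track_b)  # False -- tracks are compared by identity, not by name
print(track_a == track_a)  # True
```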
"},{"location":"model/#sleap_io.Video","title":"sleap_io.Video
","text":"Video
class used by sleap to represent videos and data associated with them.
This class is used to store information regarding a video and its components. It is used to store the video's filename
, shape
, and the video's backend
.
To create a Video
object, use the from_filename
method which will select the backend appropriately.
Attributes:
Name Type Descriptionfilename
str | list[str]
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are expected. If filename is a folder, it will be searched for images.
backend
Optional[VideoBackend]
An object that implements the basic methods for reading and manipulating frames of a specific video type.
backend_metadata
dict[str, any]
A dictionary of metadata specific to the backend. This is useful for storing metadata that requires an open backend (e.g., shape information) without having access to the video file itself.
source_video
Optional[Video]
The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
open_backend
bool
Whether to open the backend when the video is available. If True
(the default), the backend will be automatically opened if the video exists. Set this to False
when you want to manually open the backend, or when you know the video file does not exist and you want to avoid trying to open the file.
Instances of this class are hashed by identity, not by value. This means that two Video instances with the same attributes will NOT be considered equal in a set or dict.
See also: VideoBackend
Methods:
Name Description__attrs_post_init__
Post init syntactic sugar.
__deepcopy__
Deep copy the video object.
__getitem__
Return the frames of the video at the given indices.
__len__
Return the length of the video as the number of frames.
__repr__
Informal string representation (for print or format).
__str__
Informal string representation (for print or format).
close
Close the video backend.
exists
Check if the video file exists and is accessible.
from_filename
Create a Video from a filename.
open
Open the video backend for reading.
replace_filename
Update the filename of the video, optionally opening the backend.
save
Save video frames to a new video file.
Attributes:
Name Type Descriptiongrayscale
bool | None
Return whether the video is grayscale.
is_open
bool
Check if the video backend is open.
shape
Tuple[int, int, int, int] | None
Return the shape of the video as (num_frames, height, width, channels).
Source code in sleap_io/model/video.py
@attrs.define(eq=False)\nclass Video:\n \"\"\"`Video` class used by sleap to represent videos and data associated with them.\n\n This class is used to store information regarding a video and its components.\n It is used to store the video's `filename`, `shape`, and the video's `backend`.\n\n To create a `Video` object, use the `from_filename` method which will select the\n backend appropriately.\n\n Attributes:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n backend: An object that implements the basic methods for reading and\n manipulating frames of a specific video type.\n backend_metadata: A dictionary of metadata specific to the backend. This is\n useful for storing metadata that requires an open backend (e.g., shape\n information) without having access to the video file itself.\n source_video: The source video object if this is a proxy video. This is present\n when the video contains an embedded subset of frames from another video.\n open_backend: Whether to open the backend when the video is available. If `True`\n (the default), the backend will be automatically opened if the video exists.\n Set this to `False` when you want to manually open the backend, or when the\n you know the video file does not exist and you want to avoid trying to open\n the file.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `Video` instances with the same attributes will NOT be considered equal in a\n set or dict.\n\n See also: VideoBackend\n \"\"\"\n\n filename: str | list[str]\n backend: Optional[VideoBackend] = None\n backend_metadata: dict[str, any] = attrs.field(factory=dict)\n source_video: Optional[Video] = None\n open_backend: bool = True\n\n EXTS = MediaVideo.EXTS + HDF5Video.EXTS + ImageVideo.EXTS\n\n def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n\n def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. 
If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n\n @property\n def shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n If the video backend is not set or it cannot determine the shape of the video,\n this will return None.\n \"\"\"\n return self._get_shape()\n\n def _get_shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n This suppresses errors related to querying the backend for the video shape, such\n as when it has not been set or when the video file is not found.\n \"\"\"\n try:\n return self.backend.shape\n except:\n if \"shape\" in self.backend_metadata:\n return self.backend_metadata[\"shape\"]\n return None\n\n @property\n def grayscale(self) -> bool | None:\n \"\"\"Return whether the video is grayscale.\n\n If the video backend is not set or it cannot determine whether the video is\n grayscale, this will return None.\n \"\"\"\n shape = self.shape\n if shape is not None:\n return shape[-1] == 1\n else:\n grayscale = None\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n return grayscale\n\n @grayscale.setter\n def grayscale(self, value: bool):\n \"\"\"Set the grayscale value and adjust the backend.\"\"\"\n if self.backend is not None:\n self.backend.grayscale = value\n self.backend._cached_shape = None\n\n self.backend_metadata[\"grayscale\"] = value\n\n def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n\n def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n\n def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. 
Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n\n def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n\n @property\n def is_open(self) -> bool:\n \"\"\"Check if the video backend is open.\"\"\"\n return self.exists() and self.backend is not None\n\n def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n ):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. 
If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n\n def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n\n def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n ):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n\n def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. 
If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
"},{"location":"model/#sleap_io.Video.grayscale","title":"grayscale: bool | None
property
writable
","text":"Return whether the video is grayscale.
If the video backend is not set or it cannot determine whether the video is grayscale, this will return None.
"},{"location":"model/#sleap_io.Video.is_open","title":"is_open: bool
property
","text":"Check if the video backend is open.
"},{"location":"model/#sleap_io.Video.shape","title":"shape: Tuple[int, int, int, int] | None
property
","text":"Return the shape of the video as (num_frames, height, width, channels).
If the video backend is not set or it cannot determine the shape of the video, this will return None.
"},{"location":"model/#sleap_io.Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Post init syntactic sugar.
Source code in sleap_io/model/video.py
def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n
"},{"location":"model/#sleap_io.Video.__deepcopy__","title":"__deepcopy__(memo)
","text":"Deep copy the video object.
Source code in sleap_io/model/video.py
def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n
"},{"location":"model/#sleap_io.Video.__getitem__","title":"__getitem__(inds)
","text":"Return the frames of the video at the given indices.
Parameters:
Name Type Description Defaultinds
int | list[int] | slice
Index or list of indices of frames to read.
requiredReturns:
Type Descriptionndarray
Frame or frames as a numpy array of shape (height, width, channels)
if a scalar index is provided, or (frames, height, width, channels)
if a list of indices is provided.
See also: VideoBackend.get_frame, VideoBackend.get_frames
Source code in sleap_io/model/video.py
def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n
"},{"location":"model/#sleap_io.Video.__len__","title":"__len__()
","text":"Return the length of the video as the number of frames.
Source code in sleap_io/model/video.py
def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n
"},{"location":"model/#sleap_io.Video.__repr__","title":"__repr__()
","text":"Informal string representation (for print or format).
Source code in sleap_io/model/video.py
def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n
"},{"location":"model/#sleap_io.Video.__str__","title":"__str__()
","text":"Informal string representation (for print or format).
Source code in sleap_io/model/video.py
def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n
"},{"location":"model/#sleap_io.Video.close","title":"close()
","text":"Close the video backend.
Source code in sleap_io/model/video.py
def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n
"},{"location":"model/#sleap_io.Video.exists","title":"exists(check_all=False, dataset=None)
","text":"Check if the video file exists and is accessible.
Parameters:
Name Type Description Defaultcheck_all
bool
If True
, check that all filenames in a list exist. If False
(the default), check that the first filename exists.
False
dataset
str | None
Name of dataset in HDF5 file. If specified, this function will return False
if the dataset does not exist.
None
Returns:
Type Descriptionbool
True
if the file exists and is accessible, False
otherwise.
sleap_io/model/video.py
def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n
"},{"location":"model/#sleap_io.Video.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, source_video=None, **kwargs)
classmethod
","text":"Create a Video from a filename.
Parameters:
Name Type Description Defaultfilename
str | list[str]
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
requireddataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[bool]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
source_video
Optional[Video]
The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
None
Returns:
Type DescriptionVideoBackend
Video instance with the appropriate backend instantiated.
Source code in sleap_io/model/video.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n
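Usage sketch (filenames are hypothetical):
from sleap_io import Video
video = Video.from_filename("session.mp4", grayscale=True)  # single video file
imgs = Video.from_filename(["frame_0000.png", "frame_0001.png"])  # image sequence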
"},{"location":"model/#sleap_io.Video.open","title":"open(filename=None, dataset=None, grayscale=None, keep_open=True)
","text":"Open the video backend for reading.
Parameters:
Name Type Description Defaultfilename
Optional[str]
Filename to open. If not specified, will use the filename set on the video object.
None
dataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[str]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
Notes: This is useful for opening the video backend to read frames and then closing it after reading all the necessary frames.
If the backend was already open, it will be closed before opening a new one. Values for the HDF5 dataset and grayscale will be remembered if not specified.
Source code in sleap_io/model/video.py
def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n
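Usage sketch (assumes a readable, hypothetical session.mp4):
from sleap_io import Video
video = Video.from_filename("session.mp4")
video.open(keep_open=False)  # re-open the backend, closing the reader after each read
frame = video[0]             # read the first frame
video.close()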
"},{"location":"model/#sleap_io.Video.replace_filename","title":"replace_filename(new_filename, open=True)
","text":"Update the filename of the video, optionally opening the backend.
Parameters:
Name Type Description Defaultnew_filename
str | Path | list[str] | list[Path]
New filename to set for the video.
requiredopen
bool
If True
(the default), open the backend with the new filename. If the new filename does not exist, no error is raised.
True
Source code in sleap_io/model/video.py
def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n
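Usage sketch (paths are hypothetical):
from sleap_io import Video
video = Video.from_filename("session.mp4")
video.replace_filename("/mnt/data/session.mp4", open=False)  # update the path without opening the backend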
"},{"location":"model/#sleap_io.Video.save","title":"save(save_path, frame_inds=None, video_kwargs=None)
","text":"Save video frames to a new video file.
Parameters:
Name Type Description Defaultsave_path
str | Path
Path to the new video file. Should end in MP4.
requiredframe_inds
list[int] | ndarray | None
Frame indices to save. Can be specified as a list or array of frame integers. If not specified, saves all video frames.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type DescriptionVideo
A new Video
object pointing to the new video file.
sleap_io/model/video.py
def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
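Usage sketch (paths are hypothetical):
from sleap_io import Video
video = Video.from_filename("session.mp4")
clip = video.save("clip.mp4", frame_inds=list(range(100)))  # write the first 100 frames to a new MP4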
"},{"location":"model/#sleap_io.SuggestionFrame","title":"sleap_io.SuggestionFrame
","text":"Data structure for a single frame of suggestions.
Attributes:
Name Type Descriptionvideo
Video
The video associated with the frame.
frame_idx
int
The index of the frame in the video.
Source code in sleap_io/model/suggestions.py
@attrs.define(auto_attribs=True)\nclass SuggestionFrame:\n \"\"\"Data structure for a single frame of suggestions.\n\n Attributes:\n video: The video associated with the frame.\n frame_idx: The index of the frame in the video.\n \"\"\"\n\n video: Video\n frame_idx: int\n
"},{"location":"reference/SUMMARY/","title":"SUMMARY","text":"sleap_io
","text":"This module exposes all high level APIs for sleap-io.
Modules:
Name Descriptionio
This sub-package contains I/O-related modules such as specific format backends.
model
This subpackage contains data model interfaces.
version
This module defines the package version.
"},{"location":"reference/sleap_io/version/","title":"version","text":""},{"location":"reference/sleap_io/version/#sleap_io.version","title":"sleap_io.version
","text":"This module defines the package version.
"},{"location":"reference/sleap_io/io/","title":"io","text":""},{"location":"reference/sleap_io/io/#sleap_io.io","title":"sleap_io.io
","text":"This sub-package contains I/O-related modules such as specific format backends.
Modules:
Name Descriptionjabs
This module handles direct I/O operations for working with JABS files.
labelstudio
This module handles direct I/O operations for working with Labelstudio files.
main
This module contains high-level wrappers for utilizing different I/O backends.
nwb
Functions to write and read from the neurodata without borders (NWB) format.
slp
This module handles direct I/O operations for working with .slp files.
utils
Miscellaneous utilities for working with different I/O formats.
video
Backends for reading videos.
video_reading
Backends for reading videos.
video_writing
Utilities for writing videos.
"},{"location":"reference/sleap_io/io/jabs/","title":"jabs","text":""},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs","title":"sleap_io.io.jabs
","text":"This module handles direct I/O operations for working with JABS files.
Functions:
Name Descriptionconvert_labels
Convert a Labels
object into JABS-formatted annotations.
get_max_ids_in_video
Determine the maximum number of identities that exist at the same time.
make_simple_skeleton
Create a Skeleton
with a requested number of nodes attached in a line.
prediction_to_instance
Create an Instance
from prediction data.
read_labels
Read JABS style pose from a file and return a Labels
object.
tracklets_to_v3
Changes identity tracklets to the v3 format specifications.
write_jabs_v2
Write JABS pose file v2 data to file.
write_jabs_v3
Write JABS pose file v3 data to file.
write_jabs_v4
Write JABS pose file v4 data to file.
write_jabs_v5
Write JABS pose file v5 data to file.
write_labels
Convert and save a SLEAP Labels
object to a JABS pose file.
convert_labels(all_labels, video)
","text":"Convert a Labels
object into JABS-formatted annotations.
Parameters:
Name Type Description Defaultall_labels
Labels
SLEAP Labels
to be converted to JABS format.
video
Video
name of video to be converted
requiredReturns:
Type Descriptiondict
Dictionary of JABS data of the Labels
data.
sleap_io/io/jabs.py
def convert_labels(all_labels: Labels, video: Video) -> dict:\n \"\"\"Convert a `Labels` object into JABS-formatted annotations.\n\n Args:\n all_labels: SLEAP `Labels` to be converted to JABS format.\n video: name of video to be converted\n\n Returns:\n Dictionary of JABS data of the `Labels` data.\n \"\"\"\n labels = all_labels.find(video=video)\n\n # Determine shape of output\n # Low estimate of last frame labeled\n num_frames = max([x.frame_idx for x in labels]) + 1\n # If there is metadata available for the video, use that\n if video.shape:\n num_frames = max(num_frames, video.shape[0])\n if len(all_labels.skeletons) == 1:\n skeleton = all_labels.skeleton\n elif len(all_labels.skeletons) > 1:\n skeleton = [x for x in all_labels.skeletons if x.name == \"Mouse\"]\n if len(skeleton) == 0:\n raise ValueError(\"No mouse skeleton found in labels.\")\n skeleton = skeleton[0]\n num_keypoints = len(skeleton.nodes)\n num_mice = get_max_ids_in_video(labels, key=\"Mouse\")\n # Note that this 1-indexes identities\n track_2_idx = {\n key: val + 1\n for key, val in zip(all_labels.tracks, range(len(all_labels.tracks)))\n }\n last_unassigned_id = num_mice\n\n keypoint_mat = np.zeros([num_frames, num_mice, num_keypoints, 2], dtype=np.uint16)\n confidence_mat = np.zeros([num_frames, num_mice, num_keypoints], dtype=np.float32)\n identity_mat = np.zeros([num_frames, num_mice], dtype=np.uint32)\n instance_vector = np.zeros([num_frames], dtype=np.uint8)\n static_objects = {}\n\n # Populate the matrices with data\n for label in labels:\n assigned_instances = 0\n for instance_idx, instance in enumerate(label.instances):\n # Static objects just get added to the object dict\n # This will clobber data if more than one frame is annotated\n if instance.skeleton.name != \"Mouse\":\n static_objects[instance.skeleton.name] = instance.numpy()\n continue\n pose = instance.numpy()\n if pose.shape[0] != len(JABS_DEFAULT_KEYPOINTS):\n warnings.warn(\n f\"JABS format only supports 12 keypoints for mice. Skipping storage of instance on frame {label.frame_idx} with {len(instance.points)} keypoints.\"\n )\n continue\n missing_points = np.isnan(pose[:, 0])\n pose[np.isnan(pose)] = 0\n # JABS stores y,x for poses\n pose = np.flip(pose.astype(np.uint16), axis=-1)\n keypoint_mat[label.frame_idx, instance_idx, :, :] = pose\n confidence_mat[label.frame_idx, instance_idx, ~missing_points] = 1.0\n if instance.track:\n identity_mat[label.frame_idx, instance_idx] = track_2_idx[\n instance.track\n ]\n else:\n warnings.warn(\n f\"Pose with unassigned track found on {label.video.filename} frame {label.frame_idx} instance {instance_idx}. Assigning ID {last_unassigned_id}.\"\n )\n identity_mat[label.frame_idx, instance_idx] = last_unassigned_id\n last_unassigned_id += 1\n assigned_instances += 1\n instance_vector[label.frame_idx] = assigned_instances\n\n # Return the data as a dict\n return {\n \"keypoints\": keypoint_mat.astype(np.uint16),\n \"confidence\": confidence_mat.astype(np.float32),\n \"identity\": identity_mat.astype(np.uint32),\n \"num_identities\": instance_vector.astype(np.uint16),\n \"static_objects\": static_objects,\n }\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.get_max_ids_in_video","title":"get_max_ids_in_video(labels, key='Mouse')
","text":"Determine the maximum number of identities that exist at the same time.
Parameters:
Name Type Description Defaultlabels
List[Labels]
SLEAP Labels
to count
key
str
Name of the skeleton to select for identities
'Mouse'
Returns:
Type Descriptionint
Count of the maximum concurrent identities in a single frame
Source code in sleap_io/io/jabs.py
def get_max_ids_in_video(labels: List[Labels], key: str = \"Mouse\") -> int:\n \"\"\"Determine the maximum number of identities that exist at the same time.\n\n Args:\n labels: SLEAP `Labels` to count\n key: Name of the skeleton to select for identities\n\n Returns:\n Count of the maximum concurrent identities in a single frame\n \"\"\"\n max_labels = 0\n for label in labels:\n n_labels = sum([x.skeleton.name == key for x in label.instances])\n max_labels = max(max_labels, n_labels)\n\n return max_labels\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.make_simple_skeleton","title":"make_simple_skeleton(name, num_points)
","text":"Create a Skeleton
with a requested number of nodes attached in a line.
Parameters:
Name Type Description Defaultname
str
name of the skeleton and prefix to nodes
requirednum_points
int
number of points to use in the skeleton
requiredReturns:
Type DescriptionSkeleton
Generated Skeleton
.
sleap_io/io/jabs.py
def make_simple_skeleton(name: str, num_points: int) -> Skeleton:\n \"\"\"Create a `Skeleton` with a requested number of nodes attached in a line.\n\n Args:\n name: name of the skeleton and prefix to nodes\n num_points: number of points to use in the skeleton\n\n Returns:\n Generated `Skeleton`.\n \"\"\"\n nodes = [Node(name + \"_kp\" + str(i)) for i in range(num_points)]\n edges = [Edge(nodes[i], nodes[i + 1]) for i in range(num_points - 1)]\n return Skeleton(nodes, edges, name=name)\n
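Usage sketch (the skeleton name is arbitrary):
from sleap_io.io.jabs import make_simple_skeleton
corners = make_simple_skeleton("corner", 4)
# nodes corner_kp0 ... corner_kp3 connected in a line by three edges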
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.prediction_to_instance","title":"prediction_to_instance(data, confidence, skeleton, track=None)
","text":"Create an Instance
from prediction data.
Parameters:
Name Type Description Defaultdata
Union[ndarray[uint16], ndarray[float32]]
keypoint locations
requiredconfidence
ndarray[float32]
confidence for keypoints
requiredskeleton
Skeleton
Skeleton
to use for Instance
track
Track
Track
to assign to Instance
None
Returns:
Type DescriptionInstance
Parsed Instance
.
sleap_io/io/jabs.py
def prediction_to_instance(\n data: Union[np.ndarray[np.uint16], np.ndarray[np.float32]],\n confidence: np.ndarray[np.float32],\n skeleton: Skeleton,\n track: Track = None,\n) -> Instance:\n \"\"\"Create an `Instance` from prediction data.\n\n Args:\n data: keypoint locations\n confidence: confidence for keypoints\n skeleton: `Skeleton` to use for `Instance`\n track: `Track` to assign to `Instance`\n\n Returns:\n Parsed `Instance`.\n \"\"\"\n assert (\n len(skeleton.nodes) == data.shape[0]\n ), f\"Skeleton ({len(skeleton.nodes)}) does not match number of keypoints ({data.shape[0]})\"\n\n points = {}\n for i, cur_node in enumerate(skeleton.nodes):\n # confidence of 0 indicates no keypoint predicted for instance\n if confidence[i] > 0:\n points[cur_node] = Point(\n data[i, 0],\n data[i, 1],\n visible=True,\n )\n\n if not points:\n return None\n else:\n return Instance(points, skeleton=skeleton, track=track)\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.read_labels","title":"read_labels(labels_path, skeleton=JABS_DEFAULT_SKELETON)
","text":"Read JABS style pose from a file and return a Labels
object.
TODO: Attributes are ignored, including px_to_cm field. TODO: Segmentation data ignored in v6, but will read in pose. TODO: Lixit static objects currently stored as n_lixit,2 (eg 1 object). Should be converted to multiple objects
Parameters:
Name Type Description Defaultlabels_path
str
Path to the JABS pose file.
requiredskeleton
Optional[Skeleton]
An optional Skeleton
object. Defaults to JABS pose version 2-6.
JABS_DEFAULT_SKELETON
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/jabs.py
def read_labels(\n labels_path: str, skeleton: Optional[Skeleton] = JABS_DEFAULT_SKELETON\n) -> Labels:\n \"\"\"Read JABS style pose from a file and return a `Labels` object.\n\n TODO: Attributes are ignored, including px_to_cm field.\n TODO: Segmentation data ignored in v6, but will read in pose.\n TODO: Lixit static objects currently stored as n_lixit,2 (eg 1 object). Should be converted to multiple objects\n\n Args:\n labels_path: Path to the JABS pose file.\n skeleton: An optional `Skeleton` object. Defaults to JABS pose version 2-6.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n frames: List[LabeledFrame] = []\n # Video name is the pose file minus the suffix\n video_name = re.sub(r\"(_pose_est_v[2-6])?\\.h5\", \".avi\", labels_path)\n video = Video.from_filename(video_name)\n if not skeleton:\n skeleton = JABS_DEFAULT_SKELETON\n tracks = {}\n\n if not os.access(labels_path, os.F_OK):\n raise FileNotFoundError(f\"{labels_path} doesn't exist.\")\n if not os.access(labels_path, os.R_OK):\n raise PermissionError(f\"{labels_path} cannot be accessed.\")\n\n with h5py.File(labels_path, \"r\") as pose_file:\n num_frames = pose_file[\"poseest/points\"].shape[0]\n try:\n pose_version = pose_file[\"poseest\"].attrs[\"version\"][0]\n except (KeyError, IndexError):\n pose_version = 2\n data_shape = pose_file[\"poseest/points\"].shape\n assert (\n len(data_shape) == 3\n ), f\"Pose version not present and shape does not match single mouse: shape of {data_shape} for {labels_path}\"\n if pose_version == 2:\n tracks[1] = Track(\"1\")\n # Change field name for newer pose formats\n if pose_version == 3:\n id_key = \"instance_track_id\"\n elif pose_version > 3:\n id_key = \"instance_embed_id\"\n max_ids = pose_file[\"poseest/points\"].shape[1]\n\n for frame_idx in range(num_frames):\n instances = []\n pose_data = pose_file[\"poseest/points\"][frame_idx, ...]\n # JABS stores y,x for poses\n pose_data = np.flip(pose_data, axis=-1)\n pose_conf = pose_file[\"poseest/confidence\"][frame_idx, ...]\n # single animal case\n if pose_version == 2:\n new_instance = prediction_to_instance(\n pose_data, pose_conf, skeleton, tracks[1]\n )\n instances.append(new_instance)\n # multi-animal case\n if pose_version > 2:\n pose_ids = pose_file[\"poseest/\" + id_key][frame_idx, ...]\n # pose_v3 uses another field to describe the number of valid poses\n if pose_version == 3:\n max_ids = pose_file[\"poseest/instance_count\"][frame_idx]\n for cur_id in range(max_ids):\n # v4+ uses reserved values for invalid/unused poses\n # Note: ignores 'poseest/id_mask' to keep predictions that were not assigned an id\n if pose_version > 3 and pose_ids[cur_id] <= 0:\n continue\n if pose_ids[cur_id] not in tracks.keys():\n tracks[pose_ids[cur_id]] = Track(str(pose_ids[cur_id]))\n new_instance = prediction_to_instance(\n pose_data[cur_id],\n pose_conf[cur_id],\n skeleton,\n tracks[pose_ids[cur_id]],\n )\n if new_instance:\n instances.append(new_instance)\n # Static objects\n if (\n frame_idx == 0\n and pose_version >= 5\n and \"static_objects\" in pose_file.keys()\n ):\n present_objects = pose_file[\"static_objects\"].keys()\n for cur_object in present_objects:\n object_keypoints = pose_file[\"static_objects/\" + cur_object][:]\n object_skeleton = make_simple_skeleton(\n cur_object, object_keypoints.shape[0]\n )\n new_instance = prediction_to_instance(\n object_keypoints,\n np.ones(object_keypoints.shape[:-1]),\n object_skeleton,\n )\n if new_instance:\n instances.append(new_instance)\n frame_label = LabeledFrame(video, 
frame_idx, instances)\n frames.append(frame_label)\n labels = Labels(frames)\n labels.provenance[\"filename\"] = labels_path\n return labels\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.tracklets_to_v3","title":"tracklets_to_v3(tracklet_matrix)
","text":"Changes identity tracklets to the v3 format specifications.
v3 specifications require: (a) tracklets are 0-indexed, (b) tracklets appear in ascending order, and (c) tracklets exist for continuous blocks of time.
Parameters:
Name Type Description Defaulttracklet_matrix
ndarray
Numpy array of shape (frame, n_animals) that contains identity values. Identities are assumed to be 1-indexed.
requiredReturns:
Type Descriptionndarray
A corrected numpy array of the same shape as input
Source code in sleap_io/io/jabs.py
def tracklets_to_v3(tracklet_matrix: np.ndarray) -> np.ndarray:\n \"\"\"Changes identity tracklets to the v3 format specifications.\n\n v3 specifications require:\n (a) tracklets are 0-indexed\n (b) tracklets appear in ascending order\n (c) tracklets exist for continuous blocks of time\n\n Args:\n tracklet_matrix: Numpy array of shape (frame, n_animals) that contains identity values. Identities are assumed to be 1-indexed.\n\n Returns:\n A corrected numpy array of the same shape as input\n \"\"\"\n assert tracklet_matrix.ndim == 2\n\n # Fragment the tracklets based on gaps\n valid_ids = np.unique(tracklet_matrix)\n valid_ids = valid_ids[valid_ids != 0]\n track_fragments = {}\n for cur_id in valid_ids:\n frame_idx, column_idx = np.where(tracklet_matrix == cur_id)\n gaps = np.nonzero(np.diff(frame_idx) - 1)[0]\n for sliced_frame, sliced_column in zip(\n np.split(frame_idx, gaps + 1), np.split(column_idx, gaps + 1)\n ):\n # The keys used here are (first frame, first column) such that sorting can be used for ascending order\n track_fragments[sliced_frame[0], sliced_column[0]] = sliced_column\n\n return_mat = np.zeros_like(tracklet_matrix)\n for next_id, key in enumerate(sorted(track_fragments.keys())):\n columns_to_assign = track_fragments[key]\n return_mat[\n range(key[0], key[0] + len(columns_to_assign)), columns_to_assign\n ] = next_id\n\n return return_mat\n
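Worked sketch of the reindexing (input values are hypothetical; the expected output follows from the logic above):
import numpy as np
from sleap_io.io.jabs import tracklets_to_v3
tracks = np.array(
    [
        [1, 2],
        [1, 2],
        [0, 2],  # identity 1 is missing for one frame, so its tracklet is split
        [1, 2],
    ]
)
tracklets_to_v3(tracks)
# array([[0, 1],
#        [0, 1],
#        [0, 1],
#        [2, 1]])
# Fragments are renumbered 0, 1, 2 in ascending (first frame, first column) order.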
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v2","title":"write_jabs_v2(data, filename)
","text":"Write JABS pose file v2 data to file.
Writes single mouse pose data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code in sleap_io/io/jabs.py
def write_jabs_v2(data: dict, filename: str):\n \"\"\"Write JABS pose file v2 data to file.\n\n Writes single mouse pose data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # Check that we're trying to write single mouse data\n assert data[\"keypoints\"].shape[1] == 1\n out_keypoints = np.squeeze(data[\"keypoints\"], axis=1)\n out_confidences = np.squeeze(data[\"confidence\"], axis=1)\n\n with h5py.File(filename, \"w\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [2, 0]})\n pose_grp.require_dataset(\n \"points\", out_keypoints.shape, out_keypoints.dtype, data=out_keypoints\n )\n pose_grp.require_dataset(\n \"confidence\",\n out_confidences.shape,\n out_confidences.dtype,\n data=out_confidences,\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v3","title":"write_jabs_v3(data, filename)
","text":"Write JABS pose file v3 data to file.
Writes multi-mouse pose data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code in sleap_io/io/jabs.py
def write_jabs_v3(data: dict, filename: str):\n \"\"\"Write JABS pose file v3 data to file.\n\n Writes multi-mouse pose data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n v3_tracklets = tracklets_to_v3(data[\"identity\"])\n with h5py.File(filename, \"w\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [3, 0]})\n # keypoint field\n pose_grp.require_dataset(\n \"points\",\n data[\"keypoints\"].shape,\n data[\"keypoints\"].dtype,\n data=data[\"keypoints\"],\n )\n # confidence field\n pose_grp.require_dataset(\n \"confidence\",\n data[\"confidence\"].shape,\n data[\"confidence\"].dtype,\n data=data[\"confidence\"],\n )\n # id field\n pose_grp.require_dataset(\n \"instance_track_id\",\n v3_tracklets.shape,\n v3_tracklets.dtype,\n data=v3_tracklets,\n )\n # instance count field\n pose_grp.require_dataset(\n \"instance_count\",\n data[\"num_identities\"].shape,\n data[\"num_identities\"].dtype,\n data=data[\"num_identities\"],\n )\n # extra field where we don't have data, so fill with default data\n pose_grp.require_dataset(\n \"instance_embedding\",\n data[\"confidence\"].shape,\n data[\"confidence\"].dtype,\n data=np.zeros_like(data[\"confidence\"]),\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v4","title":"write_jabs_v4(data, filename)
","text":"Write JABS pose file v4 data to file.
Writes multi-mouse pose and longterm identity object data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code in sleap_io/io/jabs.py
def write_jabs_v4(data: dict, filename: str):\n \"\"\"Write JABS pose file v4 data to file.\n\n Writes multi-mouse pose and longterm identity object data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # v4 extends v3\n write_jabs_v3(data, filename)\n with h5py.File(filename, \"a\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [4, 0]})\n # new fields on top of v4\n identity_mask_mat = np.all(data[\"confidence\"] == 0, axis=-1).astype(bool)\n pose_grp.require_dataset(\n \"id_mask\",\n identity_mask_mat.shape,\n identity_mask_mat.dtype,\n data=identity_mask_mat,\n )\n # No identity embedding data\n # Note that since the identity information doesn't exist, this will break any functionality that relies on it\n default_id_embeds = np.zeros(\n list(identity_mask_mat.shape) + [0], dtype=np.float32\n )\n pose_grp.require_dataset(\n \"identity_embeds\",\n default_id_embeds.shape,\n default_id_embeds.dtype,\n data=default_id_embeds,\n )\n default_id_centers = np.zeros(default_id_embeds.shape[1:], dtype=np.float32)\n pose_grp.require_dataset(\n \"instance_id_center\",\n default_id_centers.shape,\n default_id_centers.dtype,\n data=default_id_centers,\n )\n # v4 uses an id field that is 1-indexed\n pose_grp.require_dataset(\n \"instance_embed_id\",\n data[\"identity\"].shape,\n data[\"identity\"].dtype,\n data=data[\"identity\"],\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v5","title":"write_jabs_v5(data, filename)
","text":"Write JABS pose file v5 data to file.
Writes multi-mouse pose, longterm identity, and static object data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code in sleap_io/io/jabs.py
def write_jabs_v5(data: dict, filename: str):\n \"\"\"Write JABS pose file v5 data to file.\n\n Writes multi-mouse pose, longterm identity, and static object data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # v5 extends v4\n write_jabs_v4(data, filename)\n with h5py.File(filename, \"a\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [5, 0]})\n if \"static_objects\" in data.keys():\n object_grp = h5.require_group(\"static_objects\")\n for object_key, object_keypoints in data[\"static_objects\"].items():\n object_grp.require_dataset(\n object_key,\n object_keypoints.shape,\n np.uint16,\n data=object_keypoints.astype(np.uint16),\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_labels","title":"write_labels(labels, pose_version, root_folder)
","text":"Convert and save a SLEAP Labels
object to a JABS pose file.
Only supports pose version 2 (single mouse) and 3-5 (multi mouse).
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to JABS pose format.
pose_version
int
JABS pose version to use when writing data.
requiredroot_folder
str
Root folder where the jabs files should be written
required Source code in sleap_io/io/jabs.py
def write_labels(labels: Labels, pose_version: int, root_folder: str):\n \"\"\"Convert and save a SLEAP `Labels` object to a JABS pose file.\n\n Only supports pose version 2 (single mouse) and 3-5 (multi mouse).\n\n Args:\n labels: SLEAP `Labels` to be converted to JABS pose format.\n pose_version: JABS pose version to use when writing data.\n root_folder: Root folder where the jabs files should be written\n \"\"\"\n for video in labels.videos:\n converted_labels = convert_labels(labels, video)\n out_filename = (\n os.path.splitext(video.filename)[0] + f\"_pose_est_v{pose_version}.h5\"\n )\n if root_folder:\n out_filename = os.path.join(root_folder, out_filename)\n os.makedirs(os.path.dirname(out_filename), exist_ok=True)\n if os.path.exists(out_filename):\n warnings.warn(f\"Skipping {out_filename} because it already exists.\")\n continue\n if pose_version == 2:\n write_jabs_v2(converted_labels, out_filename)\n elif pose_version == 3:\n write_jabs_v3(converted_labels, out_filename)\n elif pose_version == 4:\n write_jabs_v4(converted_labels, out_filename)\n elif pose_version == 5:\n write_jabs_v5(converted_labels, out_filename)\n else:\n raise NotImplementedError(f\"Pose format {pose_version} not supported.\")\n
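Usage sketch (the .slp path is hypothetical):
import sleap_io as sio
from sleap_io.io.jabs import write_labels
labels = sio.load_slp("predictions.slp")
write_labels(labels, pose_version=5, root_folder="jabs_out")  # one pose file per video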
"},{"location":"reference/sleap_io/io/labelstudio/","title":"labelstudio","text":""},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio","title":"sleap_io.io.labelstudio
","text":"This module handles direct I/O operations for working with Labelstudio files.
Some important nomenclature:
tasks: typically maps to a single frame of data to be annotated; closest correspondence is to LabeledFrame
annotations: collection of points, polygons, relations, etc.; corresponds to Instances and Points, but as a flattened hierarchy
Functions:
Name Descriptionbuild_relation_map
Build a two-way relationship map between annotations.
convert_labels
Convert a Labels
object into Label Studio-formatted annotations.
filter_and_index
Filter annotations based on the type field and index them by ID.
infer_nodes
Parse the loaded JSON tasks to create a minimal skeleton.
parse_tasks
Read Label Studio style annotations from a file and return a Labels
object.
read_labels
Read Label Studio style annotations from a file and return a Labels
object.
task_to_labeled_frame
Parse annotations from an entry.
video_from_task
Given a Label Studio task, retrieve video information.
write_labels
Convert and save a SLEAP Labels
object to a Label Studio .json
file.
build_relation_map(annotations)
","text":"Build a two-way relationship map between annotations.
Parameters:
Name Type Description Defaultannotations
Iterable[dict]
annotations, presumably, containing relation types
requiredReturns:
Type DescriptionDict[str, List[str]]
A two way map of relations indexed by from_id
and to_id
fields.
sleap_io/io/labelstudio.py
def build_relation_map(annotations: Iterable[dict]) -> Dict[str, List[str]]:\n \"\"\"Build a two-way relationship map between annotations.\n\n Args:\n annotations: annotations, presumably, containing relation types\n\n Returns:\n A two way map of relations indexed by `from_id` and `to_id` fields.\n \"\"\"\n relations = list(filter(lambda d: d[\"type\"] == \"relation\", annotations))\n relmap: Dict[str, List[str]] = {}\n for rel in relations:\n if rel[\"from_id\"] not in relmap:\n relmap[rel[\"from_id\"]] = []\n relmap[rel[\"from_id\"]].append(rel[\"to_id\"])\n\n if rel[\"to_id\"] not in relmap:\n relmap[rel[\"to_id\"]] = []\n relmap[rel[\"to_id\"]].append(rel[\"from_id\"])\n return relmap\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.convert_labels","title":"convert_labels(labels)
","text":"Convert a Labels
object into Label Studio-formatted annotations.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to Label Studio task format.
Returns:
Type DescriptionList[dict]
Label Studio dictionaries of the Labels
data.
sleap_io/io/labelstudio.py
def convert_labels(labels: Labels) -> List[dict]:\n \"\"\"Convert a `Labels` object into Label Studio-formatted annotations.\n\n Args:\n labels: SLEAP `Labels` to be converted to Label Studio task format.\n\n Returns:\n Label Studio dictionaries of the `Labels` data.\n \"\"\"\n out = []\n for frame in labels.labeled_frames:\n if frame.video.shape is not None:\n height = frame.video.shape[1]\n width = frame.video.shape[2]\n else:\n height = 100\n width = 100\n\n frame_annots = []\n\n for instance in frame.instances:\n inst_id = str(uuid.uuid4())\n frame_annots.append(\n {\n \"original_width\": width,\n \"original_height\": height,\n \"image_rotation\": 0,\n \"value\": {\n \"x\": 0,\n \"y\": 0,\n \"width\": width,\n \"height\": height,\n \"rotation\": 0,\n \"rectanglelabels\": [\n \"instance_class\"\n ], # TODO: need to handle instance classes / identity\n },\n \"id\": inst_id,\n \"from_name\": \"individuals\",\n \"to_name\": \"image\",\n \"type\": \"rectanglelabels\",\n }\n )\n\n for node, point in instance.points.items():\n point_id = str(uuid.uuid4())\n\n # add this point\n frame_annots.append(\n {\n \"original_width\": width,\n \"original_height\": height,\n \"image_rotation\": 0,\n \"value\": {\n \"x\": point.x / width * 100,\n \"y\": point.y / height * 100,\n \"keypointlabels\": [node.name],\n },\n \"from_name\": \"keypoint-label\",\n \"to_name\": \"image\",\n \"type\": \"keypointlabels\",\n \"id\": point_id,\n }\n )\n\n # add relationship of point to individual\n frame_annots.append(\n {\n \"from_id\": point_id,\n \"to_id\": inst_id,\n \"type\": \"relation\",\n \"direction\": \"right\",\n }\n )\n\n out.append(\n {\n \"data\": {\n # 'image': f\"/data/{up_deets['file']}\"\n },\n \"meta\": {\n \"video\": {\n \"filename\": frame.video.filename,\n \"frame_idx\": frame.frame_idx,\n \"shape\": frame.video.shape,\n }\n },\n \"annotations\": [\n {\n \"result\": frame_annots,\n \"was_cancelled\": False,\n \"ground_truth\": False,\n \"created_at\": datetime.datetime.now(\n datetime.timezone.utc\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"updated_at\": datetime.datetime.now(\n datetime.timezone.utc\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"lead_time\": 0,\n \"result_count\": 1,\n # \"completed_by\": user['id']\n }\n ],\n }\n )\n\n return out\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.filter_and_index","title":"filter_and_index(annotations, annot_type)
","text":"Filter annotations based on the type field and index them by ID.
Parameters:
Name Type Description Defaultannotations
Iterable[dict]
annotations to filter and index
requiredannot_type
str
annotation type to filter, e.g. 'keypointlabels' or 'rectanglelabels'
requiredReturns:
Type DescriptionDict[str, dict]
Dict of indexed and filtered annotations. Only annotations of type annot_type
will survive, and annotations are indexed by ID.
sleap_io/io/labelstudio.py
def filter_and_index(annotations: Iterable[dict], annot_type: str) -> Dict[str, dict]:\n \"\"\"Filter annotations based on the type field and index them by ID.\n\n Args:\n annotations: annotations to filter and index\n annot_type: annotation type to filter e.x. 'keypointlabels' or 'rectanglelabels'\n\n Returns:\n Dict of ndexed and filtered annotations. Only annotations of type `annot_type`\n will survive, and annotations are indexed by ID.\n \"\"\"\n filtered = list(filter(lambda d: d[\"type\"] == annot_type, annotations))\n indexed = {item[\"id\"]: item for item in filtered}\n return indexed\n
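Usage sketch (results stands for the flattened 'result' list of a Label Studio task; it is not defined here):
from sleap_io.io.labelstudio import filter_and_index
keypoints = filter_and_index(results, "keypointlabels")     # keypoint annotations indexed by ID
individuals = filter_and_index(results, "rectanglelabels")  # bounding-box annotations indexed by ID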
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.infer_nodes","title":"infer_nodes(tasks)
","text":"Parse the loaded JSON tasks to create a minimal skeleton.
Parameters:
Name Type Description Defaulttasks
List[Dict]
Collection of tasks loaded from Label Studio JSON.
requiredReturns:
Type DescriptionSkeleton
The inferred Skeleton
.
sleap_io/io/labelstudio.py
def infer_nodes(tasks: List[Dict]) -> Skeleton:\n \"\"\"Parse the loaded JSON tasks to create a minimal skeleton.\n\n Args:\n tasks: Collection of tasks loaded from Label Studio JSON.\n\n Returns:\n The inferred `Skeleton`.\n \"\"\"\n node_names = set()\n for entry in tasks:\n if \"annotations\" in entry:\n key = \"annotations\"\n elif \"completions\" in entry:\n key = \"completions\"\n else:\n raise ValueError(\"Cannot find annotation data for entry!\")\n\n for annotation in entry[key]:\n for datum in annotation[\"result\"]:\n if datum[\"type\"] == \"keypointlabels\":\n for node_name in datum[\"value\"][\"keypointlabels\"]:\n node_names.add(node_name)\n\n skeleton = Skeleton(nodes=list(node_names))\n return skeleton\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.parse_tasks","title":"parse_tasks(tasks, skeleton)
","text":"Read Label Studio style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaulttasks
List[Dict]
Collection of tasks to be converted to Labels
.
skeleton
Skeleton
Skeleton
with the nodes and edges to be used.
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/labelstudio.py
def parse_tasks(tasks: List[Dict], skeleton: Skeleton) -> Labels:\n \"\"\"Read Label Studio style annotations from a file and return a `Labels` object.\n\n Args:\n tasks: Collection of tasks to be converted to `Labels`.\n skeleton: `Skeleton` with the nodes and edges to be used.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n frames: List[LabeledFrame] = []\n for entry in tasks:\n # depending version, we have seen keys `annotations` and `completions`\n if \"annotations\" in entry:\n key = \"annotations\"\n elif \"completions\" in entry:\n key = \"completions\"\n else:\n raise ValueError(\"Cannot find annotation data for entry!\")\n\n frames.append(task_to_labeled_frame(entry, skeleton, key=key))\n\n return Labels(frames)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.read_labels","title":"read_labels(labels_path, skeleton=None)
","text":"Read Label Studio style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaultlabels_path
str
Path to the Label Studio annotation file, in json format.
requiredskeleton
Optional[Union[Skeleton, List[str]]]
An optional Skeleton
object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/labelstudio.py
def read_labels(\n labels_path: str, skeleton: Optional[Union[Skeleton, List[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio style annotations from a file and return a `Labels` object.\n\n Args:\n labels_path: Path to the Label Studio annotation file, in json format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n with open(labels_path, \"r\") as task_file:\n tasks = json.load(task_file)\n\n if type(skeleton) == list:\n skeleton = Skeleton(nodes=skeleton) # type: ignore[arg-type]\n elif skeleton is None:\n skeleton = infer_nodes(tasks)\n else:\n assert isinstance(skeleton, Skeleton)\n\n labels = parse_tasks(tasks, skeleton)\n labels.provenance[\"filename\"] = labels_path\n return labels\n
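Usage sketch (the file and node names are hypothetical):
from sleap_io.io.labelstudio import read_labels
labels = read_labels("project_export.json", skeleton=["head", "thorax", "abdomen"])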
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.task_to_labeled_frame","title":"task_to_labeled_frame(task, skeleton, key='annotations')
","text":"Parse annotations from an entry.
Parameters:
Name Type Description Defaulttask
dict
Label Studio task to be parsed.
requiredskeleton
Skeleton
Skeleton to use for parsing.
requiredkey
str
Key to use for parsing annotations. Defaults to \"annotations\".
'annotations'
Returns:
Type DescriptionLabeledFrame
Parsed LabeledFrame
instance.
sleap_io/io/labelstudio.py
def task_to_labeled_frame(\n task: dict, skeleton: Skeleton, key: str = \"annotations\"\n) -> LabeledFrame:\n \"\"\"Parse annotations from an entry.\n\n Args:\n task: Label Studio task to be parsed.\n skeleton: Skeleton to use for parsing.\n key: Key to use for parsing annotations. Defaults to \"annotations\".\n\n Returns:\n Parsed `LabeledFrame` instance.\n \"\"\"\n if len(task[key]) > 1:\n warnings.warn(\n f\"Task {task.get('id', '??')}: Multiple annotations found, \"\n \"only taking the first!\"\n )\n\n # only parse the first entry result\n to_parse = task[key][0][\"result\"]\n\n individuals = filter_and_index(to_parse, \"rectanglelabels\")\n keypoints = filter_and_index(to_parse, \"keypointlabels\")\n relations = build_relation_map(to_parse)\n instances = []\n\n if len(individuals) > 0:\n # multi animal case:\n for indv_id, indv in individuals.items():\n points = {}\n for rel in relations[indv_id]:\n kpt = keypoints.pop(rel)\n node = Node(kpt[\"value\"][\"keypointlabels\"][0])\n x_pos = (kpt[\"value\"][\"x\"] * kpt[\"original_width\"]) / 100\n y_pos = (kpt[\"value\"][\"y\"] * kpt[\"original_height\"]) / 100\n\n # If the value is a NAN, the user did not mark this keypoint\n if math.isnan(x_pos) or math.isnan(y_pos):\n continue\n\n points[node] = Point(x_pos, y_pos)\n\n if len(points) > 0:\n instances.append(Instance(points, skeleton))\n\n # If this is multi-animal, any leftover keypoints should be unique bodyparts, and\n # will be collected here if single-animal, we only have 'unique bodyparts' [in a\n # way] and the process is identical\n points = {}\n for _, kpt in keypoints.items():\n node = Node(kpt[\"value\"][\"keypointlabels\"][0])\n points[node] = Point(\n (kpt[\"value\"][\"x\"] * kpt[\"original_width\"]) / 100,\n (kpt[\"value\"][\"y\"] * kpt[\"original_height\"]) / 100,\n visible=True,\n )\n if len(points) > 0:\n instances.append(Instance(points, skeleton))\n\n video, frame_idx = video_from_task(task)\n\n return LabeledFrame(video, frame_idx, instances)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.video_from_task","title":"video_from_task(task)
","text":"Given a Label Studio task, retrieve video information.
Parameters:
Name Type Description Defaulttask
dict
Label Studio task
requiredReturns:
Type DescriptionTuple[Video, int]
Video and frame index for this task
Source code in sleap_io/io/labelstudio.py
def video_from_task(task: dict) -> Tuple[Video, int]:\n \"\"\"Given a Label Studio task, retrieve video information.\n\n Args:\n task: Label Studio task\n\n Returns:\n Video and frame index for this task\n \"\"\"\n if \"meta\" in task and \"video\" in task[\"meta\"]:\n video = Video(task[\"meta\"][\"video\"][\"filename\"], task[\"meta\"][\"video\"][\"shape\"])\n frame_idx = task[\"meta\"][\"video\"][\"frame_idx\"]\n return video, frame_idx\n\n else:\n raise KeyError(\"Unable to locate video information for task!\", task)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.write_labels","title":"write_labels(labels, filename)
","text":"Convert and save a SLEAP Labels
object to a Label Studio .json
file.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to Label Studio task format.
filename
str
Path to save Label Studio annotations (.json
).
sleap_io/io/labelstudio.py
def write_labels(labels: Labels, filename: str):\n \"\"\"Convert and save a SLEAP `Labels` object to a Label Studio `.json` file.\n\n Args:\n labels: SLEAP `Labels` to be converted to Label Studio task format.\n filename: Path to save Label Studio annotations (`.json`).\n \"\"\"\n\n def _encode(obj):\n if type(obj).__name__ == \"uint64\":\n return int(obj)\n\n ls_dicts = convert_labels(labels)\n with open(filename, \"w\") as f:\n json.dump(ls_dicts, f, indent=4, default=_encode)\n
"},{"location":"reference/sleap_io/io/main/","title":"main","text":""},{"location":"reference/sleap_io/io/main/#sleap_io.io.main","title":"sleap_io.io.main
","text":"This module contains high-level wrappers for utilizing different I/O backends.
Modules:
Name Descriptionjabs
This module handles direct I/O operations for working with JABS files.
labelstudio
This module handles direct I/O operations for working with Labelstudio files.
nwb
Functions to write and read from the neurodata without borders (NWB) format.
slp
This module handles direct I/O operations for working with .slp files.
video_writing
Utilities for writing videos.
Functions:
Name Descriptionload_file
Load a file and return the appropriate object.
load_jabs
Read JABS-style predictions from a file and return a Labels
object.
load_labelstudio
Read Label Studio-style annotations from a file and return a Labels
object.
load_nwb
Load an NWB dataset as a SLEAP Labels
object.
load_slp
Load a SLEAP dataset.
load_video
Load a video file.
save_file
Save a file based on the extension.
save_jabs
Save a SLEAP dataset to JABS pose file format.
save_labelstudio
Save a SLEAP dataset to Label Studio format.
save_nwb
Save a SLEAP dataset to NWB format.
save_slp
Save a SLEAP dataset to a .slp
file.
save_video
Write a list of frames to a video file.
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_file","title":"load_file(filename, format=None, **kwargs)
","text":"Load a file and return the appropriate object.
Parameters:
Name Type Description Defaultfilename
str | Path
Path to a file.
requiredformat
Optional[str]
Optional format to load as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\" and \"video\".
None
Returns:
Type DescriptionUnion[Labels, Video]
A Labels
or Video
object.
sleap_io/io/main.py
def load_file(\n filename: str | Path, format: Optional[str] = None, **kwargs\n) -> Union[Labels, Video]:\n \"\"\"Load a file and return the appropriate object.\n\n Args:\n filename: Path to a file.\n format: Optional format to load as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\"\n and \"video\".\n\n Returns:\n A `Labels` or `Video` object.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"json\"\n elif filename.endswith(\".h5\"):\n format = \"jabs\"\n else:\n for vid_ext in Video.EXTS:\n if filename.endswith(vid_ext):\n format = \"video\"\n break\n if format is None:\n raise ValueError(f\"Could not infer format from filename: '{filename}'.\")\n\n if filename.endswith(\".slp\"):\n return load_slp(filename, **kwargs)\n elif filename.endswith(\".nwb\"):\n return load_nwb(filename, **kwargs)\n elif filename.endswith(\".json\"):\n return load_labelstudio(filename, **kwargs)\n elif filename.endswith(\".h5\"):\n return load_jabs(filename, **kwargs)\n elif format == \"video\":\n return load_video(filename, **kwargs)\n
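Usage sketch (filenames are hypothetical):
import sleap_io as sio
labels = sio.load_file("predictions.slp")  # inferred as SLP from the extension
video = sio.load_file("session.mp4")       # inferred as a video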
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_jabs","title":"load_jabs(filename, skeleton=None)
","text":"Read JABS-style predictions from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the jabs h5 pose file.
requiredskeleton
Optional[Skeleton]
An optional Skeleton
object.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_jabs(filename: str, skeleton: Optional[Skeleton] = None) -> Labels:\n \"\"\"Read JABS-style predictions from a file and return a `Labels` object.\n\n Args:\n filename: Path to the jabs h5 pose file.\n skeleton: An optional `Skeleton` object.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return jabs.read_labels(filename, skeleton=skeleton)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_labelstudio","title":"load_labelstudio(filename, skeleton=None)
","text":"Read Label Studio-style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the label-studio annotation file in JSON format.
requiredskeleton
Optional[Union[Skeleton, list[str]]]
An optional Skeleton
object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_labelstudio(\n filename: str, skeleton: Optional[Union[Skeleton, list[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio-style annotations from a file and return a `Labels` object.\n\n Args:\n filename: Path to the label-studio annotation file in JSON format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return labelstudio.read_labels(filename, skeleton=skeleton)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_nwb","title":"load_nwb(filename)
","text":"Load an NWB dataset as a SLEAP Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to a NWB file (.nwb
).
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_nwb(filename: str) -> Labels:\n \"\"\"Load an NWB dataset as a SLEAP `Labels` object.\n\n Args:\n filename: Path to a NWB file (`.nwb`).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return nwb.read_nwb(filename)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_slp","title":"load_slp(filename, open_videos=True)
","text":"Load a SLEAP dataset.
Parameters:
Name Type Description Defaultfilename
str
Path to a SLEAP labels file (.slp
).
open_videos
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_slp(filename: str, open_videos: bool = True) -> Labels:\n \"\"\"Load a SLEAP dataset.\n\n Args:\n filename: Path to a SLEAP labels file (`.slp`).\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return slp.read_labels(filename, open_videos=open_videos)\n
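Usage sketch (the path is hypothetical):
import sleap_io as sio
labels = sio.load_slp("labels.v001.slp", open_videos=False)  # read metadata without opening video backends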
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_video","title":"load_video(filename, **kwargs)
","text":"Load a video file.
Parameters:
Name Type Description Defaultfilename
str
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
requiredReturns:
Type DescriptionVideo
A Video
object.
sleap_io/io/main.py
def load_video(filename: str, **kwargs) -> Video:\n \"\"\"Load a video file.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n\n Returns:\n A `Video` object.\n \"\"\"\n return Video.from_filename(filename, **kwargs)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_file","title":"save_file(labels, filename, format=None, **kwargs)
","text":"Save a file based on the extension.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str | Path
Path to save labels to.
requiredformat
Optional[str]
Optional format to save as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and \"jabs\".
None
Source code in sleap_io/io/main.py
def save_file(\n labels: Labels, filename: str | Path, format: Optional[str] = None, **kwargs\n):\n \"\"\"Save a file based on the extension.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to.\n format: Optional format to save as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and\n \"jabs\".\n \"\"\"\n if isinstance(filename, Path):\n filename = str(filename)\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"labelstudio\"\n elif \"pose_version\" in kwargs:\n format = \"jabs\"\n\n if format == \"slp\":\n save_slp(labels, filename, **kwargs)\n elif format == \"nwb\":\n save_nwb(labels, filename, **kwargs)\n elif format == \"labelstudio\":\n save_labelstudio(labels, filename, **kwargs)\n elif format == \"jabs\":\n pose_version = kwargs.pop(\"pose_version\", 5)\n root_folder = kwargs.pop(\"root_folder\", filename)\n save_jabs(labels, pose_version=pose_version, root_folder=root_folder)\n else:\n raise ValueError(f\"Unknown format '{format}' for filename: '{filename}'.\")\n
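Usage sketch (paths are hypothetical):
import sleap_io as sio
labels = sio.load_slp("predictions.slp")
sio.save_file(labels, "annotations.json")                         # format inferred as Label Studio
sio.save_file(labels, "jabs_out", format="jabs", pose_version=5)  # filename doubles as the JABS root folder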
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_jabs","title":"save_jabs(labels, pose_version, root_folder=None)
","text":"Save a SLEAP dataset to JABS pose file format.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
object.
pose_version
int
The JABS pose version to write data out.
requiredroot_folder
Optional[str]
Optional root folder where the files should be saved.
None
Note: Filenames for JABS poses are based on video filenames.
Source code in sleap_io/io/main.py
def save_jabs(labels: Labels, pose_version: int, root_folder: Optional[str] = None):\n \"\"\"Save a SLEAP dataset to JABS pose file format.\n\n Args:\n labels: SLEAP `Labels` object.\n pose_version: The JABS pose version to write data out.\n root_folder: Optional root folder where the files should be saved.\n\n Note:\n Filenames for JABS poses are based on video filenames.\n \"\"\"\n jabs.write_labels(labels, pose_version, root_folder)\n
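A usage sketch (hypothetical paths; assumes save_jabs is re-exported at the package level):

import sleap_io as sio

labels = sio.load_slp("predictions.slp")

# Writes one JABS pose file per video under the given root folder.
sio.save_jabs(labels, pose_version=5, root_folder="jabs_output")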
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_labelstudio","title":"save_labelstudio(labels, filename)
","text":"Save a SLEAP dataset to Label Studio format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .json
.
sleap_io/io/main.py
def save_labelstudio(labels: Labels, filename: str):\n \"\"\"Save a SLEAP dataset to Label Studio format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.json`.\n \"\"\"\n labelstudio.write_labels(labels, filename)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_nwb","title":"save_nwb(labels, filename, append=True)
","text":"Save a SLEAP dataset to NWB format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to NWB file to save to. Must end in .nwb
.
append
bool
If True
(the default), append to an existing NWB file. The file will be created if it does not exist.
True
See also: nwb.write_nwb, nwb.append_nwb
Source code in sleap_io/io/main.py
def save_nwb(labels: Labels, filename: str, append: bool = True):\n \"\"\"Save a SLEAP dataset to NWB format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to NWB file to save to. Must end in `.nwb`.\n append: If `True` (the default), append to existing NWB file. File will be\n created if it does not exist.\n\n See also: nwb.write_nwb, nwb.append_nwb\n \"\"\"\n if append and Path(filename).exists():\n nwb.append_nwb(labels, filename)\n else:\n nwb.write_nwb(labels, filename)\n
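A usage sketch (hypothetical paths; assumes save_nwb is re-exported at the package level):

import sleap_io as sio

labels = sio.load_slp("predictions.slp")

# Creates session.nwb if it does not exist; with append=True (the default),
# a later call adds to the same file instead of overwriting it.
sio.save_nwb(labels, "session.nwb")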
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_slp","title":"save_slp(labels, filename, embed=None)
","text":"Save a SLEAP dataset to a .slp
file.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .slp
.
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contain embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/io/main.py
def save_slp(\n labels: Labels,\n filename: str,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Save a SLEAP dataset to a `.slp` file.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.slp`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n return slp.write_labels(filename, labels, embed=embed)\n
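A usage sketch of the embedding options (hypothetical paths; assumes save_slp is re-exported at the package level):

import sleap_io as sio

labels = sio.load_slp("predictions.slp")

# Plain save; previously embedded frames (if any) are re-saved.
sio.save_slp(labels, "copy.slp")

# Self-contained package embedding the images of user-labeled frames.
sio.save_slp(labels, "labels.pkg.slp", embed="user")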
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_video","title":"save_video(frames, filename, fps=30, pixelformat='yuv420p', codec='libx264', crf=25, preset='superfast', output_params=None)
","text":"Write a list of frames to a video file.
Parameters:
Name Type Description Defaultframes
ndarray | Video
Sequence of frames to write to video. Each frame should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
requiredfilename
str | Path
Path to output video file.
requiredfps
float
Frames per second. Defaults to 30.
30
pixelformat
str
Pixel format for video. Defaults to \"yuv420p\".
'yuv420p'
codec
str
Codec to use for encoding. Defaults to \"libx264\".
'libx264'
crf
int
Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not \"libx264\".
25
preset
str
H264 encoding preset. Defaults to \"superfast\". No effect if codec is not \"libx264\".
'superfast'
output_params
list | None
Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use ffmpeg -h encoder=libx264
to see all options for libx264 output_params.
None
See also: sio.VideoWriter
sleap_io/io/main.py
def save_video(\n frames: np.ndarray | Video,\n filename: str | Path,\n fps: float = 30,\n pixelformat: str = \"yuv420p\",\n codec: str = \"libx264\",\n crf: int = 25,\n preset: str = \"superfast\",\n output_params: list | None = None,\n):\n \"\"\"Write a list of frames to a video file.\n\n Args:\n frames: Sequence of frames to write to video. Each frame should be a 2D or 3D\n numpy array with dimensions (height, width) or (height, width, channels).\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n See also: `sio.VideoWriter`\n \"\"\"\n if output_params is None:\n output_params = []\n\n with video_writing.VideoWriter(\n filename,\n fps=fps,\n pixelformat=pixelformat,\n codec=codec,\n crf=crf,\n preset=preset,\n output_params=output_params,\n ) as writer:\n for frame in frames:\n writer(frame)\n
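A usage sketch with synthetic frames (assumes save_video is re-exported at the package level; the data here is made up):

import numpy as np
import sleap_io as sio

# 100 random RGB frames of size 256 x 256.
frames = np.random.randint(0, 255, size=(100, 256, 256, 3), dtype="uint8")
sio.save_video(frames, "clip.mp4", fps=30, crf=23)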
"},{"location":"reference/sleap_io/io/nwb/","title":"nwb","text":""},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb","title":"sleap_io.io.nwb
","text":"Functions to write and read from the neurodata without borders (NWB) format.
Functions:
Name Description
append_nwb
Append a SLEAP Labels
object to an existing NWB data file.
append_nwb_data
Append data from a Labels object to an in-memory nwb file.
build_pose_estimation_container_for_track
Create a PoseEstimation container for a track.
build_track_pose_estimation_list
Build a list of PoseEstimationSeries from tracks.
convert_predictions_to_dataframe
Convert predictions data to a Pandas dataframe.
get_processing_module_for_video
Auxiliary function to create a processing module.
get_timestamps
Return a vector of timestamps for a PoseEstimationSeries
.
read_nwb
Read an NWB formatted file to a SLEAP Labels
object.
write_nwb
Write labels to an nwb file and save it to the nwbfile_path given.
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.append_nwb","title":"append_nwb(labels, filename, pose_estimation_metadata=None)
","text":"Append a SLEAP Labels
object to an existing NWB data file.
Parameters:
Name Type Description Defaultlabels
Labels
A general Labels
object.
filename
str
The path to the NWB file.
requiredpose_estimation_metadata
Optional[dict]
Metadata for pose estimation. See append_nwb_data
for details.
None
See also: append_nwb_data
Source code in sleap_io/io/nwb.py
def append_nwb(\n labels: Labels, filename: str, pose_estimation_metadata: Optional[dict] = None\n):\n \"\"\"Append a SLEAP `Labels` object to an existing NWB data file.\n\n Args:\n labels: A general `Labels` object.\n filename: The path to the NWB file.\n pose_estimation_metadata: Metadata for pose estimation. See `append_nwb_data`\n for details.\n\n See also: append_nwb_data\n \"\"\"\n with NWBHDF5IO(filename, mode=\"a\", load_namespaces=True) as io:\n nwb_file = io.read()\n nwb_file = append_nwb_data(\n labels, nwb_file, pose_estimation_metadata=pose_estimation_metadata\n )\n io.write(nwb_file)\n
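A usage sketch (hypothetical paths; the NWB file is assumed to already exist, since it is opened in append mode):

import sleap_io as sio
from sleap_io.io import nwb

labels = sio.load_slp("predictions.slp")

# Pass the video sampling rate (in Hz) via pose_estimation_metadata.
nwb.append_nwb(
    labels,
    "session.nwb",
    pose_estimation_metadata={"video_sample_rate": 30.0},
)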
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.append_nwb_data","title":"append_nwb_data(labels, nwbfile, pose_estimation_metadata=None)
","text":"Append data from a Labels object to an in-memory nwb file.
Parameters:
Name Type Description Defaultlabels
Labels
A general labels object
requirednwbfile
NWBFile
An in-memory nwbfile where the data is to be appended.
requiredpose_estimation_metadata
Optional[dict]
This argument has a dual purpose:
1) It can be used to pass time information about the video, which is necessary for synchronizing frames in pose estimation tracking to other modalities. The video timestamps can be passed with the key video_timestamps, or the sampling rate with the key video_sample_rate.
e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps) or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz
2) The other use of this dictionary is to overwrite the sleap-io default arguments for the PoseEstimation container. See https://github.com/rly/ndx-pose for a full list of arguments.
None
Returns:
Type DescriptionNWBFile
An in-memory nwbfile with the data from the labels object appended.
Source code in sleap_io/io/nwb.py
def append_nwb_data(\n labels: Labels, nwbfile: NWBFile, pose_estimation_metadata: Optional[dict] = None\n) -> NWBFile:\n \"\"\"Append data from a Labels object to an in-memory nwb file.\n\n Args:\n labels: A general labels object\n nwbfile: And in-memory nwbfile where the data is to be appended.\n pose_estimation_metadata: This argument has a dual purpose:\n\n 1) It can be used to pass time information about the video which is\n necessary for synchronizing frames in pose estimation tracking to other\n modalities. Either the video timestamps can be passed to\n This can be used to pass the timestamps with the key `video_timestamps`\n or the sampling rate with key`video_sample_rate`.\n\n e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps)\n or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz\n\n 2) The other use of this dictionary is to ovewrite sleap-io default\n arguments for the PoseEstimation container.\n see https://github.com/rly/ndx-pose for a full list or arguments.\n\n Returns:\n An in-memory nwbfile with the data from the labels object appended.\n \"\"\"\n pose_estimation_metadata = pose_estimation_metadata or dict()\n\n # Extract default metadata\n provenance = labels.provenance\n default_metadata = dict(scorer=str(provenance))\n sleap_version = provenance.get(\"sleap_version\", None)\n default_metadata[\"source_software_version\"] = sleap_version\n\n labels_data_df = convert_predictions_to_dataframe(labels)\n\n # For every video create a processing module\n for video_index, video in enumerate(labels.videos):\n video_path = Path(video.filename)\n processing_module_name = f\"SLEAP_VIDEO_{video_index:03}_{video_path.stem}\"\n nwb_processing_module = get_processing_module_for_video(\n processing_module_name, nwbfile\n )\n\n # Propagate video metadata\n default_metadata[\"original_videos\"] = [f\"{video.filename}\"] # type: ignore\n default_metadata[\"labeled_videos\"] = [f\"{video.filename}\"] # type: ignore\n\n # Overwrite default with the user provided metadata\n default_metadata.update(pose_estimation_metadata)\n\n # For every track in that video create a PoseEstimation container\n name_of_tracks_in_video = (\n labels_data_df[video.filename]\n .columns.get_level_values(\"track_name\")\n .unique()\n )\n\n for track_index, track_name in enumerate(name_of_tracks_in_video):\n pose_estimation_container = build_pose_estimation_container_for_track(\n labels_data_df,\n labels,\n track_name,\n video,\n default_metadata,\n )\n nwb_processing_module.add(pose_estimation_container)\n\n return nwbfile\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.build_pose_estimation_container_for_track","title":"build_pose_estimation_container_for_track(labels_data_df, labels, track_name, video, pose_estimation_metadata)
","text":"Create a PoseEstimation container for a track.
Parameters:
Name Type Description Defaultlabels_data_df
DataFrame
A pandas object with the data corresponding to the predicted instances associated with this labels object.
requiredlabels
Labels
A general labels object
requiredtrack_name
str
The name of the track in labels.tracks
requiredvideo
Video
The video to which the data belongs
requiredReturns:
Name Type DescriptionPoseEstimation
PoseEstimation
A PoseEstimation multicontainer where the time series of all the node trajectories in the track are stored. One time series per node.
Source code in sleap_io/io/nwb.py
def build_pose_estimation_container_for_track(\n labels_data_df: pd.DataFrame,\n labels: Labels,\n track_name: str,\n video: Video,\n pose_estimation_metadata: dict,\n) -> PoseEstimation:\n \"\"\"Create a PoseEstimation container for a track.\n\n Args:\n labels_data_df (pd.DataFrame): A pandas object with the data corresponding\n to the predicted instances associated to this labels object.\n labels (Labels): A general labels object\n track_name (str): The name of the track in labels.tracks\n video (Video): The video to which data belongs to\n\n Returns:\n PoseEstimation: A PoseEstimation multicontainer where the time series\n of all the node trajectories in the track are stored. One time series per\n node.\n \"\"\"\n # Copy metadata for local use and modification\n pose_estimation_metadata_copy = deepcopy(pose_estimation_metadata)\n video_path = Path(video.filename)\n\n all_track_skeletons = (\n labels_data_df[video.filename]\n .columns.get_level_values(\"skeleton_name\")\n .unique()\n )\n\n # Assuming only one skeleton per track\n skeleton_name = all_track_skeletons[0]\n skeleton = next(\n skeleton for skeleton in labels.skeletons if skeleton.name == skeleton_name\n )\n\n track_data_df = labels_data_df[\n video.filename,\n skeleton.name,\n track_name,\n ]\n\n # Combine each node's PoseEstimationSeries to create a PoseEstimation container\n timestamps = pose_estimation_metadata_copy.pop(\"video_timestamps\", None)\n sample_rate = pose_estimation_metadata_copy.pop(\"video_sample_rate\", 1.0)\n if timestamps is None:\n # Keeps backward compatbility.\n timestamps = np.arange(track_data_df.shape[0]) * sample_rate\n else:\n timestamps = np.asarray(timestamps)\n\n pose_estimation_series_list = build_track_pose_estimation_list(\n track_data_df, timestamps\n )\n\n # Arrange and mix metadata\n pose_estimation_container_kwargs = dict(\n name=f\"track={track_name}\",\n description=f\"Estimated positions of {skeleton.name} in video {video_path.name}\",\n pose_estimation_series=pose_estimation_series_list,\n nodes=skeleton.node_names,\n edges=np.array(skeleton.edge_inds).astype(\"uint64\"),\n source_software=\"SLEAP\",\n # dimensions=np.array([[video.backend.height, video.backend.width]]),\n )\n\n pose_estimation_container_kwargs.update(**pose_estimation_metadata_copy)\n pose_estimation_container = PoseEstimation(**pose_estimation_container_kwargs)\n\n return pose_estimation_container\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.build_track_pose_estimation_list","title":"build_track_pose_estimation_list(track_data_df, timestamps)
","text":"Build a list of PoseEstimationSeries from tracks.
Parameters:
Name Type Description Defaulttrack_data_df
DataFrame
A pandas DataFrame object containing the trajectories for all the nodes associated with a specific track.
requiredReturns:
Type DescriptionList[PoseEstimationSeries]
List[PoseEstimationSeries]: The list of all the PoseEstimationSeries. One for each node.
Source code in sleap_io/io/nwb.py
def build_track_pose_estimation_list(\n track_data_df: pd.DataFrame, timestamps: ArrayLike\n) -> List[PoseEstimationSeries]:\n \"\"\"Build a list of PoseEstimationSeries from tracks.\n\n Args:\n track_data_df (pd.DataFrame): A pandas DataFrame object containing the\n trajectories for all the nodes associated with a specific track.\n\n Returns:\n List[PoseEstimationSeries]: The list of all the PoseEstimationSeries.\n One for each node.\n \"\"\"\n name_of_nodes_in_track = track_data_df.columns.get_level_values(\n \"node_name\"\n ).unique()\n\n pose_estimation_series_list: List[PoseEstimationSeries] = []\n for node_name in name_of_nodes_in_track:\n # Drop data with missing values\n data_for_node = track_data_df[node_name].dropna(axis=\"index\", how=\"any\")\n\n node_trajectory = data_for_node[[\"x\", \"y\"]].to_numpy()\n confidence = data_for_node[\"score\"].to_numpy()\n\n reference_frame = (\n \"The coordinates are in (x, y) relative to the top-left of the image. \"\n \"Coordinates refer to the midpoint of the pixel. \"\n \"That is, t the midpoint of the top-left pixel is at (0, 0), whereas \"\n \"the top-left corner of that same pixel is at (-0.5, -0.5).\"\n )\n\n pose_estimation_kwargs = dict(\n name=f\"{node_name}\",\n description=f\"Sequential trajectory of {node_name}.\",\n data=node_trajectory,\n unit=\"pixels\",\n reference_frame=reference_frame,\n confidence=confidence,\n confidence_definition=\"Point-wise confidence scores.\",\n )\n\n # Add timestamps or only rate if the timestamps are uniform\n frames = data_for_node.index.values\n timestamps_for_data = timestamps[frames] # type: ignore[index]\n sample_periods = np.diff(timestamps_for_data)\n if sample_periods.size == 0:\n rate = None # This is the case with only one data point\n else:\n # Difference below 0.1 ms do not matter for behavior in videos\n uniform_samples = np.unique(sample_periods.round(5)).size == 1\n rate = 1 / sample_periods[0] if uniform_samples else None\n\n if rate:\n # Video sample rates are ints but nwb expect floats\n rate = float(int(rate))\n pose_estimation_kwargs.update(\n rate=rate, starting_time=timestamps_for_data[0]\n )\n else:\n pose_estimation_kwargs.update(timestamps=timestamps_for_data)\n\n # Build the pose estimation object and attach it to the list\n pose_estimation_series = PoseEstimationSeries(**pose_estimation_kwargs)\n pose_estimation_series_list.append(pose_estimation_series)\n\n return pose_estimation_series_list\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.convert_predictions_to_dataframe","title":"convert_predictions_to_dataframe(labels)
","text":"Convert predictions data to a Pandas dataframe.
Parameters:
Name Type Description Defaultlabels
Labels
A general label object.
requiredReturns:
Type DescriptionDataFrame
pd.DataFrame: A pandas data frame containing the structured data with hierarchical columns. The column hierarchy is: \"video_path\", \"skeleton_name\", \"track_name\", \"node_name\", and it is indexed by the frames.
Raises:
Type DescriptionValueError
If no frames in the label objects contain predicted instances.
Source code in sleap_io/io/nwb.py
def convert_predictions_to_dataframe(labels: Labels) -> pd.DataFrame:\n \"\"\"Convert predictions data to a Pandas dataframe.\n\n Args:\n labels: A general label object.\n\n Returns:\n pd.DataFrame: A pandas data frame with the structured data with\n hierarchical columns. The column hierarchy is:\n \"video_path\",\n \"skeleton_name\",\n \"track_name\",\n \"node_name\",\n And it is indexed by the frames.\n\n Raises:\n ValueError: If no frames in the label objects contain predicted instances.\n \"\"\"\n # Form pairs of labeled_frames and predicted instances\n labeled_frames = labels.labeled_frames\n all_frame_instance_tuples = (\n (label_frame, instance) # type: ignore\n for label_frame in labeled_frames\n for instance in label_frame.predicted_instances\n )\n\n # Extract the data\n data_list = list()\n for labeled_frame, instance in all_frame_instance_tuples:\n # Traverse the nodes of the instances's skeleton\n skeleton = instance.skeleton\n for node in skeleton.nodes:\n row_dict = dict(\n frame_idx=labeled_frame.frame_idx,\n x=instance.points[node].x,\n y=instance.points[node].y,\n score=instance.points[node].score, # type: ignore[attr-defined]\n node_name=node.name,\n skeleton_name=skeleton.name,\n track_name=instance.track.name if instance.track else \"untracked\",\n video_path=labeled_frame.video.filename,\n )\n data_list.append(row_dict)\n\n if not data_list:\n raise ValueError(\"No predicted instances found in labels object\")\n\n labels_df = pd.DataFrame(data_list)\n\n # Reformat the data with columns for dict-like hierarchical data access.\n index = [\n \"skeleton_name\",\n \"track_name\",\n \"node_name\",\n \"video_path\",\n \"frame_idx\",\n ]\n\n labels_tidy_df = (\n labels_df.set_index(index)\n .unstack(level=[0, 1, 2, 3])\n .swaplevel(0, -1, axis=1) # video_path on top while x, y score on bottom\n .sort_index(axis=1) # Better format for columns\n .sort_index(axis=0) # Sorts by frames\n )\n\n return labels_tidy_df\n
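A usage sketch (hypothetical input path):

import sleap_io as sio
from sleap_io.io.nwb import convert_predictions_to_dataframe

labels = sio.load_slp("predictions.slp")
df = convert_predictions_to_dataframe(labels)

# Hierarchical columns: video_path / skeleton_name / track_name / node_name,
# with x, y and score at the innermost level; rows are indexed by frame.
print(df.head())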
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.get_processing_module_for_video","title":"get_processing_module_for_video(processing_module_name, nwbfile)
","text":"Auxiliary function to create a processing module.
Checks whether the processing module exists and creates it if not available.
Parameters:
Name Type Description Defaultprocessing_module_name
str
The name of the processing module.
requirednwbfile
NWBFile
The nwbfile to attach the processing module to.
requiredReturns:
Name Type DescriptionProcessingModule
ProcessingModule
An nwb processing module with the desired name.
Source code in sleap_io/io/nwb.py
def get_processing_module_for_video(\n processing_module_name: str, nwbfile: NWBFile\n) -> ProcessingModule:\n \"\"\"Auxiliary function to create a processing module.\n\n Checks for the processing module existence and creates if not available.\n\n Args:\n processing_module_name (str): The name of the processing module.\n nwbfile (NWBFile): The nwbfile to attach the processing module to.\n\n Returns:\n ProcessingModule: An nwb processing module with the desired name.\n \"\"\"\n description = \"Processed SLEAP data\"\n processing_module = (\n nwbfile.processing[processing_module_name]\n if processing_module_name in nwbfile.processing\n else nwbfile.create_processing_module(\n name=processing_module_name, description=description\n )\n )\n return processing_module\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.get_timestamps","title":"get_timestamps(series)
","text":"Return a vector of timestamps for a PoseEstimationSeries
.
sleap_io/io/nwb.py
def get_timestamps(series: PoseEstimationSeries) -> np.ndarray:\n \"\"\"Return a vector of timestamps for a `PoseEstimationSeries`.\"\"\"\n if series.timestamps is not None:\n return np.asarray(series.timestamps)\n else:\n return np.arange(series.data.shape[0]) * series.rate + series.starting_time\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.read_nwb","title":"read_nwb(path)
","text":"Read an NWB formatted file to a SLEAP Labels
object.
Parameters:
Name Type Description Defaultpath
str
Path to an NWB file (.nwb
).
Returns:
Type DescriptionLabels
A Labels
object.
sleap_io/io/nwb.py
def read_nwb(path: str) -> Labels:\n \"\"\"Read an NWB formatted file to a SLEAP `Labels` object.\n\n Args:\n path: Path to an NWB file (`.nwb`).\n\n Returns:\n A `Labels` object.\n \"\"\"\n with NWBHDF5IO(path, mode=\"r\", load_namespaces=True) as io:\n read_nwbfile = io.read()\n nwb_file = read_nwbfile.processing\n\n # Get list of videos\n video_keys: List[str] = [key for key in nwb_file.keys() if \"SLEAP_VIDEO\" in key]\n video_tracks = dict()\n\n # Get track keys\n test_processing_module: ProcessingModule = nwb_file[video_keys[0]]\n track_keys: List[str] = list(test_processing_module.fields[\"data_interfaces\"])\n\n # Get track\n test_pose_estimation: PoseEstimation = test_processing_module[track_keys[0]]\n node_names = test_pose_estimation.nodes[:]\n edge_inds = test_pose_estimation.edges[:]\n\n for processing_module in nwb_file.values():\n # Get track keys\n _track_keys: List[str] = list(processing_module.fields[\"data_interfaces\"])\n is_tracked: bool = re.sub(\"[0-9]+\", \"\", _track_keys[0]) == \"track\"\n\n # Figure out the max number of frames and the canonical timestamps\n timestamps = np.empty(())\n for track_key in _track_keys:\n for node_name in node_names:\n pose_estimation_series = processing_module[track_key][node_name]\n timestamps = np.union1d(\n timestamps, get_timestamps(pose_estimation_series)\n )\n timestamps = np.sort(timestamps)\n\n # Recreate Labels numpy (same as output of Labels.numpy())\n n_tracks = len(_track_keys)\n n_frames = len(timestamps)\n n_nodes = len(node_names)\n tracks_numpy = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, np.float32)\n confidence = np.full((n_frames, n_tracks, n_nodes), np.nan, np.float32)\n for track_idx, track_key in enumerate(_track_keys):\n pose_estimation = processing_module[track_key]\n\n for node_idx, node_name in enumerate(node_names):\n pose_estimation_series = pose_estimation[node_name]\n frame_inds = np.searchsorted(\n timestamps, get_timestamps(pose_estimation_series)\n )\n tracks_numpy[frame_inds, track_idx, node_idx, :] = (\n pose_estimation_series.data[:]\n )\n confidence[frame_inds, track_idx, node_idx] = (\n pose_estimation_series.confidence[:]\n )\n\n video_tracks[Path(pose_estimation.original_videos[0]).as_posix()] = (\n tracks_numpy,\n confidence,\n is_tracked,\n )\n\n # Create skeleton\n skeleton = Skeleton(\n nodes=node_names,\n edges=edge_inds,\n )\n\n # Add instances to labeled frames\n lfs = []\n for video_fn, (tracks_numpy, confidence, is_tracked) in video_tracks.items():\n video = Video(filename=video_fn)\n n_frames, n_tracks, n_nodes, _ = tracks_numpy.shape\n tracks = [Track(name=f\"track{track_idx}\") for track_idx in range(n_tracks)]\n for frame_idx, (frame_pts, frame_confs) in enumerate(\n zip(tracks_numpy, confidence)\n ):\n insts: List[Union[Instance, PredictedInstance]] = []\n for track, (inst_pts, inst_confs) in zip(\n tracks, zip(frame_pts, frame_confs)\n ):\n if np.isnan(inst_pts).all():\n continue\n insts.append(\n PredictedInstance.from_numpy(\n points=inst_pts, # (n_nodes, 2)\n point_scores=inst_confs, # (n_nodes,)\n instance_score=inst_confs.mean(), # ()\n skeleton=skeleton,\n track=track if is_tracked else None,\n )\n )\n if len(insts) > 0:\n lfs.append(\n LabeledFrame(video=video, frame_idx=frame_idx, instances=insts)\n )\n labels = Labels(lfs)\n labels.provenance[\"filename\"] = path\n return labels\n
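A usage sketch (hypothetical path):

from sleap_io.io import nwb

labels = nwb.read_nwb("session.nwb")
print(len(labels.labeled_frames), len(labels.videos))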
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.write_nwb","title":"write_nwb(labels, nwbfile_path, nwb_file_kwargs=None, pose_estimation_metadata=None)
","text":"Write labels to an nwb file and save it to the nwbfile_path given.
Parameters:
Name Type Description Defaultlabels
Labels
A general Labels
object.
nwbfile_path
str
The path where the nwb file is to be written.
requirednwb_file_kwargs
Optional[dict]
A dict containing metadata for the nwbfile. Example: nwb_file_kwargs = { 'session_description': 'your_session_description', 'identifier': 'your_session_identifier', } For a full list of possible values see: https://pynwb.readthedocs.io/en/stable/pynwb.file.html#pynwb.file.NWBFile
Defaults to None, in which case default values are used to generate the nwb file.
None
pose_estimation_metadata
Optional[dict]
This argument has a dual purpose:
1) It can be used to pass time information about the video, which is necessary for synchronizing frames in pose estimation tracking to other modalities. The video timestamps can be passed with the key video_timestamps, or the sampling rate with the key video_sample_rate.
e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps) or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz
2) The other use of this dictionary is to overwrite the sleap-io default arguments for the PoseEstimation container. See https://github.com/rly/ndx-pose for a full list of arguments.
None
Source code in sleap_io/io/nwb.py
def write_nwb(\n labels: Labels,\n nwbfile_path: str,\n nwb_file_kwargs: Optional[dict] = None,\n pose_estimation_metadata: Optional[dict] = None,\n):\n \"\"\"Write labels to an nwb file and save it to the nwbfile_path given.\n\n Args:\n labels: A general `Labels` object.\n nwbfile_path: The path where the nwb file is to be written.\n nwb_file_kwargs: A dict containing metadata to the nwbfile. Example:\n nwb_file_kwargs = {\n 'session_description: 'your_session_description',\n 'identifier': 'your session_identifier',\n }\n For a full list of possible values see:\n https://pynwb.readthedocs.io/en/stable/pynwb.file.html#pynwb.file.NWBFile\n\n Defaults to None and default values are used to generate the nwb file.\n\n pose_estimation_metadata: This argument has a dual purpose:\n\n 1) It can be used to pass time information about the video which is\n necessary for synchronizing frames in pose estimation tracking to other\n modalities. Either the video timestamps can be passed to\n This can be used to pass the timestamps with the key `video_timestamps`\n or the sampling rate with key`video_sample_rate`.\n\n e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps)\n or pose_estimation_metadata[\"video_sample_rate] = 15 # In Hz\n\n 2) The other use of this dictionary is to ovewrite sleap-io default\n arguments for the PoseEstimation container.\n see https://github.com/rly/ndx-pose for a full list or arguments.\n \"\"\"\n nwb_file_kwargs = nwb_file_kwargs or dict()\n\n # Add required values for nwbfile if not present\n session_description = nwb_file_kwargs.get(\n \"session_description\", \"Processed SLEAP pose data\"\n )\n session_start_time = nwb_file_kwargs.get(\n \"session_start_time\", datetime.datetime.now(datetime.timezone.utc)\n )\n identifier = nwb_file_kwargs.get(\"identifier\", str(uuid.uuid1()))\n\n nwb_file_kwargs.update(\n session_description=session_description,\n session_start_time=session_start_time,\n identifier=identifier,\n )\n\n nwbfile = NWBFile(**nwb_file_kwargs)\n nwbfile = append_nwb_data(labels, nwbfile, pose_estimation_metadata)\n\n with NWBHDF5IO(str(nwbfile_path), \"w\") as io:\n io.write(nwbfile)\n
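A usage sketch (hypothetical paths and timestamps; the timestamps array must cover all frame indices present in the labels):

import numpy as np
import sleap_io as sio
from sleap_io.io import nwb

labels = sio.load_slp("predictions.slp")

nwb.write_nwb(
    labels,
    "session.nwb",
    nwb_file_kwargs={
        "session_description": "SLEAP pose export",
        "identifier": "session-001",
    },
    pose_estimation_metadata={"video_timestamps": np.arange(3600) / 30.0},
)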
"},{"location":"reference/sleap_io/io/slp/","title":"slp","text":""},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp","title":"sleap_io.io.slp
","text":"This module handles direct I/O operations for working with .slp files.
Classes:
Name Description
InstanceType
Enumeration of instance types to integers.
Functions:
Name Description
embed_frames
Embed frames in a SLEAP labels file.
embed_video
Embed frames of a video in a SLEAP labels file.
embed_videos
Embed videos in a SLEAP labels file.
make_video
Create a Video
object from a JSON dictionary.
read_instances
Read Instance
dataset in a SLEAP labels file.
read_labels
Read a SLEAP labels file.
read_metadata
Read metadata from a SLEAP labels file.
read_points
Read Point
dataset from a SLEAP labels file.
read_pred_points
Read PredictedPoint
dataset from a SLEAP labels file.
read_skeletons
Read Skeleton
dataset from a SLEAP labels file.
read_suggestions
Read SuggestionFrame
dataset in a SLEAP labels file.
read_tracks
Read Track
dataset in a SLEAP labels file.
read_videos
Read Video
dataset in a SLEAP labels file.
sanitize_filename
Sanitize a filename to a canonical posix-compatible format.
serialize_skeletons
Serialize a list of Skeleton
objects to JSON-compatible dicts.
video_to_dict
Convert a Video
object to a JSON-compatible dictionary.
write_labels
Write a SLEAP labels file.
write_lfs
Write labeled frames, instances and points to a SLEAP labels file.
write_metadata
Write metadata to a SLEAP labels file.
write_suggestions
Write track metadata to a SLEAP labels file.
write_tracks
Write track metadata to a SLEAP labels file.
write_videos
Write video metadata to a SLEAP labels file.
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.InstanceType","title":"InstanceType
","text":" Bases: IntEnum
Enumeration of instance types to integers.
Source code in sleap_io/io/slp.py
class InstanceType(IntEnum):\n \"\"\"Enumeration of instance types to integers.\"\"\"\n\n USER = 0\n PREDICTED = 1\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_frames","title":"embed_frames(labels_path, labels, embed, image_format='png')
","text":"Embed frames in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredlabels
Labels
A Labels
object to embed in the labels file.
embed
list[tuple[Video, int]]
A list of tuples of (video, frame_idx)
specifying the frames to embed.
image_format
str
The image format to use for embedding. Valid formats are \"png\" (the default), \"jpg\" or \"hdf5\".
'png'
Notes: This function will embed the frames in the labels file and update the Videos
and Labels
objects in place.
sleap_io/io/slp.py
def embed_frames(\n labels_path: str,\n labels: Labels,\n embed: list[tuple[Video, int]],\n image_format: str = \"png\",\n):\n \"\"\"Embed frames in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to embed in the labels file.\n embed: A list of tuples of `(video, frame_idx)` specifying the frames to embed.\n image_format: The image format to use for embedding. Valid formats are \"png\"\n (the default), \"jpg\" or \"hdf5\".\n\n Notes:\n This function will embed the frames in the labels file and update the `Videos`\n and `Labels` objects in place.\n \"\"\"\n to_embed_by_video = {}\n for video, frame_idx in embed:\n if video not in to_embed_by_video:\n to_embed_by_video[video] = []\n to_embed_by_video[video].append(frame_idx)\n\n for video in to_embed_by_video:\n to_embed_by_video[video] = np.unique(to_embed_by_video[video]).tolist()\n\n replaced_videos = {}\n for video, frame_inds in to_embed_by_video.items():\n video_ind = labels.videos.index(video)\n embedded_video = embed_video(\n labels_path,\n video,\n group=f\"video{video_ind}\",\n frame_inds=frame_inds,\n image_format=image_format,\n )\n\n labels.videos[video_ind] = embedded_video\n replaced_videos[video] = embedded_video\n\n if len(replaced_videos) > 0:\n labels.replace_videos(video_map=replaced_videos)\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_video","title":"embed_video(labels_path, video, group, frame_inds, image_format='png', fixed_length=True)
","text":"Embed frames of a video in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideo
Video
A Video
object to embed in the labels file.
group
str
The name of the group to store the embedded video in. Image data will be stored in a dataset named {group}/video
. Frame indices will be stored in a data set named {group}/frame_numbers
.
frame_inds
list[int]
A list of frame indices to embed.
requiredimage_format
str
The image format to use for embedding. Valid formats are \"png\" (the default), \"jpg\" or \"hdf5\".
'png'
fixed_length
bool
If True
(the default), the embedded images will be padded to the length of the largest image. If False
, the images will be stored as variable length, which is smaller but may not be supported by all readers.
True
Returns:
Type DescriptionVideo
An embedded Video
object.
If the video is already embedded, the original video will be returned. If not, a new Video
object will be created with the embedded data.
sleap_io/io/slp.py
def embed_video(\n labels_path: str,\n video: Video,\n group: str,\n frame_inds: list[int],\n image_format: str = \"png\",\n fixed_length: bool = True,\n) -> Video:\n \"\"\"Embed frames of a video in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n video: A `Video` object to embed in the labels file.\n group: The name of the group to store the embedded video in. Image data will be\n stored in a dataset named `{group}/video`. Frame indices will be stored\n in a data set named `{group}/frame_numbers`.\n frame_inds: A list of frame indices to embed.\n image_format: The image format to use for embedding. Valid formats are \"png\"\n (the default), \"jpg\" or \"hdf5\".\n fixed_length: If `True` (the default), the embedded images will be padded to the\n length of the largest image. If `False`, the images will be stored as\n variable length, which is smaller but may not be supported by all readers.\n\n Returns:\n An embedded `Video` object.\n\n If the video is already embedded, the original video will be returned. If not,\n a new `Video` object will be created with the embedded data.\n \"\"\"\n # Load the image data and optionally encode it.\n imgs_data = []\n for frame_idx in frame_inds:\n frame = video[frame_idx]\n\n if image_format == \"hdf5\":\n img_data = frame\n else:\n if \"cv2\" in sys.modules:\n img_data = np.squeeze(\n cv2.imencode(\".\" + image_format, frame)[1]\n ).astype(\"int8\")\n else:\n if frame.shape[-1] == 1:\n frame = frame.squeeze(axis=-1)\n img_data = np.frombuffer(\n iio.imwrite(\"<bytes>\", frame, extension=\".\" + image_format),\n dtype=\"int8\",\n )\n\n imgs_data.append(img_data)\n\n # Write the image data to the labels file.\n with h5py.File(labels_path, \"a\") as f:\n if image_format == \"hdf5\":\n f.create_dataset(\n f\"{group}/video\", data=imgs_data, compression=\"gzip\", chunks=True\n )\n else:\n if fixed_length:\n ds = f.create_dataset(\n f\"{group}/video\",\n shape=(len(imgs_data), max(len(img) for img in imgs_data)),\n dtype=\"int8\",\n compression=\"gzip\",\n )\n for i, img in enumerate(imgs_data):\n ds[i, : len(img)] = img\n else:\n ds = f.create_dataset(\n f\"{group}/video\",\n shape=(len(imgs_data),),\n dtype=h5py.special_dtype(vlen=np.dtype(\"int8\")),\n )\n for i, img in enumerate(imgs_data):\n ds[i] = img\n\n # Store metadata.\n ds.attrs[\"format\"] = image_format\n video_shape = video.shape\n (\n ds.attrs[\"frames\"],\n ds.attrs[\"height\"],\n ds.attrs[\"width\"],\n ds.attrs[\"channels\"],\n ) = video_shape\n\n # Store frame indices.\n f.create_dataset(f\"{group}/frame_numbers\", data=frame_inds)\n\n # Store source video.\n if video.source_video is not None:\n # If this is already an embedded dataset, retain the previous source video.\n source_video = video.source_video\n else:\n source_video = video\n\n # Create a new video object with the embedded data.\n embedded_video = Video(\n filename=labels_path,\n backend=VideoBackend.from_filename(\n labels_path,\n dataset=f\"{group}/video\",\n grayscale=video.grayscale,\n keep_open=False,\n ),\n source_video=source_video,\n )\n\n grp = f.require_group(f\"{group}/source_video\")\n grp.attrs[\"json\"] = json.dumps(\n video_to_dict(source_video), separators=(\",\", \":\")\n )\n\n return embedded_video\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_videos","title":"embed_videos(labels_path, labels, embed)
","text":"Embed videos in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file to save.
requiredlabels
Labels
A Labels
object to save.
embed
bool | str | list[tuple[Video, int]]
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contain embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
required
Source code in sleap_io/io/slp.py
def embed_videos(\n labels_path: str, labels: Labels, embed: bool | str | list[tuple[Video, int]]\n):\n \"\"\"Embed videos in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file to save.\n labels: A `Labels` object to save.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n if embed is True:\n embed = \"all\"\n if embed == \"user\":\n embed = [(lf.video, lf.frame_idx) for lf in labels.user_labeled_frames]\n elif embed == \"suggestions\":\n embed = [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"user+suggestions\":\n embed = [(lf.video, lf.frame_idx) for lf in labels.user_labeled_frames]\n embed += [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"all\":\n embed = [(lf.video, lf.frame_idx) for lf in labels]\n embed += [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"source\":\n embed = []\n elif isinstance(embed, list):\n embed = embed\n else:\n raise ValueError(f\"Invalid value for embed: {embed}\")\n\n embed_frames(labels_path, labels, embed)\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.make_video","title":"make_video(labels_path, video_json, open_backend=True)
","text":"Create a Video
object from a JSON dictionary.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideo_json
dict
A dictionary containing the video metadata.
requiredopen_backend
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Source code in sleap_io/io/slp.py
def make_video(\n labels_path: str,\n video_json: dict,\n open_backend: bool = True,\n) -> Video:\n \"\"\"Create a `Video` object from a JSON dictionary.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n video_json: A dictionary containing the video metadata.\n open_backend: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n \"\"\"\n backend_metadata = video_json[\"backend\"]\n video_path = backend_metadata[\"filename\"]\n\n # Marker for embedded videos.\n source_video = None\n is_embedded = False\n if video_path == \".\":\n video_path = labels_path\n is_embedded = True\n\n # Basic path resolution.\n video_path = Path(sanitize_filename(video_path))\n\n if is_embedded:\n # Try to recover the source video.\n with h5py.File(labels_path, \"r\") as f:\n dataset = backend_metadata[\"dataset\"]\n if dataset.endswith(\"/video\"):\n dataset = dataset[:-6]\n if dataset in f:\n source_video_json = json.loads(\n f[f\"{dataset}/source_video\"].attrs[\"json\"]\n )\n source_video = make_video(\n labels_path,\n source_video_json,\n open_backend=open_backend,\n )\n\n backend = None\n if open_backend:\n try:\n if not is_file_accessible(video_path):\n # Check for the same filename in the same directory as the labels file.\n candidate_video_path = Path(labels_path).parent / video_path.name\n if is_file_accessible(candidate_video_path):\n video_path = candidate_video_path\n else:\n # TODO (TP): Expand capabilities of path resolution to support more\n # complex path finding strategies.\n pass\n except (OSError, PermissionError, FileNotFoundError):\n pass\n\n # Convert video path to string.\n video_path = video_path.as_posix()\n\n if \"filenames\" in backend_metadata:\n # This is an ImageVideo.\n # TODO: Path resolution.\n video_path = backend_metadata[\"filenames\"]\n video_path = [Path(sanitize_filename(p)) for p in video_path]\n\n try:\n grayscale = None\n if \"grayscale\" in backend_metadata:\n grayscale = backend_metadata[\"grayscale\"]\n elif \"shape\" in backend_metadata:\n grayscale = backend_metadata[\"shape\"][-1] == 1\n backend = VideoBackend.from_filename(\n video_path,\n dataset=backend_metadata.get(\"dataset\", None),\n grayscale=grayscale,\n input_format=backend_metadata.get(\"input_format\", None),\n )\n except Exception:\n backend = None\n\n return Video(\n filename=video_path,\n backend=backend,\n backend_metadata=backend_metadata,\n source_video=source_video,\n open_backend=open_backend,\n )\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_instances","title":"read_instances(labels_path, skeletons, tracks, points, pred_points, format_id)
","text":"Read Instance
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredskeletons
list[Skeleton]
A list of Skeleton
objects (see read_skeletons
).
tracks
list[Track]
A list of Track
objects (see read_tracks
).
points
list[Point]
A list of Point
objects (see read_points
).
pred_points
list[PredictedPoint]
A list of PredictedPoint
objects (see read_pred_points
).
format_id
float
The format version identifier used to specify the format of the input file.
requiredReturns:
Type Descriptionlist[Union[Instance, PredictedInstance]]
A list of Instance
and/or PredictedInstance
objects.
sleap_io/io/slp.py
def read_instances(\n labels_path: str,\n skeletons: list[Skeleton],\n tracks: list[Track],\n points: list[Point],\n pred_points: list[PredictedPoint],\n format_id: float,\n) -> list[Union[Instance, PredictedInstance]]:\n \"\"\"Read `Instance` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n skeletons: A list of `Skeleton` objects (see `read_skeletons`).\n tracks: A list of `Track` objects (see `read_tracks`).\n points: A list of `Point` objects (see `read_points`).\n pred_points: A list of `PredictedPoint` objects (see `read_pred_points`).\n format_id: The format version identifier used to specify the format of the input\n file.\n\n Returns:\n A list of `Instance` and/or `PredictedInstance` objects.\n \"\"\"\n instances_data = read_hdf5_dataset(labels_path, \"instances\")\n\n instances = {}\n from_predicted_pairs = []\n for instance_data in instances_data:\n if format_id < 1.2:\n (\n instance_id,\n instance_type,\n frame_id,\n skeleton_id,\n track_id,\n from_predicted,\n instance_score,\n point_id_start,\n point_id_end,\n ) = instance_data\n tracking_score = np.zeros_like(instance_score)\n else:\n (\n instance_id,\n instance_type,\n frame_id,\n skeleton_id,\n track_id,\n from_predicted,\n instance_score,\n point_id_start,\n point_id_end,\n tracking_score,\n ) = instance_data\n\n if instance_type == InstanceType.USER:\n instances[instance_id] = Instance(\n points=points[point_id_start:point_id_end], # type: ignore[arg-type]\n skeleton=skeletons[skeleton_id],\n track=tracks[track_id] if track_id >= 0 else None,\n )\n if from_predicted >= 0:\n from_predicted_pairs.append((instance_id, from_predicted))\n elif instance_type == InstanceType.PREDICTED:\n instances[instance_id] = PredictedInstance(\n points=pred_points[point_id_start:point_id_end], # type: ignore[arg-type]\n skeleton=skeletons[skeleton_id],\n track=tracks[track_id] if track_id >= 0 else None,\n score=instance_score,\n tracking_score=tracking_score,\n )\n\n # Link instances based on from_predicted field.\n for instance_id, from_predicted in from_predicted_pairs:\n instances[instance_id].from_predicted = instances[from_predicted]\n\n # Convert instances back to list.\n instances = list(instances.values())\n\n return instances\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_labels","title":"read_labels(labels_path, open_videos=True)
","text":"Read a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredopen_videos
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type DescriptionLabels
The processed Labels
object.
sleap_io/io/slp.py
def read_labels(labels_path: str, open_videos: bool = True) -> Labels:\n \"\"\"Read a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The processed `Labels` object.\n \"\"\"\n tracks = read_tracks(labels_path)\n videos = read_videos(labels_path, open_backend=open_videos)\n skeletons = read_skeletons(labels_path)\n points = read_points(labels_path)\n pred_points = read_pred_points(labels_path)\n format_id = read_hdf5_attrs(labels_path, \"metadata\", \"format_id\")\n instances = read_instances(\n labels_path, skeletons, tracks, points, pred_points, format_id\n )\n suggestions = read_suggestions(labels_path, videos)\n metadata = read_metadata(labels_path)\n provenance = metadata.get(\"provenance\", dict())\n\n frames = read_hdf5_dataset(labels_path, \"frames\")\n labeled_frames = []\n for _, video_id, frame_idx, instance_id_start, instance_id_end in frames:\n labeled_frames.append(\n LabeledFrame(\n video=videos[video_id],\n frame_idx=int(frame_idx),\n instances=instances[instance_id_start:instance_id_end],\n )\n )\n\n labels = Labels(\n labeled_frames=labeled_frames,\n videos=videos,\n skeletons=skeletons,\n tracks=tracks,\n suggestions=suggestions,\n provenance=provenance,\n )\n labels.provenance[\"filename\"] = labels_path\n\n return labels\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_metadata","title":"read_metadata(labels_path)
","text":"Read metadata from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptiondict
A dict containing the metadata from a SLEAP labels file.
Source code in sleap_io/io/slp.py
def read_metadata(labels_path: str) -> dict:\n \"\"\"Read metadata from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A dict containing the metadata from a SLEAP labels file.\n \"\"\"\n md = read_hdf5_attrs(labels_path, \"metadata\", \"json\")\n return json.loads(md.decode())\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_points","title":"read_points(labels_path)
","text":"Read Point
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptionlist[Point]
A list of Point
objects.
sleap_io/io/slp.py
def read_points(labels_path: str) -> list[Point]:\n \"\"\"Read `Point` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `Point` objects.\n \"\"\"\n pts = read_hdf5_dataset(labels_path, \"points\")\n return [\n Point(x=x, y=y, visible=visible, complete=complete)\n for x, y, visible, complete in pts\n ]\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_pred_points","title":"read_pred_points(labels_path)
","text":"Read PredictedPoint
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptionlist[PredictedPoint]
A list of PredictedPoint
objects.
sleap_io/io/slp.py
def read_pred_points(labels_path: str) -> list[PredictedPoint]:\n \"\"\"Read `PredictedPoint` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `PredictedPoint` objects.\n \"\"\"\n pred_pts = read_hdf5_dataset(labels_path, \"pred_points\")\n return [\n PredictedPoint(x=x, y=y, visible=visible, complete=complete, score=score)\n for x, y, visible, complete, score in pred_pts\n ]\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_skeletons","title":"read_skeletons(labels_path)
","text":"Read Skeleton
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string that contains the path to the labels file.
requiredReturns:
Type Descriptionlist[Skeleton]
A list of Skeleton
objects.
sleap_io/io/slp.py
def read_skeletons(labels_path: str) -> list[Skeleton]:\n \"\"\"Read `Skeleton` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string that contains the path to the labels file.\n\n Returns:\n A list of `Skeleton` objects.\n \"\"\"\n metadata = read_metadata(labels_path)\n\n # Get node names. This is a superset of all nodes across all skeletons. Note that\n # node ordering is specific to each skeleton, so we'll need to fix this afterwards.\n node_names = [x[\"name\"] for x in metadata[\"nodes\"]]\n\n skeleton_objects = []\n for skel in metadata[\"skeletons\"]:\n # Parse out the cattr-based serialization stuff from the skeleton links.\n edge_inds, symmetry_inds = [], []\n for link in skel[\"links\"]:\n if \"py/reduce\" in link[\"type\"]:\n edge_type = link[\"type\"][\"py/reduce\"][1][\"py/tuple\"][0]\n else:\n edge_type = link[\"type\"][\"py/id\"]\n\n if edge_type == 1: # 1 -> real edge, 2 -> symmetry edge\n edge_inds.append((link[\"source\"], link[\"target\"]))\n\n elif edge_type == 2:\n symmetry_inds.append((link[\"source\"], link[\"target\"]))\n\n # Re-index correctly.\n skeleton_node_inds = [node[\"id\"] for node in skel[\"nodes\"]]\n sorted_node_names = [node_names[i] for i in skeleton_node_inds]\n\n # Create nodes.\n nodes = []\n for name in sorted_node_names:\n nodes.append(Node(name=name))\n\n # Create edges.\n edge_inds = [\n (skeleton_node_inds.index(s), skeleton_node_inds.index(d))\n for s, d in edge_inds\n ]\n edges = []\n for edge in edge_inds:\n edges.append(Edge(source=nodes[edge[0]], destination=nodes[edge[1]]))\n\n # Create symmetries.\n symmetry_inds = [\n (skeleton_node_inds.index(s), skeleton_node_inds.index(d))\n for s, d in symmetry_inds\n ]\n symmetries = []\n for symmetry in symmetry_inds:\n symmetries.append(Symmetry([nodes[symmetry[0]], nodes[symmetry[1]]]))\n\n # Create the full skeleton.\n skel = Skeleton(\n nodes=nodes, edges=edges, symmetries=symmetries, name=skel[\"graph\"][\"name\"]\n )\n skeleton_objects.append(skel)\n return skeleton_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_suggestions","title":"read_suggestions(labels_path, videos)
","text":"Read SuggestionFrame
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideos
list[Video]
A list of Video
objects.
Returns:
Type Descriptionlist[SuggestionFrame]
A list of SuggestionFrame
objects.
sleap_io/io/slp.py
def read_suggestions(labels_path: str, videos: list[Video]) -> list[SuggestionFrame]:\n \"\"\"Read `SuggestionFrame` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n videos: A list of `Video` objects.\n\n Returns:\n A list of `SuggestionFrame` objects.\n \"\"\"\n try:\n suggestions = read_hdf5_dataset(labels_path, \"suggestions_json\")\n except KeyError:\n return []\n suggestions = [json.loads(x) for x in suggestions]\n suggestions_objects = []\n for suggestion in suggestions:\n suggestions_objects.append(\n SuggestionFrame(\n video=videos[int(suggestion[\"video\"])],\n frame_idx=suggestion[\"frame_idx\"],\n )\n )\n return suggestions_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_tracks","title":"read_tracks(labels_path)
","text":"Read Track
dataset in a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
Returns:
list[Track]: A list of Track objects.
sleap_io/io/slp.py
def read_tracks(labels_path: str) -> list[Track]:\n \"\"\"Read `Track` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `Track` objects.\n \"\"\"\n tracks = [json.loads(x) for x in read_hdf5_dataset(labels_path, \"tracks_json\")]\n track_objects = []\n for track in tracks:\n track_objects.append(Track(name=track[1]))\n return track_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_videos","title":"read_videos(labels_path, open_backend=True)
","text":"Read Video
dataset in a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
open_backend (bool): If True (the default), attempt to open the video backend for I/O. If False, the backend will not be opened (useful for reading metadata when the video files are not available). Default: True.
Returns:
list[Video]: A list of Video objects.
sleap_io/io/slp.py
def read_videos(labels_path: str, open_backend: bool = True) -> list[Video]:\n \"\"\"Read `Video` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n open_backend: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n A list of `Video` objects.\n \"\"\"\n videos = []\n videos_metadata = read_hdf5_dataset(labels_path, \"videos_json\")\n for video_data in videos_metadata:\n video_json = json.loads(video_data)\n video = make_video(labels_path, video_json, open_backend=open_backend)\n videos.append(video)\n return videos\n
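A short sketch of reading video metadata without opening the underlying media (the path is a placeholder):
from sleap_io.io.slp import read_videos
# open_backend=False lets metadata load even when the media files are unavailable.
videos = read_videos("labels.slp", open_backend=False)
for video in videos:
    print(video.filename)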
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.sanitize_filename","title":"sanitize_filename(filename)
","text":"Sanitize a filename to a canonical posix-compatible format.
Parameters:
filename (str | Path | list[str] | list[Path]): A string or Path object, or a list of either, to sanitize. Required.
Returns:
str | list[str]: A sanitized, posix-formatted filename as a string (or a list of strings if a list was provided) with forward slashes.
Source code in sleap_io/io/slp.py
def sanitize_filename(\n filename: str | Path | list[str] | list[Path],\n) -> str | list[str]:\n \"\"\"Sanitize a filename to a canonical posix-compatible format.\n\n Args:\n filename: A string or `Path` object or list of either to sanitize.\n\n Returns:\n A sanitized filename as a string (or list of strings if a list was provided)\n with forward slashes and posix-formatted.\n \"\"\"\n if isinstance(filename, list):\n return [sanitize_filename(f) for f in filename]\n return Path(filename).as_posix().replace(\"\\\\\", \"/\")\n
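For example (illustrative paths only):
from sleap_io.io.slp import sanitize_filename
print(sanitize_filename("C:\\data\\session1\\video.mp4"))  # -> "C:/data/session1/video.mp4"
print(sanitize_filename(["clips/a.mp4", "clips\\b.mp4"]))  # -> ["clips/a.mp4", "clips/b.mp4"]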
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.serialize_skeletons","title":"serialize_skeletons(skeletons)
","text":"Serialize a list of Skeleton
objects to JSON-compatible dicts.
Parameters:
skeletons (list[Skeleton]): A list of Skeleton objects. Required.
Returns:
tuple[list[dict], list[dict]]: A tuple of nodes_dicts, skeletons_dicts.
nodes_dicts is a list of dicts containing the nodes in all the skeletons.
skeletons_dicts is a list of dicts containing the skeletons.
Notes:
This function attempts to replicate the serialization of skeletons in legacy SLEAP, which relies on a combination of networkx's graph serialization and our own metadata used to store nodes and edges independent of the graph structure.
However, because sleap-io does not currently load the legacy metadata, this function will not produce byte-level compatible serialization with legacy formats, even though the ordering and all attributes of nodes and edges should match up.
Source code in sleap_io/io/slp.py
def serialize_skeletons(skeletons: list[Skeleton]) -> tuple[list[dict], list[dict]]:\n \"\"\"Serialize a list of `Skeleton` objects to JSON-compatible dicts.\n\n Args:\n skeletons: A list of `Skeleton` objects.\n\n Returns:\n A tuple of `nodes_dicts, skeletons_dicts`.\n\n `nodes_dicts` is a list of dicts containing the nodes in all the skeletons.\n\n `skeletons_dicts` is a list of dicts containing the skeletons.\n\n Notes:\n This function attempts to replicate the serialization of skeletons in legacy\n SLEAP which relies on a combination of networkx's graph serialization and our\n own metadata used to store nodes and edges independent of the graph structure.\n\n However, because sleap-io does not currently load in the legacy metadata, this\n function will not produce byte-level compatible serialization with legacy\n formats, even though the ordering and all attributes of nodes and edges should\n match up.\n \"\"\"\n # Create global list of nodes with all nodes from all skeletons.\n nodes_dicts = []\n node_to_id = {}\n for skeleton in skeletons:\n for node in skeleton.nodes:\n if node not in node_to_id:\n # Note: This ID is not the same as the node index in the skeleton in\n # legacy SLEAP, but we do not retain this information in the labels, so\n # IDs will be different.\n #\n # The weight is also kept fixed here, but technically this is not\n # modified or used in legacy SLEAP either.\n #\n # TODO: Store legacy metadata in labels to get byte-level compatibility?\n node_to_id[node] = len(node_to_id)\n nodes_dicts.append({\"name\": node.name, \"weight\": 1.0})\n\n skeletons_dicts = []\n for skeleton in skeletons:\n # Build links dicts for normal edges.\n edges_dicts = []\n for edge_ind, edge in enumerate(skeleton.edges):\n if edge_ind == 0:\n edge_type = {\n \"py/reduce\": [\n {\"py/type\": \"sleap.skeleton.EdgeType\"},\n {\"py/tuple\": [1]}, # 1 = real edge, 2 = symmetry edge\n ]\n }\n else:\n edge_type = {\"py/id\": 1}\n\n edges_dicts.append(\n {\n # Note: Insert idx is not the same as the edge index in the skeleton\n # in legacy SLEAP.\n \"edge_insert_idx\": edge_ind,\n \"key\": 0, # Always 0.\n \"source\": node_to_id[edge.source],\n \"target\": node_to_id[edge.destination],\n \"type\": edge_type,\n }\n )\n\n # Build links dicts for symmetry edges.\n for symmetry_ind, symmetry in enumerate(skeleton.symmetries):\n if symmetry_ind == 0:\n edge_type = {\n \"py/reduce\": [\n {\"py/type\": \"sleap.skeleton.EdgeType\"},\n {\"py/tuple\": [2]}, # 1 = real edge, 2 = symmetry edge\n ]\n }\n else:\n edge_type = {\"py/id\": 2}\n\n src, dst = tuple(symmetry.nodes)\n edges_dicts.append(\n {\n \"key\": 0,\n \"source\": node_to_id[src],\n \"target\": node_to_id[dst],\n \"type\": edge_type,\n }\n )\n\n # Create skeleton dict.\n skeletons_dicts.append(\n {\n \"directed\": True,\n \"graph\": {\n \"name\": skeleton.name,\n \"num_edges_inserted\": len(skeleton.edges),\n },\n \"links\": edges_dicts,\n \"multigraph\": True,\n # In the order in Skeleton.nodes and must match up with nodes_dicts.\n \"nodes\": [{\"id\": node_to_id[node]} for node in skeleton.nodes],\n }\n )\n\n return skeletons_dicts, nodes_dicts\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.video_to_dict","title":"video_to_dict(video)
","text":"Convert a Video
object to a JSON-compatible dictionary.
Parameters:
video (Video): A Video object to convert. Required.
Returns:
dict: A dictionary containing the video metadata.
Source code in sleap_io/io/slp.py
def video_to_dict(video: Video) -> dict:\n \"\"\"Convert a `Video` object to a JSON-compatible dictionary.\n\n Args:\n video: A `Video` object to convert.\n\n Returns:\n A dictionary containing the video metadata.\n \"\"\"\n video_filename = sanitize_filename(video.filename)\n if video.backend is None:\n return {\"filename\": video_filename, \"backend\": video.backend_metadata}\n\n if type(video.backend) == MediaVideo:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"MediaVideo\",\n \"shape\": video.shape,\n \"filename\": video_filename,\n \"grayscale\": video.grayscale,\n \"bgr\": True,\n \"dataset\": \"\",\n \"input_format\": \"\",\n },\n }\n\n elif type(video.backend) == HDF5Video:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"HDF5Video\",\n \"shape\": video.shape,\n \"filename\": (\n \".\" if video.backend.has_embedded_images else video_filename\n ),\n \"dataset\": video.backend.dataset,\n \"input_format\": video.backend.input_format,\n \"convert_range\": False,\n \"has_embedded_images\": video.backend.has_embedded_images,\n \"grayscale\": video.grayscale,\n },\n }\n\n elif type(video.backend) == ImageVideo:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"ImageVideo\",\n \"shape\": video.shape,\n \"filename\": sanitize_filename(video.backend.filename[0]),\n \"filenames\": sanitize_filename(video.backend.filename),\n \"dataset\": video.backend_metadata.get(\"dataset\", None),\n \"grayscale\": video.grayscale,\n \"input_format\": video.backend_metadata.get(\"input_format\", None),\n },\n }\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_labels","title":"write_labels(labels_path, labels, embed=None)
","text":"Write a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file to save. Required.
labels (Labels): A Labels object to save. Required.
embed (bool | str | list[tuple[Video, int]] | None): Frames to embed in the saved labels file. One of None, True, \"all\", \"user\", \"suggestions\", \"user+suggestions\", \"source\", or a list of tuples of (video, frame_idx).
If None is specified (the default) and the labels contain embedded frames, those embedded frames will be re-saved to the new file.
If True or \"all\", all labeled frames and suggested frames will be embedded.
If \"source\" is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
Default: None.
Source code in sleap_io/io/slp.py
def write_labels(\n labels_path: str,\n labels: Labels,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Write a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file to save.\n labels: A `Labels` object to save.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n if Path(labels_path).exists():\n Path(labels_path).unlink()\n\n if embed:\n embed_videos(labels_path, labels, embed)\n write_videos(labels_path, labels.videos, restore_source=(embed == \"source\"))\n write_tracks(labels_path, labels.tracks)\n write_suggestions(labels_path, labels.suggestions, labels.videos)\n write_metadata(labels_path, labels)\n write_lfs(labels_path, labels)\n
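An illustrative call, assuming an existing Labels object (the output filenames are placeholders):
from sleap_io.io.slp import write_labels
# `labels` is assumed to be a Labels object loaded or constructed elsewhere.
write_labels("exported.pkg.slp", labels, embed="user")     # embed user-labeled frames
write_labels("exported_refs.slp", labels, embed="source")  # reference source videos instead of embedding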
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_lfs","title":"write_lfs(labels_path, labels)
","text":"Write labeled frames, instances and points to a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
labels (Labels): A Labels object to store the metadata for. Required.
sleap_io/io/slp.py
def write_lfs(labels_path: str, labels: Labels):\n \"\"\"Write labeled frames, instances and points to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to store the metadata for.\n \"\"\"\n # We store the data in structured arrays for performance, so we first define the\n # dtype fields.\n instance_dtype = np.dtype(\n [\n (\"instance_id\", \"i8\"),\n (\"instance_type\", \"u1\"),\n (\"frame_id\", \"u8\"),\n (\"skeleton\", \"u4\"),\n (\"track\", \"i4\"),\n (\"from_predicted\", \"i8\"),\n (\"score\", \"f4\"),\n (\"point_id_start\", \"u8\"),\n (\"point_id_end\", \"u8\"),\n (\"tracking_score\", \"f4\"), # FORMAT_ID >= 1.2\n ]\n )\n frame_dtype = np.dtype(\n [\n (\"frame_id\", \"u8\"),\n (\"video\", \"u4\"),\n (\"frame_idx\", \"u8\"),\n (\"instance_id_start\", \"u8\"),\n (\"instance_id_end\", \"u8\"),\n ]\n )\n point_dtype = np.dtype(\n [(\"x\", \"f8\"), (\"y\", \"f8\"), (\"visible\", \"?\"), (\"complete\", \"?\")]\n )\n predicted_point_dtype = np.dtype(\n [(\"x\", \"f8\"), (\"y\", \"f8\"), (\"visible\", \"?\"), (\"complete\", \"?\"), (\"score\", \"f8\")]\n )\n\n # Next, we extract the data from the labels object into lists with the same fields.\n frames, instances, points, predicted_points, to_link = [], [], [], [], []\n inst_to_id = {}\n for lf in labels:\n frame_id = len(frames)\n instance_id_start = len(instances)\n for inst in lf:\n instance_id = len(instances)\n inst_to_id[id(inst)] = instance_id\n skeleton_id = labels.skeletons.index(inst.skeleton)\n track = labels.tracks.index(inst.track) if inst.track else -1\n from_predicted = -1\n if inst.from_predicted:\n to_link.append((instance_id, inst.from_predicted))\n\n if type(inst) == Instance:\n instance_type = InstanceType.USER\n score = np.nan\n tracking_score = np.nan\n point_id_start = len(points)\n\n for node in inst.skeleton.nodes:\n pt = inst.points[node]\n points.append([pt.x, pt.y, pt.visible, pt.complete])\n\n point_id_end = len(points)\n\n elif type(inst) == PredictedInstance:\n instance_type = InstanceType.PREDICTED\n score = inst.score\n tracking_score = inst.tracking_score\n point_id_start = len(predicted_points)\n\n for node in inst.skeleton.nodes:\n pt = inst.points[node]\n predicted_points.append(\n [pt.x, pt.y, pt.visible, pt.complete, pt.score]\n )\n\n point_id_end = len(predicted_points)\n\n else:\n raise ValueError(f\"Unknown instance type: {type(inst)}\")\n\n instances.append(\n [\n instance_id,\n int(instance_type),\n frame_id,\n skeleton_id,\n track,\n from_predicted,\n score,\n point_id_start,\n point_id_end,\n tracking_score,\n ]\n )\n\n instance_id_end = len(instances)\n\n frames.append(\n [\n frame_id,\n labels.videos.index(lf.video),\n lf.frame_idx,\n instance_id_start,\n instance_id_end,\n ]\n )\n\n # Link instances based on from_predicted field.\n for instance_id, from_predicted in to_link:\n # Source instance may be missing if predictions were removed from the labels, in\n # which case, remove the link.\n instances[instance_id][5] = inst_to_id.get(id(from_predicted), -1)\n\n # Create structured arrays.\n points = np.array([tuple(x) for x in points], dtype=point_dtype)\n predicted_points = np.array(\n [tuple(x) for x in predicted_points], dtype=predicted_point_dtype\n )\n instances = np.array([tuple(x) for x in instances], dtype=instance_dtype)\n frames = np.array([tuple(x) for x in frames], dtype=frame_dtype)\n\n # Write to file.\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"points\", data=points, dtype=points.dtype)\n 
f.create_dataset(\n \"pred_points\",\n data=predicted_points,\n dtype=predicted_points.dtype,\n )\n f.create_dataset(\n \"instances\",\n data=instances,\n dtype=instances.dtype,\n )\n f.create_dataset(\n \"frames\",\n data=frames,\n dtype=frames.dtype,\n )\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_metadata","title":"write_metadata(labels_path, labels)
","text":"Write metadata to a SLEAP labels file.
This function will write the skeletons and provenance for the labels.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
labels (Labels): A Labels object to store the metadata for. Required.
See also: serialize_skeletons
Source code in sleap_io/io/slp.py
def write_metadata(labels_path: str, labels: Labels):\n \"\"\"Write metadata to a SLEAP labels file.\n\n This function will write the skeletons and provenance for the labels.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to store the metadata for.\n\n See also: serialize_skeletons\n \"\"\"\n skeletons_dicts, nodes_dicts = serialize_skeletons(labels.skeletons)\n\n md = {\n \"version\": \"2.0.0\",\n \"skeletons\": skeletons_dicts,\n \"nodes\": nodes_dicts,\n \"videos\": [],\n \"tracks\": [],\n \"suggestions\": [], # TODO: Handle suggestions metadata.\n \"negative_anchors\": {},\n \"provenance\": labels.provenance,\n }\n\n # Custom encoding.\n for k in md[\"provenance\"]:\n if isinstance(md[\"provenance\"][k], Path):\n # Path -> str\n md[\"provenance\"][k] = md[\"provenance\"][k].as_posix()\n\n with h5py.File(labels_path, \"a\") as f:\n grp = f.require_group(\"metadata\")\n grp.attrs[\"format_id\"] = 1.2\n grp.attrs[\"json\"] = np.bytes_(json.dumps(md, separators=(\",\", \":\")))\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_suggestions","title":"write_suggestions(labels_path, suggestions, videos)
","text":"Write track metadata to a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
suggestions (list[SuggestionFrame]): A list of SuggestionFrame objects to store the metadata for. Required.
videos (list[Video]): A list of Video objects. Required.
sleap_io/io/slp.py
def write_suggestions(\n labels_path: str, suggestions: list[SuggestionFrame], videos: list[Video]\n):\n \"\"\"Write track metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n suggestions: A list of `SuggestionFrame` objects to store the metadata for.\n videos: A list of `Video` objects.\n \"\"\"\n GROUP = 0 # TODO: Handle storing extraneous metadata.\n suggestions_json = []\n for suggestion in suggestions:\n suggestion_dict = {\n \"video\": str(videos.index(suggestion.video)),\n \"frame_idx\": suggestion.frame_idx,\n \"group\": GROUP,\n }\n suggestion_json = np.bytes_(json.dumps(suggestion_dict, separators=(\",\", \":\")))\n suggestions_json.append(suggestion_json)\n\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"suggestions_json\", data=suggestions_json, maxshape=(None,))\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_tracks","title":"write_tracks(labels_path, tracks)
","text":"Write track metadata to a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
tracks (list[Track]): A list of Track objects to store the metadata for. Required.
sleap_io/io/slp.py
def write_tracks(labels_path: str, tracks: list[Track]):\n \"\"\"Write track metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n tracks: A list of `Track` objects to store the metadata for.\n \"\"\"\n # TODO: Add support for track metadata like spawned on frame.\n SPAWNED_ON = 0\n tracks_json = [\n np.bytes_(json.dumps([SPAWNED_ON, track.name], separators=(\",\", \":\")))\n for track in tracks\n ]\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"tracks_json\", data=tracks_json, maxshape=(None,))\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_videos","title":"write_videos(labels_path, videos, restore_source=False)
","text":"Write video metadata to a SLEAP labels file.
Parameters:
labels_path (str): A string path to the SLEAP labels file. Required.
videos (list[Video]): A list of Video objects to store the metadata for. Required.
restore_source (bool): If True, restore source videos if available and do not re-embed the embedded images. If False (the default), re-embed images that were previously embedded. Default: False.
Source code in sleap_io/io/slp.py
def write_videos(labels_path: str, videos: list[Video], restore_source: bool = False):\n \"\"\"Write video metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n videos: A list of `Video` objects to store the metadata for.\n restore_source: If `True`, restore source videos if available and will not\n re-embed the embedded images. If `False` (the default), will re-embed images\n that were previously embedded.\n \"\"\"\n video_jsons = []\n for video_ind, video in enumerate(videos):\n if type(video.backend) == HDF5Video and video.backend.has_embedded_images:\n if restore_source:\n video = video.source_video\n else:\n # If the video has embedded images, embed them images again if we haven't\n # already.\n already_embedded = False\n if Path(labels_path).exists():\n with h5py.File(labels_path, \"r\") as f:\n already_embedded = f\"video{video_ind}/video\" in f\n\n if not already_embedded:\n video = embed_video(\n labels_path,\n video,\n group=f\"video{video_ind}\",\n frame_inds=video.backend.source_inds,\n image_format=video.backend.image_format,\n )\n\n video_json = video_to_dict(video)\n\n video_jsons.append(np.bytes_(json.dumps(video_json, separators=(\",\", \":\"))))\n\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"videos_json\", data=video_jsons, maxshape=(None,))\n
"},{"location":"reference/sleap_io/io/utils/","title":"utils","text":""},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils","title":"sleap_io.io.utils
","text":"Miscellaneous utilities for working with different I/O formats.
Functions:
is_file_accessible: Check if a file is accessible.
read_hdf5_attrs: Read attributes from an HDF5 dataset.
read_hdf5_dataset: Read data from an HDF5 file.
read_hdf5_group: Read an entire group from an HDF5 file.
write_hdf5_attrs: Write attributes to an HDF5 dataset.
write_hdf5_dataset: Write data to an HDF5 file.
write_hdf5_group: Write an entire group to an HDF5 file.
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.is_file_accessible","title":"is_file_accessible(filename)
","text":"Check if a file is accessible.
Parameters:
filename (str | Path): Path to a file. Required.
Returns:
bool: True if the file is accessible, False otherwise.
Notes:
This checks if the file is readable by the current user by reading one byte from the file.
Source code in sleap_io/io/utils.py
def is_file_accessible(filename: str | Path) -> bool:\n \"\"\"Check if a file is accessible.\n\n Args:\n filename: Path to a file.\n\n Returns:\n `True` if the file is accessible, `False` otherwise.\n\n Notes:\n This checks if the file readable by the current user by reading one byte from\n the file.\n \"\"\"\n filename = Path(filename)\n try:\n with open(filename, \"rb\") as f:\n f.read(1)\n return True\n except (FileNotFoundError, PermissionError, OSError, ValueError):\n return False\n
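For example, as a guard before trying to open a video (the path is a placeholder):
from sleap_io.io.utils import is_file_accessible
if is_file_accessible("session1.mp4"):
    print("readable")
else:
    print("missing or unreadable")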
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_attrs","title":"read_hdf5_attrs(filename, dataset='/', attribute=None)
","text":"Read attributes from an HDF5 dataset.
Parameters:
filename (str): Path to an HDF5 file. Required.
dataset (str): Path to a dataset or group from which attributes will be read. Default: '/'.
attribute (Optional[str]): If specified, the attribute name to read. If None (the default), all attributes for the dataset will be returned. Default: None.
Returns:
Union[Any, dict[str, Any]]: The attributes in a dictionary, or the attribute field if attribute was provided.
sleap_io/io/utils.py
def read_hdf5_attrs(\n filename: str, dataset: str = \"/\", attribute: Optional[str] = None\n) -> Union[Any, dict[str, Any]]:\n \"\"\"Read attributes from an HDF5 dataset.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset or group from which attributes will be read.\n attribute: If specified, the attribute name to read. If `None` (the default),\n all attributes for the dataset will be returned.\n\n Returns:\n The attributes in a dictionary, or the attribute field if `attribute` was\n provided.\n \"\"\"\n with h5py.File(filename, \"r\") as f:\n ds = f[dataset]\n if attribute is None:\n data = dict(ds.attrs)\n else:\n data = ds.attrs[attribute]\n return data\n
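A sketch of reading the format version written by write_metadata (the file path is a placeholder; the "metadata" group and "format_id" attribute names follow the write_metadata source shown earlier):
from sleap_io.io.utils import read_hdf5_attrs
all_attrs = read_hdf5_attrs("labels.slp", dataset="metadata")  # all attributes as a dict
format_id = read_hdf5_attrs("labels.slp", dataset="metadata", attribute="format_id")
print(format_id)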
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_dataset","title":"read_hdf5_dataset(filename, dataset)
","text":"Read data from an HDF5 file.
Parameters:
filename (str): Path to an HDF5 file. Required.
dataset (str): Path to a dataset. Required.
Returns:
ndarray: The data as an array.
Source code in sleap_io/io/utils.py
def read_hdf5_dataset(filename: str, dataset: str) -> np.ndarray:\n \"\"\"Read data from an HDF5 file.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset.\n\n Returns:\n The data as an array.\n \"\"\"\n with h5py.File(filename, \"r\") as f:\n data = f[dataset][()]\n return data\n
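For instance, the raw track records used by read_tracks above can be pulled out directly (the path is a placeholder):
import json
from sleap_io.io.utils import read_hdf5_dataset
raw = read_hdf5_dataset("labels.slp", "tracks_json")  # array of JSON-encoded strings
tracks = [json.loads(x) for x in raw]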
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_group","title":"read_hdf5_group(filename, group='/')
","text":"Read an entire group from an HDF5 file.
Parameters:
filename (str): Path to an HDF5 file. Required.
group (str): Path to a group within the HDF5 file. Defaults to \"/\" (read the entire file).
Returns:
dict[str, ndarray]: A flat dictionary with keys corresponding to dataset paths and values corresponding to the datasets as arrays.
Source code in sleap_io/io/utils.py
def read_hdf5_group(filename: str, group: str = \"/\") -> dict[str, np.ndarray]:\n \"\"\"Read an entire group from an HDF5 file.\n\n Args:\n filename: Path an HDF5 file.\n group: Path to a group within the HDF5 file. Defaults to \"/\" (read the entire\n file).\n\n Returns:\n A flat dictionary with keys corresponding to dataset paths and values\n corresponding to the datasets as arrays.\n \"\"\"\n data = {}\n\n def read_datasets(k, v):\n if type(v) == h5py.Dataset:\n data[v.name] = v[()]\n\n with h5py.File(filename, \"r\") as f:\n f[group].visititems(read_datasets)\n\n return data\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_attrs","title":"write_hdf5_attrs(filename, dataset, attributes)
","text":"Write attributes to an HDF5 dataset.
Parameters:
filename (str): Path to an HDF5 file. Required.
dataset (str): Path to a dataset or group to which attributes will be written. Required.
attributes (dict[str, Any]): The attributes in a dictionary with the keys as the attribute names. Required.
Source code in sleap_io/io/utils.py
def write_hdf5_attrs(filename: str, dataset: str, attributes: dict[str, Any]):\n \"\"\"Write attributes to an HDF5 dataset.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset or group to which attributes will be written.\n attributes: The attributes in a dictionary with the keys as the attribute names.\n \"\"\"\n\n def _overwrite_hdf5_attr(\n group_or_dataset: Union[h5py.Group, h5py.Dataset], attr_name: str, data: Any\n ):\n \"\"\"Overwrite attribute for group or dataset in HDF5 file.\n\n Args:\n group_or_dataset: Path to group or dataset in HDF5 file.\n attr_name: Name of attribute.\n data: Data to write to attribute.\n \"\"\"\n try:\n del group_or_dataset.attrs[attr_name]\n except KeyError:\n pass\n group_or_dataset.attrs.create(attr_name, data)\n\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n ds = f[dataset]\n for attr_name, attr_value in attributes.items():\n _overwrite_hdf5_attr(ds, attr_name, attr_value)\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_dataset","title":"write_hdf5_dataset(filename, dataset, data)
","text":"Write data to an HDF5 file.
Parameters:
filename (str): Path to an HDF5 file. Required.
dataset (str): Path to a dataset. Required.
data (ndarray): Data to write to the dataset. Required.
Source code in sleap_io/io/utils.py
def write_hdf5_dataset(filename: str, dataset: str, data: np.ndarray):\n \"\"\"Write data to an HDF5 file.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset.\n data: Data to write to dataset.\n \"\"\"\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n _overwrite_hdf5_dataset(f, dataset, data)\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_group","title":"write_hdf5_group(filename, data)
","text":"Write an entire group to an HDF5 file.
Parameters:
filename (str): Path to an HDF5 file. Required.
data (dict[str, ndarray]): A dictionary with keys corresponding to dataset/group paths and values corresponding to either sub-group paths or the datasets as arrays. Required.
Source code in sleap_io/io/utils.py
def write_hdf5_group(filename: str, data: dict[str, np.ndarray]):\n \"\"\"Write an entire group to an HDF5 file.\n\n Args:\n filename: Path an HDF5 file.\n data: A dictionary with keys corresponding to dataset/group paths and values\n corresponding to either sub group paths or the datasets as arrays.\n \"\"\"\n\n def overwrite_hdf5_group(\n file_or_group: Union[h5py.File, h5py.Group], group_name: str\n ) -> h5py.Group:\n \"\"\"Overwrite group in HDF5 file.\n\n Args:\n file_or_group: Path to an HDF5 file or parent group.\n group_name: Path to a group.\n\n Return:\n group: (Sub-)group under specified file or parent group.\n \"\"\"\n try:\n del file_or_group[group_name]\n except KeyError:\n pass\n group = file_or_group.create_group(group_name)\n return group\n\n def write_group(parent_group, data_to_write):\n for name, dataset_or_group in data_to_write.items():\n if isinstance(dataset_or_group, dict):\n # Create (sub-)group under parent group (top level being the file)\n group = overwrite_hdf5_group(parent_group, name)\n write_group(group, dataset_or_group) # Recall with new parent\n else:\n # Create dataset if dataset_or_group is a dataset\n _overwrite_hdf5_dataset(\n f=parent_group, dataset=name, data=dataset_or_group\n )\n\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n write_group(f, data)\n
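A minimal sketch with made-up dataset names; nested dicts become sub-groups:
import numpy as np
from sleap_io.io.utils import write_hdf5_group
data = {
    "poses": np.zeros((10, 5, 2)),       # written to /poses
    "meta": {"node_ids": np.arange(5)},  # group /meta with dataset /meta/node_ids
}
write_hdf5_group("analysis.h5", data)  # hypothetical output file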
"},{"location":"reference/sleap_io/io/video_reading/","title":"video_reading","text":""},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading","title":"sleap_io.io.video_reading
","text":"Backends for reading videos.
Classes:
HDF5Video: Video backend for reading videos stored in HDF5 files.
ImageVideo: Video backend for reading videos stored as image files.
MediaVideo: Video backend for reading videos stored as common media files.
VideoBackend: Base class for video backends.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video","title":"HDF5Video
","text":" Bases: VideoBackend
Video backend for reading videos stored in HDF5 files.
This backend supports reading videos stored in HDF5 files, both in rank-4 datasets as well as in datasets with lists of binary-encoded images.
Embedded image datasets are used in SLEAP when exporting package files (.pkg.slp
) with videos embedded in them. This is useful for bundling training or inference data without having to worry about the videos (or frame images) being moved or deleted. It is expected that these types of datasets will be in a Group
with an int8 variable length dataset called \"video\"
. This dataset must also contain an attribute called \"format\" with a string describing the image format (e.g., \"png\" or \"jpg\") which will be used to decode it appropriately.
If a frame_numbers
dataset is present in the group, it will be used to map from source video frames to the frames in the dataset. This is useful to preserve frame indexing when exporting a subset of frames in the video. It will also be used to populate frame_map
and source_inds
attributes.
Attributes:
filename: Path to HDF5 file (.h5, .hdf5 or .slp).
grayscale: Whether to force grayscale. If None, autodetect on first frame load.
keep_open: Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
dataset (Optional[str]): Name of dataset to read from. If None, will try to find a rank-4 dataset by iterating through datasets in the file. If specifying an embedded dataset, this can be the group containing a \"video\" dataset or the dataset itself (e.g., \"video0\" or \"video0/video\").
input_format (str): Format of the data in the dataset. One of \"channels_last\" (the default) in (frames, height, width, channels) order or \"channels_first\" in (frames, channels, width, height) order. Embedded datasets should use the \"channels_last\" format.
frame_map (dict[int, int]): Mapping from frame indices to indices in the dataset. This is used to translate between the frame indices of the images within their source video and the indices of the images in the dataset. This is only used when reading embedded image datasets.
source_filename (Optional[str]): Path to the source video file. This is metadata and only used when reading embedded image datasets.
source_inds (Optional[ndarray]): Indices of the frames in the source video file. This is metadata and only used when reading embedded image datasets.
image_format (str): Format of the images in the embedded dataset. This is metadata and only used when reading embedded image datasets.
Methods:
__attrs_post_init__: Auto-detect dataset and frame map heuristically.
decode_embedded: Decode an embedded image string into a numpy array.
has_frame: Check if a frame index is contained in the video.
read_test_frame: Read a single frame from the video to test for grayscale.
Attributes:
embedded_frame_inds (list[int]): Return the frame indices of the embedded images.
has_embedded_images (bool): Return True if the dataset contains embedded images.
img_shape (Tuple[int, int, int]): Shape of a single frame in the video as (height, width, channels).
num_frames (int): Number of frames in the video.
Source code in sleap_io/io/video_reading.py
@attrs.define\nclass HDF5Video(VideoBackend):\n \"\"\"Video backend for reading videos stored in HDF5 files.\n\n This backend supports reading videos stored in HDF5 files, both in rank-4 datasets\n as well as in datasets with lists of binary-encoded images.\n\n Embedded image datasets are used in SLEAP when exporting package files (`.pkg.slp`)\n with videos embedded in them. This is useful for bundling training or inference data\n without having to worry about the videos (or frame images) being moved or deleted.\n It is expected that these types of datasets will be in a `Group` with a `int8`\n variable length dataset called `\"video\"`. This dataset must also contain an\n attribute called \"format\" with a string describing the image format (e.g., \"png\" or\n \"jpg\") which will be used to decode it appropriately.\n\n If a `frame_numbers` dataset is present in the group, it will be used to map from\n source video frames to the frames in the dataset. This is useful to preserve frame\n indexing when exporting a subset of frames in the video. It will also be used to\n populate `frame_map` and `source_inds` attributes.\n\n Attributes:\n filename: Path to HDF5 file (.h5, .hdf5 or .slp).\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n dataset: Name of dataset to read from. If `None`, will try to find a rank-4\n dataset by iterating through datasets in the file. If specifying an embedded\n dataset, this can be the group containing a \"video\" dataset or the dataset\n itself (e.g., \"video0\" or \"video0/video\").\n input_format: Format of the data in the dataset. One of \"channels_last\" (the\n default) in `(frames, height, width, channels)` order or \"channels_first\" in\n `(frames, channels, width, height)` order. Embedded datasets should use the\n \"channels_last\" format.\n frame_map: Mapping from frame indices to indices in the dataset. This is used to\n translate between the frame indices of the images within their source video\n and the indices of the images in the dataset. This is only used when reading\n embedded image datasets.\n source_filename: Path to the source video file. This is metadata and only used\n when reading embedded image datasets.\n source_inds: Indices of the frames in the source video file. This is metadata\n and only used when reading embedded image datasets.\n image_format: Format of the images in the embedded dataset. 
This is metadata and\n only used when reading embedded image datasets.\n \"\"\"\n\n dataset: Optional[str] = None\n input_format: str = attrs.field(\n default=\"channels_last\",\n validator=attrs.validators.in_([\"channels_last\", \"channels_first\"]),\n )\n frame_map: dict[int, int] = attrs.field(init=False, default=attrs.Factory(dict))\n source_filename: Optional[str] = None\n source_inds: Optional[np.ndarray] = None\n image_format: str = \"hdf5\"\n\n EXTS = (\"h5\", \"hdf5\", \"slp\")\n\n def __attrs_post_init__(self):\n \"\"\"Auto-detect dataset and frame map heuristically.\"\"\"\n # Check if the file accessible before applying heuristics.\n try:\n f = h5py.File(self.filename, \"r\")\n except OSError:\n return\n\n if self.dataset is None:\n # Iterate through datasets to find a rank 4 array.\n def find_movies(name, obj):\n if isinstance(obj, h5py.Dataset) and obj.ndim == 4:\n self.dataset = name\n return True\n\n f.visititems(find_movies)\n\n if self.dataset is None:\n # Iterate through datasets to find an embedded video dataset.\n def find_embedded(name, obj):\n if isinstance(obj, h5py.Dataset) and name.endswith(\"/video\"):\n self.dataset = name\n return True\n\n f.visititems(find_embedded)\n\n if self.dataset is None:\n # Couldn't find video datasets.\n return\n\n if isinstance(f[self.dataset], h5py.Group):\n # If this is a group, assume it's an embedded video dataset.\n if \"video\" in f[self.dataset]:\n self.dataset = f\"{self.dataset}/video\"\n\n if self.dataset.split(\"/\")[-1] == \"video\":\n # This may be an embedded video dataset. Check for frame map.\n ds = f[self.dataset]\n\n if \"format\" in ds.attrs:\n self.image_format = ds.attrs[\"format\"]\n\n if \"frame_numbers\" in ds.parent:\n frame_numbers = ds.parent[\"frame_numbers\"][:].astype(int)\n self.frame_map = {frame: idx for idx, frame in enumerate(frame_numbers)}\n self.source_inds = frame_numbers\n\n if \"source_video\" in ds.parent:\n self.source_filename = json.loads(\n ds.parent[\"source_video\"].attrs[\"json\"]\n )[\"backend\"][\"filename\"]\n\n f.close()\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n with h5py.File(self.filename, \"r\") as f:\n return f[self.dataset].shape[0]\n\n @property\n def img_shape(self) -> Tuple[int, int, int]:\n \"\"\"Shape of a single frame in the video as `(height, width, channels)`.\"\"\"\n with h5py.File(self.filename, \"r\") as f:\n ds = f[self.dataset]\n\n img_shape = None\n if \"height\" in ds.attrs:\n # Try to get shape from the attributes.\n img_shape = (\n ds.attrs[\"height\"],\n ds.attrs[\"width\"],\n ds.attrs[\"channels\"],\n )\n\n if img_shape[0] == 0 or img_shape[1] == 0:\n # Invalidate the shape if the attributes are zero.\n img_shape = None\n\n if img_shape is None and self.image_format == \"hdf5\" and ds.ndim == 4:\n # Use the dataset shape if just stored as a rank-4 array.\n img_shape = ds.shape[1:]\n\n if self.input_format == \"channels_first\":\n img_shape = img_shape[::-1]\n\n if img_shape is None:\n # Fall back to reading a test frame.\n return super().img_shape\n\n return int(img_shape[0]), int(img_shape[1]), int(img_shape[2])\n\n def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\"\"\"\n if self.frame_map:\n frame_idx = list(self.frame_map.keys())[0]\n else:\n frame_idx = 0\n return self._read_frame(frame_idx)\n\n @property\n def has_embedded_images(self) -> bool:\n \"\"\"Return True if the dataset contains embedded images.\"\"\"\n return self.image_format is not 
None and self.image_format != \"hdf5\"\n\n @property\n def embedded_frame_inds(self) -> list[int]:\n \"\"\"Return the frame indices of the embedded images.\"\"\"\n return list(self.frame_map.keys())\n\n def decode_embedded(self, img_string: np.ndarray) -> np.ndarray:\n \"\"\"Decode an embedded image string into a numpy array.\n\n Args:\n img_string: Binary string of the image as a `int8` numpy vector with the\n bytes as values corresponding to the format-encoded image.\n\n Returns:\n The decoded image as a numpy array of shape `(height, width, channels)`. If\n a rank-2 image is decoded, it will be expanded such that channels will be 1.\n\n This method does not apply grayscale conversion as per the `grayscale`\n attribute. Use the `get_frame` or `get_frames` methods of the `VideoBackend`\n to apply grayscale conversion rather than calling this function directly.\n \"\"\"\n if \"cv2\" in sys.modules:\n img = cv2.imdecode(img_string, cv2.IMREAD_UNCHANGED)\n else:\n img = iio.imread(BytesIO(img_string), extension=f\".{self.image_format}\")\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n\n def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n if self.frame_map:\n return frame_idx in self.frame_map\n else:\n return frame_idx < len(self)\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = h5py.File(self.filename, \"r\")\n f = self._open_reader\n else:\n f = h5py.File(self.filename, \"r\")\n\n ds = f[self.dataset]\n\n if self.frame_map:\n frame_idx = self.frame_map[frame_idx]\n\n img = ds[frame_idx]\n\n if self.has_embedded_images:\n img = self.decode_embedded(img)\n\n if self.input_format == \"channels_first\":\n img = np.transpose(img, (2, 1, 0))\n\n if not self.keep_open:\n f.close()\n return img\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Args:\n frame_inds: List of indices of frames to read.\n\n Returns:\n The frame as a numpy array of shape `(frames, height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frames` method of the `VideoBackend` class instead.\n \"\"\"\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = h5py.File(self.filename, \"r\")\n f = self._open_reader\n else:\n f = h5py.File(self.filename, \"r\")\n\n if self.frame_map:\n frame_inds = [self.frame_map[idx] for idx in frame_inds]\n\n ds = f[self.dataset]\n imgs = ds[frame_inds]\n\n if \"format\" in ds.attrs:\n imgs = np.stack(\n [self.decode_embedded(img) for img in imgs],\n axis=0,\n )\n\n if self.input_format == \"channels_first\":\n imgs = np.transpose(imgs, (0, 3, 2, 1))\n\n if not self.keep_open:\n f.close()\n\n return imgs\n
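A usage sketch, assuming a package file with embedded frames (the file and dataset names are placeholders):
from sleap_io.io.video_reading import HDF5Video
backend = HDF5Video("frames.pkg.slp", dataset="video0/video")  # or dataset=None to auto-detect
print(backend.num_frames, backend.img_shape)
img = backend.get_frame(backend.embedded_frame_inds[0])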
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.embedded_frame_inds","title":"embedded_frame_inds: list[int]
property
","text":"Return the frame indices of the embedded images.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.has_embedded_images","title":"has_embedded_images: bool
property
","text":"Return True if the dataset contains embedded images.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.img_shape","title":"img_shape: Tuple[int, int, int]
property
","text":"Shape of a single frame in the video as (height, width, channels)
.
num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Auto-detect dataset and frame map heuristically.
Source code insleap_io/io/video_reading.py
def __attrs_post_init__(self):\n \"\"\"Auto-detect dataset and frame map heuristically.\"\"\"\n # Check if the file accessible before applying heuristics.\n try:\n f = h5py.File(self.filename, \"r\")\n except OSError:\n return\n\n if self.dataset is None:\n # Iterate through datasets to find a rank 4 array.\n def find_movies(name, obj):\n if isinstance(obj, h5py.Dataset) and obj.ndim == 4:\n self.dataset = name\n return True\n\n f.visititems(find_movies)\n\n if self.dataset is None:\n # Iterate through datasets to find an embedded video dataset.\n def find_embedded(name, obj):\n if isinstance(obj, h5py.Dataset) and name.endswith(\"/video\"):\n self.dataset = name\n return True\n\n f.visititems(find_embedded)\n\n if self.dataset is None:\n # Couldn't find video datasets.\n return\n\n if isinstance(f[self.dataset], h5py.Group):\n # If this is a group, assume it's an embedded video dataset.\n if \"video\" in f[self.dataset]:\n self.dataset = f\"{self.dataset}/video\"\n\n if self.dataset.split(\"/\")[-1] == \"video\":\n # This may be an embedded video dataset. Check for frame map.\n ds = f[self.dataset]\n\n if \"format\" in ds.attrs:\n self.image_format = ds.attrs[\"format\"]\n\n if \"frame_numbers\" in ds.parent:\n frame_numbers = ds.parent[\"frame_numbers\"][:].astype(int)\n self.frame_map = {frame: idx for idx, frame in enumerate(frame_numbers)}\n self.source_inds = frame_numbers\n\n if \"source_video\" in ds.parent:\n self.source_filename = json.loads(\n ds.parent[\"source_video\"].attrs[\"json\"]\n )[\"backend\"][\"filename\"]\n\n f.close()\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.decode_embedded","title":"decode_embedded(img_string)
","text":"Decode an embedded image string into a numpy array.
Parameters:
Name Type Description Defaultimg_string
ndarray
Binary string of the image as a int8
numpy vector with the bytes as values corresponding to the format-encoded image.
Returns:
Type Descriptionndarray
The decoded image as a numpy array of shape (height, width, channels)
. If a rank-2 image is decoded, it will be expanded such that channels will be 1.
This method does not apply grayscale conversion as per the grayscale
attribute. Use the get_frame
or get_frames
methods of the VideoBackend
to apply grayscale conversion rather than calling this function directly.
sleap_io/io/video_reading.py
def decode_embedded(self, img_string: np.ndarray) -> np.ndarray:\n \"\"\"Decode an embedded image string into a numpy array.\n\n Args:\n img_string: Binary string of the image as a `int8` numpy vector with the\n bytes as values corresponding to the format-encoded image.\n\n Returns:\n The decoded image as a numpy array of shape `(height, width, channels)`. If\n a rank-2 image is decoded, it will be expanded such that channels will be 1.\n\n This method does not apply grayscale conversion as per the `grayscale`\n attribute. Use the `get_frame` or `get_frames` methods of the `VideoBackend`\n to apply grayscale conversion rather than calling this function directly.\n \"\"\"\n if \"cv2\" in sys.modules:\n img = cv2.imdecode(img_string, cv2.IMREAD_UNCHANGED)\n else:\n img = iio.imread(BytesIO(img_string), extension=f\".{self.image_format}\")\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.has_frame","title":"has_frame(frame_idx)
","text":"Check if a frame index is contained in the video.
Parameters:
Name Type Description Defaultframe_idx
int
Index of frame to check.
requiredReturns:
Type Descriptionbool
True
if the index is contained in the video, otherwise False
.
sleap_io/io/video_reading.py
def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n if self.frame_map:\n return frame_idx in self.frame_map\n else:\n return frame_idx < len(self)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.read_test_frame","title":"read_test_frame()
","text":"Read a single frame from the video to test for grayscale.
Source code insleap_io/io/video_reading.py
def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\"\"\"\n if self.frame_map:\n frame_idx = list(self.frame_map.keys())[0]\n else:\n frame_idx = 0\n return self._read_frame(frame_idx)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo","title":"ImageVideo
","text":" Bases: VideoBackend
Video backend for reading videos stored as image files.
This backend supports reading videos stored as a list of images.
Attributes:
filename: Path to image files.
grayscale: Whether to force grayscale. If None, autodetect on first frame load.
Methods:
find_images: Find images in a folder and return a list of filenames.
Attributes:
num_frames (int): Number of frames in the video.
Source code in sleap_io/io/video_reading.py
@attrs.define\nclass ImageVideo(VideoBackend):\n \"\"\"Video backend for reading videos stored as image files.\n\n This backend supports reading videos stored as a list of images.\n\n Attributes:\n filename: Path to image files.\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n \"\"\"\n\n EXTS = (\"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\")\n\n @staticmethod\n def find_images(folder: str) -> list[str]:\n \"\"\"Find images in a folder and return a list of filenames.\"\"\"\n folder = Path(folder)\n return sorted(\n [f.as_posix() for f in folder.glob(\"*\") if f.suffix[1:] in ImageVideo.EXTS]\n )\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n return len(self.filename)\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n img = iio.imread(self.filename[frame_idx])\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo.find_images","title":"find_images(folder)
staticmethod
","text":"Find images in a folder and return a list of filenames.
Source code in sleap_io/io/video_reading.py
@staticmethod\ndef find_images(folder: str) -> list[str]:\n \"\"\"Find images in a folder and return a list of filenames.\"\"\"\n folder = Path(folder)\n return sorted(\n [f.as_posix() for f in folder.glob(\"*\") if f.suffix[1:] in ImageVideo.EXTS]\n )\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo","title":"MediaVideo
","text":" Bases: VideoBackend
Video backend for reading videos stored as common media files.
This backend supports reading through FFMPEG (the default), pyav, or OpenCV. Here are their trade-offs:
- \"opencv\": Fastest video reader, but only supports a limited number of codecs\n and may not be able to read some videos. It requires `opencv-python` to be\n installed. It is the fastest because it uses the OpenCV C++ library to read\n videos, but is limited by the version of FFMPEG that was linked into it at\n build time as well as the OpenCV version used.\n- \"FFMPEG\": Slowest, but most reliable. This is the default backend. It requires\n `imageio-ffmpeg` and a `ffmpeg` executable on the system path (which can be\n installed via conda). The `imageio` plugin for FFMPEG reads frames into raw\n bytes which are communicated to Python through STDOUT on a subprocess pipe,\n which can be slow. However, it is the most reliable and feature-complete. If\n you install the conda-forge version of ffmpeg, it will be compiled with\n support for many codecs, including GPU-accelerated codecs like NVDEC for\n H264 and others.\n- \"pyav\": Supports most codecs that FFMPEG does, but not as complete or reliable\n of an implementation in `imageio` as FFMPEG for some video types. It is\n faster than FFMPEG because it uses the `av` package to read frames directly\n into numpy arrays in memory without the need for a subprocess pipe. These\n are Python bindings for the C library libav, which is the same library that\n FFMPEG uses under the hood.\n
Attributes:
filename: Path to video file.
grayscale: Whether to force grayscale. If None, autodetect on first frame load.
keep_open: Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
plugin (str): Video plugin to use. One of \"opencv\", \"FFMPEG\", or \"pyav\". If None, will use the first available plugin in the order listed above.
Attributes:
num_frames (int): Number of frames in the video.
reader (object): Return the reader object for the video, caching if necessary.
Source code in sleap_io/io/video_reading.py
@attrs.define\nclass MediaVideo(VideoBackend):\n \"\"\"Video backend for reading videos stored as common media files.\n\n This backend supports reading through FFMPEG (the default), pyav, or OpenCV. Here\n are their trade-offs:\n\n - \"opencv\": Fastest video reader, but only supports a limited number of codecs\n and may not be able to read some videos. It requires `opencv-python` to be\n installed. It is the fastest because it uses the OpenCV C++ library to read\n videos, but is limited by the version of FFMPEG that was linked into it at\n build time as well as the OpenCV version used.\n - \"FFMPEG\": Slowest, but most reliable. This is the default backend. It requires\n `imageio-ffmpeg` and a `ffmpeg` executable on the system path (which can be\n installed via conda). The `imageio` plugin for FFMPEG reads frames into raw\n bytes which are communicated to Python through STDOUT on a subprocess pipe,\n which can be slow. However, it is the most reliable and feature-complete. If\n you install the conda-forge version of ffmpeg, it will be compiled with\n support for many codecs, including GPU-accelerated codecs like NVDEC for\n H264 and others.\n - \"pyav\": Supports most codecs that FFMPEG does, but not as complete or reliable\n of an implementation in `imageio` as FFMPEG for some video types. It is\n faster than FFMPEG because it uses the `av` package to read frames directly\n into numpy arrays in memory without the need for a subprocess pipe. These\n are Python bindings for the C library libav, which is the same library that\n FFMPEG uses under the hood.\n\n Attributes:\n filename: Path to video file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n plugin: Video plugin to use. One of \"opencv\", \"FFMPEG\", or \"pyav\". If `None`,\n will use the first available plugin in the order listed above.\n \"\"\"\n\n plugin: str = attrs.field(\n validator=attrs.validators.in_([\"opencv\", \"FFMPEG\", \"pyav\"])\n )\n\n EXTS = (\"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\")\n\n @plugin.default\n def _default_plugin(self) -> str:\n if \"cv2\" in sys.modules:\n return \"opencv\"\n elif \"imageio_ffmpeg\" in sys.modules:\n return \"FFMPEG\"\n elif \"av\" in sys.modules:\n return \"pyav\"\n else:\n raise ImportError(\n \"No video plugins found. 
Install opencv-python, imageio-ffmpeg, or av.\"\n )\n\n @property\n def reader(self) -> object:\n \"\"\"Return the reader object for the video, caching if necessary.\"\"\"\n if self.keep_open:\n if self._open_reader is None:\n if self.plugin == \"opencv\":\n self._open_reader = cv2.VideoCapture(self.filename)\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n self._open_reader = iio.imopen(\n self.filename, \"r\", plugin=self.plugin\n )\n return self._open_reader\n else:\n if self.plugin == \"opencv\":\n return cv2.VideoCapture(self.filename)\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n return iio.imopen(self.filename, \"r\", plugin=self.plugin)\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n if self.plugin == \"opencv\":\n return int(self.reader.get(cv2.CAP_PROP_FRAME_COUNT))\n else:\n props = iio.improps(self.filename, plugin=self.plugin)\n n_frames = props.n_images\n if np.isinf(n_frames):\n legacy_reader = self.reader.legacy_get_reader()\n # Note: This might be super slow for some videos, so maybe we should\n # defer evaluation of this or give the user control over it.\n n_frames = legacy_reader.count_frames()\n return n_frames\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n failed = False\n if self.plugin == \"opencv\":\n if self.reader.get(cv2.CAP_PROP_POS_FRAMES) != frame_idx:\n self.reader.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)\n success, img = self.reader.read()\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n if self.keep_open:\n img = self.reader.read(index=frame_idx)\n else:\n with iio.imopen(self.filename, \"r\", plugin=self.plugin) as reader:\n img = reader.read(index=frame_idx)\n\n success = (not failed) and (img is not None)\n if not success:\n raise IndexError(f\"Failed to read frame index {frame_idx}.\")\n return img\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Args:\n frame_inds: List of indices of frames to read.\n\n Returns:\n The frame as a numpy array of shape `(frames, height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. 
It is recommended to use the\n `get_frames` method of the `VideoBackend` class instead.\n \"\"\"\n if self.plugin == \"opencv\":\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = cv2.VideoCapture(self.filename)\n reader = self._open_reader\n else:\n reader = cv2.VideoCapture(self.filename)\n\n reader.set(cv2.CAP_PROP_POS_FRAMES, frame_inds[0])\n imgs = []\n for idx in frame_inds:\n if reader.get(cv2.CAP_PROP_POS_FRAMES) != idx:\n reader.set(cv2.CAP_PROP_POS_FRAMES, idx)\n _, img = reader.read()\n img = img[..., ::-1] # BGR -> RGB\n imgs.append(img)\n imgs = np.stack(imgs, axis=0)\n\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = iio.imopen(\n self.filename, \"r\", plugin=self.plugin\n )\n reader = self._open_reader\n imgs = np.stack([reader.read(index=idx) for idx in frame_inds], axis=0)\n else:\n with iio.imopen(self.filename, \"r\", plugin=self.plugin) as reader:\n imgs = np.stack(\n [reader.read(index=idx) for idx in frame_inds], axis=0\n )\n return imgs\n
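As a quick sketch of how the plugin choice above surfaces in practice, the snippet below opens a video with the default plugin and again with an explicitly requested one; the path video.mp4 is a placeholder and each plugin assumes its corresponding package is installed:
from sleap_io.io.video_reading import MediaVideo

# Use whichever plugin is available by default (see the trade-offs above).
backend = MediaVideo("video.mp4")

# Explicitly request pyav, which decodes frames in-process via the av bindings.
backend_pyav = MediaVideo("video.mp4", plugin="pyav")

frame = backend_pyav.get_frame(0)  # (height, width, channels) array
print(backend.plugin, backend_pyav.plugin, frame.shape)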
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo.reader","title":"reader: object
property
","text":"Return the reader object for the video, caching if necessary.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend","title":"VideoBackend
","text":"Base class for video backends.
This class is not meant to be used directly. Instead, use the from_filename
constructor to create a backend instance.
Attributes:
Name Type Description
filename
str | Path | list[str] | list[Path]
Path to video file(s).
grayscale
Optional[bool]
Whether to force grayscale. If None, autodetect on first frame load.
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
Methods:
Name Description
__getitem__
Return a single frame or a list of frames from the video.
__len__
Return number of frames in the video.
detect_grayscale
Detect whether the video is grayscale.
from_filename
Create a VideoBackend from a filename.
get_frame
Read a single frame from the video.
get_frames
Read a list of frames from the video.
has_frame
Check if a frame index is contained in the video.
read_test_frame
Read a single frame from the video to test for grayscale.
Attributes:
Name Type Description
frames
int
Number of frames in the video.
img_shape
Tuple[int, int, int]
Shape of a single frame in the video.
num_frames
int
Number of frames in the video. Must be implemented in subclasses.
shape
Tuple[int, int, int, int]
Shape of the video as a tuple of (frames, height, width, channels)
.
sleap_io/io/video_reading.py
@attrs.define\nclass VideoBackend:\n \"\"\"Base class for video backends.\n\n This class is not meant to be used directly. Instead, use the `from_filename`\n constructor to create a backend instance.\n\n Attributes:\n filename: Path to video file(s).\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n \"\"\"\n\n filename: str | Path | list[str] | list[Path]\n grayscale: Optional[bool] = None\n keep_open: bool = True\n _cached_shape: Optional[Tuple[int, int, int, int]] = None\n _open_reader: Optional[object] = None\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a VideoBackend from a filename.\n\n Args:\n filename: Path to video file(s).\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Returns:\n VideoBackend subclass instance.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if type(filename) == str and Path(filename).is_dir():\n filename = ImageVideo.find_images(filename)\n\n if type(filename) == list:\n filename = [Path(f).as_posix() for f in filename]\n return ImageVideo(\n filename, grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(ImageVideo.EXTS):\n return ImageVideo(\n [filename], grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(MediaVideo.EXTS):\n return MediaVideo(\n filename,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(MediaVideo, kwargs),\n )\n elif filename.endswith(HDF5Video.EXTS):\n return HDF5Video(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(HDF5Video, kwargs),\n )\n else:\n raise ValueError(f\"Unknown video file type: {filename}\")\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video. Must be implemented in subclasses.\"\"\"\n raise NotImplementedError\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\"\"\"\n return np.stack([self.get_frame(i) for i in frame_inds], axis=0)\n\n def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\n\n Note:\n This reads the frame at index 0. This may not be appropriate if the first\n frame is not available in a given backend.\n \"\"\"\n return self._read_frame(0)\n\n def detect_grayscale(self, test_img: np.ndarray | None = None) -> bool:\n \"\"\"Detect whether the video is grayscale.\n\n This works by reading in a test frame and comparing the first and last channel\n for equality. It may fail in cases where, due to compression, the first and\n last channels are not exactly the same.\n\n Args:\n test_img: Optional test image to use. 
If not provided, a test image will be\n loaded via the `read_test_frame` method.\n\n Returns:\n Whether the video is grayscale. This value is also cached in the `grayscale`\n attribute of the class.\n \"\"\"\n if test_img is None:\n test_img = self.read_test_frame()\n is_grayscale = np.array_equal(test_img[..., 0], test_img[..., -1])\n self.grayscale = is_grayscale\n return is_grayscale\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video. Must be implemented in subclasses.\"\"\"\n raise NotImplementedError\n\n @property\n def img_shape(self) -> Tuple[int, int, int]:\n \"\"\"Shape of a single frame in the video.\"\"\"\n height, width, channels = self.read_test_frame().shape\n if self.grayscale is None:\n self.detect_grayscale()\n if self.grayscale is False:\n channels = 3\n elif self.grayscale is True:\n channels = 1\n return int(height), int(width), int(channels)\n\n @property\n def shape(self) -> Tuple[int, int, int, int]:\n \"\"\"Shape of the video as a tuple of `(frames, height, width, channels)`.\n\n On first call, this will defer to `num_frames` and `img_shape` to determine the\n full shape. This call may be expensive for some subclasses, so the result is\n cached and returned on subsequent calls.\n \"\"\"\n if self._cached_shape is not None:\n return self._cached_shape\n else:\n shape = (self.num_frames,) + self.img_shape\n self._cached_shape = shape\n return shape\n\n @property\n def frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n return self.shape[0]\n\n def __len__(self) -> int:\n \"\"\"Return number of frames in the video.\"\"\"\n return self.shape[0]\n\n def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n return frame_idx < len(self)\n\n def get_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n Frame as a numpy array of shape `(height, width, channels)` where the\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frames`\n \"\"\"\n if not self.has_frame(frame_idx):\n raise IndexError(f\"Frame index {frame_idx} out of range.\")\n\n img = self._read_frame(frame_idx)\n\n if self.grayscale is None:\n self.detect_grayscale(img)\n\n if self.grayscale:\n img = img[..., [0]]\n\n return img\n\n def get_frames(self, frame_inds: list[int]) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Depending on the backend implementation, this may be faster than reading frames\n individually using `get_frame`.\n\n Args:\n frame_inds: List of frame indices to read.\n\n Returns:\n Frames as a numpy array of shape `(frames, height, width, channels)` where\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: 
`get_frame`\n \"\"\"\n imgs = self._read_frames(frame_inds)\n\n if self.grayscale is None:\n self.detect_grayscale(imgs[0])\n\n if self.grayscale:\n imgs = imgs[..., [0]]\n\n return imgs\n\n def __getitem__(self, ind: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return a single frame or a list of frames from the video.\n\n Args:\n ind: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: get_frame, get_frames\n \"\"\"\n if np.isscalar(ind):\n return self.get_frame(ind)\n else:\n if type(ind) is slice:\n start = (ind.start or 0) % len(self)\n stop = ind.stop or len(self)\n if stop < 0:\n stop = len(self) + stop\n step = ind.step or 1\n ind = range(start, stop, step)\n return self.get_frames(ind)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.frames","title":"frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.img_shape","title":"img_shape: Tuple[int, int, int]
property
","text":"Shape of a single frame in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video. Must be implemented in subclasses.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.shape","title":"shape: Tuple[int, int, int, int]
property
","text":"Shape of the video as a tuple of (frames, height, width, channels)
.
On first call, this will defer to num_frames
and img_shape
to determine the full shape. This call may be expensive for some subclasses, so the result is cached and returned on subsequent calls.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.__getitem__","title":"__getitem__(ind)
","text":"Return a single frame or a list of frames from the video.
Parameters:
Name Type Description Default
ind
int | list[int] | slice
Index or list of indices of frames to read.
required
Returns:
Type Description
ndarray
Frame or frames as a numpy array of shape (height, width, channels)
if a scalar index is provided, or (frames, height, width, channels)
if a list of indices is provided.
See also: get_frame, get_frames
Source code in sleap_io/io/video_reading.py
def __getitem__(self, ind: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return a single frame or a list of frames from the video.\n\n Args:\n ind: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: get_frame, get_frames\n \"\"\"\n if np.isscalar(ind):\n return self.get_frame(ind)\n else:\n if type(ind) is slice:\n start = (ind.start or 0) % len(self)\n stop = ind.stop or len(self)\n if stop < 0:\n stop = len(self) + stop\n step = ind.step or 1\n ind = range(start, stop, step)\n return self.get_frames(ind)\n
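As a usage sketch of the indexing behavior above (the path is a placeholder; any VideoBackend subclass behaves the same way):
from sleap_io.io.video_reading import VideoBackend

backend = VideoBackend.from_filename("video.mp4")

single = backend[0]            # equivalent to backend.get_frame(0)
several = backend[[0, 5, 10]]  # equivalent to backend.get_frames([0, 5, 10])
clip = backend[:10]            # slices are expanded into a range of frame indices
print(len(backend), backend.shape, single.shape, several.shape, clip.shape)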
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.__len__","title":"__len__()
","text":"Return number of frames in the video.
Source code in sleap_io/io/video_reading.py
def __len__(self) -> int:\n \"\"\"Return number of frames in the video.\"\"\"\n return self.shape[0]\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.detect_grayscale","title":"detect_grayscale(test_img=None)
","text":"Detect whether the video is grayscale.
This works by reading in a test frame and comparing the first and last channel for equality. It may fail in cases where, due to compression, the first and last channels are not exactly the same.
Parameters:
Name Type Description Default
test_img
ndarray | None
Optional test image to use. If not provided, a test image will be loaded via the read_test_frame
method.
None
Returns:
Type Description
bool
Whether the video is grayscale. This value is also cached in the grayscale
attribute of the class.
sleap_io/io/video_reading.py
def detect_grayscale(self, test_img: np.ndarray | None = None) -> bool:\n \"\"\"Detect whether the video is grayscale.\n\n This works by reading in a test frame and comparing the first and last channel\n for equality. It may fail in cases where, due to compression, the first and\n last channels are not exactly the same.\n\n Args:\n test_img: Optional test image to use. If not provided, a test image will be\n loaded via the `read_test_frame` method.\n\n Returns:\n Whether the video is grayscale. This value is also cached in the `grayscale`\n attribute of the class.\n \"\"\"\n if test_img is None:\n test_img = self.read_test_frame()\n is_grayscale = np.array_equal(test_img[..., 0], test_img[..., -1])\n self.grayscale = is_grayscale\n return is_grayscale\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, **kwargs)
classmethod
","text":"Create a VideoBackend from a filename.
Parameters:
Name Type Description Default
filename
str | list[str]
Path to video file(s).
required
dataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[bool]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
Returns:
Type Description
VideoBackend
VideoBackend subclass instance.
Source code in sleap_io/io/video_reading.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a VideoBackend from a filename.\n\n Args:\n filename: Path to video file(s).\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Returns:\n VideoBackend subclass instance.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if type(filename) == str and Path(filename).is_dir():\n filename = ImageVideo.find_images(filename)\n\n if type(filename) == list:\n filename = [Path(f).as_posix() for f in filename]\n return ImageVideo(\n filename, grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(ImageVideo.EXTS):\n return ImageVideo(\n [filename], grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(MediaVideo.EXTS):\n return MediaVideo(\n filename,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(MediaVideo, kwargs),\n )\n elif filename.endswith(HDF5Video.EXTS):\n return HDF5Video(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(HDF5Video, kwargs),\n )\n else:\n raise ValueError(f\"Unknown video file type: {filename}\")\n
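A brief sketch of how the dispatch above resolves different inputs; the file names are placeholders, and the mapping of .h5 and .png to the HDF5Video and ImageVideo backends assumes their usual extension lists:
from sleap_io.io.video_reading import VideoBackend

media = VideoBackend.from_filename("session.mp4")                 # -> MediaVideo
stack = VideoBackend.from_filename("frames.h5", dataset="video")  # -> HDF5Video ("video" is a hypothetical dataset name)
imgs = VideoBackend.from_filename(["img0.png", "img1.png"])       # -> ImageVideo from an explicit list of images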
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.get_frame","title":"get_frame(frame_idx)
","text":"Read a single frame from the video.
Parameters:
Name Type Description Default
frame_idx
int
Index of frame to read.
required
Returns:
Type Description
ndarray
Frame as a numpy array of shape (height, width, channels)
where the channels
dimension is 1 for grayscale videos and 3 for color videos.
If the grayscale
attribute is set to True
, the channels
dimension will be reduced to 1 if an RGB frame is loaded from the backend.
If the grayscale
attribute is set to None
, the grayscale
attribute will be automatically set based on the first frame read.
See also: get_frames
sleap_io/io/video_reading.py
def get_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n Frame as a numpy array of shape `(height, width, channels)` where the\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frames`\n \"\"\"\n if not self.has_frame(frame_idx):\n raise IndexError(f\"Frame index {frame_idx} out of range.\")\n\n img = self._read_frame(frame_idx)\n\n if self.grayscale is None:\n self.detect_grayscale(img)\n\n if self.grayscale:\n img = img[..., [0]]\n\n return img\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.get_frames","title":"get_frames(frame_inds)
","text":"Read a list of frames from the video.
Depending on the backend implementation, this may be faster than reading frames individually using get_frame
.
Parameters:
Name Type Description Default
frame_inds
list[int]
List of frame indices to read.
required
Returns:
Type Description
ndarray
Frames as a numpy array of shape (frames, height, width, channels)
where channels
dimension is 1 for grayscale videos and 3 for color videos.
If the grayscale
attribute is set to True
, the channels
dimension will be reduced to 1 if an RGB frame is loaded from the backend.
If the grayscale
attribute is set to None
, the grayscale
attribute will be automatically set based on the first frame read.
See also: get_frame
sleap_io/io/video_reading.py
def get_frames(self, frame_inds: list[int]) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Depending on the backend implementation, this may be faster than reading frames\n individually using `get_frame`.\n\n Args:\n frame_inds: List of frame indices to read.\n\n Returns:\n Frames as a numpy array of shape `(frames, height, width, channels)` where\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frame`\n \"\"\"\n imgs = self._read_frames(frame_inds)\n\n if self.grayscale is None:\n self.detect_grayscale(imgs[0])\n\n if self.grayscale:\n imgs = imgs[..., [0]]\n\n return imgs\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.has_frame","title":"has_frame(frame_idx)
","text":"Check if a frame index is contained in the video.
Parameters:
Name Type Description Default
frame_idx
int
Index of frame to check.
required
Returns:
Type Description
bool
True
if the index is contained in the video, otherwise False
.
sleap_io/io/video_reading.py
def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n return frame_idx < len(self)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.read_test_frame","title":"read_test_frame()
","text":"Read a single frame from the video to test for grayscale.
Note
This reads the frame at index 0. This may not be appropriate if the first frame is not available in a given backend.
Source code in sleap_io/io/video_reading.py
def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\n\n Note:\n This reads the frame at index 0. This may not be appropriate if the first\n frame is not available in a given backend.\n \"\"\"\n return self._read_frame(0)\n
"},{"location":"reference/sleap_io/io/video_writing/","title":"video_writing","text":""},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing","title":"sleap_io.io.video_writing
","text":"Utilities for writing videos.
Classes:
Name Description
VideoWriter
Simple video writer using imageio and FFMPEG.
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter","title":"VideoWriter
","text":"Simple video writer using imageio and FFMPEG.
Attributes:
Name Type Description
filename
Path
Path to output video file.
fps
float
Frames per second. Defaults to 30.
pixelformat
str
Pixel format for video. Defaults to \"yuv420p\".
codec
str
Codec to use for encoding. Defaults to \"libx264\".
crf
int
Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not \"libx264\".
preset
str
H264 encoding preset. Defaults to \"superfast\". No effect if codec is not \"libx264\".
output_params
list[str]
Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use ffmpeg -h encoder=libx264
to see all options for libx264 output_params.
This class can be used as a context manager to ensure the video is properly closed after writing. For example:
with VideoWriter(\"output.mp4\") as writer:\n for frame in frames:\n writer(frame)\n
Methods:
Name Description
__call__
Write a frame to the video.
__enter__
Context manager entry.
__exit__
Context manager exit.
build_output_params
Build the output parameters for FFMPEG.
close
Close the video writer.
open
Open the video writer.
write_frame
Write a frame to the video.
Source code in sleap_io/io/video_writing.py
@attrs.define\nclass VideoWriter:\n \"\"\"Simple video writer using imageio and FFMPEG.\n\n Attributes:\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n Notes:\n This class can be used as a context manager to ensure the video is properly\n closed after writing. For example:\n\n ```python\n with VideoWriter(\"output.mp4\") as writer:\n for frame in frames:\n writer(frame)\n ```\n \"\"\"\n\n filename: Path = attrs.field(converter=Path)\n fps: float = 30\n pixelformat: str = \"yuv420p\"\n codec: str = \"libx264\"\n crf: int = 25\n preset: str = \"superfast\"\n output_params: list[str] = attrs.field(factory=list)\n _writer: \"imageio.plugins.ffmpeg.FfmpegFormat.Writer\" | None = None\n\n def build_output_params(self) -> list[str]:\n \"\"\"Build the output parameters for FFMPEG.\"\"\"\n output_params = []\n if self.codec == \"libx264\":\n output_params.extend(\n [\n \"-crf\",\n str(self.crf),\n \"-preset\",\n self.preset,\n ]\n )\n return output_params + self.output_params\n\n def open(self):\n \"\"\"Open the video writer.\"\"\"\n self.close()\n\n self.filename.parent.mkdir(parents=True, exist_ok=True)\n self._writer = iio_v2.get_writer(\n self.filename.as_posix(),\n format=\"FFMPEG\",\n fps=self.fps,\n codec=self.codec,\n pixelformat=self.pixelformat,\n output_params=self.build_output_params(),\n )\n\n def close(self):\n \"\"\"Close the video writer.\"\"\"\n if self._writer is not None:\n self._writer.close()\n self._writer = None\n\n def write_frame(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n if self._writer is None:\n self.open()\n\n self._writer.append_data(frame)\n\n def __enter__(self):\n \"\"\"Context manager entry.\"\"\"\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> Optional[bool]:\n \"\"\"Context manager exit.\"\"\"\n self.close()\n return False\n\n def __call__(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n self.write_frame(frame)\n
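Tying this to the reading backends above, here is a minimal sketch that re-encodes the first frames of an existing video; the paths are placeholders and the fps/crf/preset values are illustrative rather than recommendations:
from sleap_io.io.video_reading import VideoBackend
from sleap_io.io.video_writing import VideoWriter

backend = VideoBackend.from_filename("input.mp4")

with VideoWriter("clips/output.mp4", fps=30, crf=23, preset="fast") as writer:
    for frame_idx in range(min(100, len(backend))):
        writer(backend[frame_idx])  # __call__ delegates to write_frame()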
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__call__","title":"__call__(frame)
","text":"Write a frame to the video.
Parameters:
Name Type Description Default
frame
ndarray
Frame to write to video. Should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
required
Source code in sleap_io/io/video_writing.py
def __call__(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n self.write_frame(frame)\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__enter__","title":"__enter__()
","text":"Context manager entry.
Source code in sleap_io/io/video_writing.py
def __enter__(self):\n \"\"\"Context manager entry.\"\"\"\n return self\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__exit__","title":"__exit__(exc_type, exc_value, traceback)
","text":"Context manager exit.
Source code in sleap_io/io/video_writing.py
def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n) -> Optional[bool]:\n \"\"\"Context manager exit.\"\"\"\n self.close()\n return False\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.build_output_params","title":"build_output_params()
","text":"Build the output parameters for FFMPEG.
Source code in sleap_io/io/video_writing.py
def build_output_params(self) -> list[str]:\n \"\"\"Build the output parameters for FFMPEG.\"\"\"\n output_params = []\n if self.codec == \"libx264\":\n output_params.extend(\n [\n \"-crf\",\n str(self.crf),\n \"-preset\",\n self.preset,\n ]\n )\n return output_params + self.output_params\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.close","title":"close()
","text":"Close the video writer.
Source code in sleap_io/io/video_writing.py
def close(self):\n \"\"\"Close the video writer.\"\"\"\n if self._writer is not None:\n self._writer.close()\n self._writer = None\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.open","title":"open()
","text":"Open the video writer.
Source code in sleap_io/io/video_writing.py
def open(self):\n \"\"\"Open the video writer.\"\"\"\n self.close()\n\n self.filename.parent.mkdir(parents=True, exist_ok=True)\n self._writer = iio_v2.get_writer(\n self.filename.as_posix(),\n format=\"FFMPEG\",\n fps=self.fps,\n codec=self.codec,\n pixelformat=self.pixelformat,\n output_params=self.build_output_params(),\n )\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.write_frame","title":"write_frame(frame)
","text":"Write a frame to the video.
Parameters:
Name Type Description Default
frame
ndarray
Frame to write to video. Should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
required
Source code in sleap_io/io/video_writing.py
def write_frame(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n if self._writer is None:\n self.open()\n\n self._writer.append_data(frame)\n
"},{"location":"reference/sleap_io/model/","title":"model","text":""},{"location":"reference/sleap_io/model/#sleap_io.model","title":"sleap_io.model
","text":"This subpackage contains data model interfaces.
Modules:
Name Description
camera
Data structure for a single camera view in a multi-camera setup.
instance
Data structures for data associated with a single instance such as an animal.
labeled_frame
Data structures for data contained within a single video frame.
labels
Data structure for the labels, a top-level container for pose data.
skeleton
Data model for skeletons.
suggestions
Data module for suggestions.
video
Data model for videos.
"},{"location":"reference/sleap_io/model/camera/","title":"camera","text":""},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera","title":"sleap_io.model.camera
","text":"Data structure for a single camera view in a multi-camera setup.
Classes:
Name Description
Camera
A camera used to record in a multi-view RecordingSession
.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera","title":"Camera
","text":"A camera used to record in a multi-view RecordingSession
.
Attributes:
Name Type Description
matrix
ndarray
Intrinsic camera matrix of size (3, 3) and type float64.
dist
ndarray
Radial-tangential distortion coefficients [k_1, k_2, p_1, p_2, k_3] of size (5,) and type float64.
size
tuple[int, int]
Image size of camera in pixels of size (2,) and type int.
rvec
ndarray
Rotation vector in unnormalized axis-angle representation of size (3,) and type float64.
tvec
ndarray
Translation vector of size (3,) and type float64.
extrinsic_matrix
ndarray
Extrinsic matrix of camera of size (4, 4) and type float64.
name
str
Camera name.
Methods:
Name Description
__attrs_post_init__
Initialize extrinsic matrix from rotation and translation vectors.
__getattr__
Get attribute by name.
project
Project 3D points to 2D using camera matrix and distortion coefficients.
undistort_points
Undistort points using camera matrix and distortion coefficients.
Attributes:
Name Type Description
extrinsic_matrix
ndarray
Get extrinsic matrix of camera.
rvec
ndarray
Get rotation vector of camera.
tvec
ndarray
Get translation vector of camera.
Source code in sleap_io/model/camera.py
@define\nclass Camera:\n \"\"\"A camera used to record in a multi-view `RecordingSession`.\n\n Attributes:\n matrix: Intrinsic camera matrix of size (3, 3) and type float64.\n dist: Radial-tangential distortion coefficients [k_1, k_2, p_1, p_2, k_3] of\n size (5,) and type float64.\n size: Image size of camera in pixels of size (2,) and type int.\n rvec: Rotation vector in unnormalized axis-angle representation of size (3,) and\n type float64.\n tvec: Translation vector of size (3,) and type float64.\n extrinsic_matrix: Extrinsic matrix of camera of size (4, 4) and type float64.\n name: Camera name.\n \"\"\"\n\n matrix: np.ndarray = field(\n default=np.eye(3),\n converter=lambda x: np.array(x, dtype=\"float64\"),\n )\n dist: np.ndarray = field(\n default=np.zeros(5), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n size: tuple[int, int] = field(\n default=None, converter=attrs.converters.optional(tuple)\n )\n _rvec: np.ndarray = field(\n default=np.zeros(3), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n _tvec: np.ndarray = field(\n default=np.zeros(3), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n name: str = field(default=None, converter=attrs.converters.optional(str))\n _extrinsic_matrix: np.ndarray = field(init=False)\n\n @matrix.validator\n @dist.validator\n @size.validator\n @_rvec.validator\n @_tvec.validator\n @_extrinsic_matrix.validator\n def _validate_shape(self, attribute: attrs.Attribute, value):\n \"\"\"Validate shape of attribute based on metadata.\n\n Args:\n attribute: Attribute to validate.\n value: Value of attribute to validate.\n\n Raises:\n ValueError: If attribute shape is not as expected.\n \"\"\"\n\n # Define metadata for each attribute\n attr_metadata = {\n \"matrix\": {\"shape\": (3, 3), \"type\": np.ndarray},\n \"dist\": {\"shape\": (5,), \"type\": np.ndarray},\n \"size\": {\"shape\": (2,), \"type\": tuple},\n \"_rvec\": {\"shape\": (3,), \"type\": np.ndarray},\n \"_tvec\": {\"shape\": (3,), \"type\": np.ndarray},\n \"_extrinsic_matrix\": {\"shape\": (4, 4), \"type\": np.ndarray},\n }\n optional_attrs = [\"size\"]\n\n # Skip validation if optional attribute is None\n if attribute.name in optional_attrs and value is None:\n return\n\n # Validate shape of attribute\n expected_shape = attr_metadata[attribute.name][\"shape\"]\n expected_type = attr_metadata[attribute.name][\"type\"]\n if np.shape(value) != expected_shape:\n raise ValueError(\n f\"{attribute.name} must be a {expected_type} of size {expected_shape}, \"\n f\"but recieved shape: {np.shape(value)} and type: {type(value)} for \"\n f\"value: {value}\"\n )\n\n def __attrs_post_init__(self):\n \"\"\"Initialize extrinsic matrix from rotation and translation vectors.\"\"\"\n\n # Initialize extrinsic matrix\n self._extrinsic_matrix = np.eye(4, dtype=\"float64\")\n self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]\n self._extrinsic_matrix[:3, 3] = self._tvec\n\n @property\n def rvec(self) -> np.ndarray:\n \"\"\"Get rotation vector of camera.\n\n Returns:\n Rotation vector of camera of size 3.\n \"\"\"\n\n return self._rvec\n\n @rvec.setter\n def rvec(self, value: np.ndarray):\n \"\"\"Set rotation vector and update extrinsic matrix.\n\n Args:\n value: Rotation vector of size 3.\n \"\"\"\n self._rvec = value\n\n # Update extrinsic matrix\n rotation_matrix, _ = cv2.Rodrigues(self._rvec)\n self._extrinsic_matrix[:3, :3] = rotation_matrix\n\n @property\n def tvec(self) -> np.ndarray:\n \"\"\"Get translation vector of camera.\n\n Returns:\n Translation 
vector of camera of size 3.\n \"\"\"\n\n return self._tvec\n\n @tvec.setter\n def tvec(self, value: np.ndarray):\n \"\"\"Set translation vector and update extrinsic matrix.\n\n Args:\n value: Translation vector of size 3.\n \"\"\"\n\n self._tvec = value\n\n # Update extrinsic matrix\n self._extrinsic_matrix[:3, 3] = self._tvec\n\n @property\n def extrinsic_matrix(self) -> np.ndarray:\n \"\"\"Get extrinsic matrix of camera.\n\n Returns:\n Extrinsic matrix of camera of size 4 x 4.\n \"\"\"\n\n return self._extrinsic_matrix\n\n @extrinsic_matrix.setter\n def extrinsic_matrix(self, value: np.ndarray):\n \"\"\"Set extrinsic matrix and update rotation and translation vectors.\n\n Args:\n value: Extrinsic matrix of size 4 x 4.\n \"\"\"\n\n self._extrinsic_matrix = value\n\n # Update rotation and translation vectors\n self._rvec, _ = cv2.Rodrigues(self._extrinsic_matrix[:3, :3])\n self._tvec = self._extrinsic_matrix[:3, 3]\n\n def undistort_points(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Undistort points using camera matrix and distortion coefficients.\n\n Args:\n points: Points to undistort of shape (N, 2).\n\n Returns:\n Undistorted points of shape (N, 2).\n \"\"\"\n\n shape = points.shape\n points = points.reshape(-1, 1, 2)\n out = cv2.undistortPoints(points, self.matrix, self.dist)\n return out.reshape(shape)\n\n def project(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Project 3D points to 2D using camera matrix and distortion coefficients.\n\n Args:\n points: 3D points to project of shape (N, 3) or (N, 1, 3).\n\n Returns:\n Projected 2D points of shape (N, 1, 2).\n \"\"\"\n\n points = points.reshape(-1, 1, 3)\n out, _ = cv2.projectPoints(\n points,\n self.rvec,\n self.tvec,\n self.matrix,\n self.dist,\n )\n return out\n\n # TODO: Remove this when we implement triangulation without aniposelib\n def __getattr__(self, name: str):\n \"\"\"Get attribute by name.\n\n Args:\n name: Name of attribute to get.\n\n Returns:\n Value of attribute.\n\n Raises:\n AttributeError: If attribute does not exist.\n \"\"\"\n\n if name in self.__attrs_attrs__:\n return getattr(self, name)\n\n # The aliases for methods called when triangulate with sleap_anipose\n method_aliases = {\n \"get_name\": self.name,\n \"get_extrinsic_matrix\": self.extrinsic_matrix,\n }\n\n def return_callable_method_alias():\n return method_aliases[name]\n\n if name in method_aliases:\n return return_callable_method_alias\n\n raise AttributeError(f\"'Camera' object has no attribute or method '{name}'\")\n
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.extrinsic_matrix","title":"extrinsic_matrix: np.ndarray
property
writable
","text":"Get extrinsic matrix of camera.
Returns:
Type Description
ndarray
Extrinsic matrix of camera of size 4 x 4.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.rvec","title":"rvec: np.ndarray
property
writable
","text":"Get rotation vector of camera.
Returns:
Type Description
ndarray
Rotation vector of camera of size 3.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.tvec","title":"tvec: np.ndarray
property
writable
","text":"Get translation vector of camera.
Returns:
Type Description
ndarray
Translation vector of camera of size 3.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Initialize extrinsic matrix from rotation and translation vectors.
Source code in sleap_io/model/camera.py
def __attrs_post_init__(self):\n \"\"\"Initialize extrinsic matrix from rotation and translation vectors.\"\"\"\n\n # Initialize extrinsic matrix\n self._extrinsic_matrix = np.eye(4, dtype=\"float64\")\n self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]\n self._extrinsic_matrix[:3, 3] = self._tvec\n
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.__getattr__","title":"__getattr__(name)
","text":"Get attribute by name.
Parameters:
Name Type Description Default
name
str
Name of attribute to get.
required
Returns:
Type Description
Value of attribute.
Raises:
Type Description
AttributeError
If attribute does not exist.
Source code in sleap_io/model/camera.py
def __getattr__(self, name: str):\n \"\"\"Get attribute by name.\n\n Args:\n name: Name of attribute to get.\n\n Returns:\n Value of attribute.\n\n Raises:\n AttributeError: If attribute does not exist.\n \"\"\"\n\n if name in self.__attrs_attrs__:\n return getattr(self, name)\n\n # The aliases for methods called when triangulate with sleap_anipose\n method_aliases = {\n \"get_name\": self.name,\n \"get_extrinsic_matrix\": self.extrinsic_matrix,\n }\n\n def return_callable_method_alias():\n return method_aliases[name]\n\n if name in method_aliases:\n return return_callable_method_alias\n\n raise AttributeError(f\"'Camera' object has no attribute or method '{name}'\")\n
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.project","title":"project(points)
","text":"Project 3D points to 2D using camera matrix and distortion coefficients.
Parameters:
Name Type Description Default
points
ndarray
3D points to project of shape (N, 3) or (N, 1, 3).
required
Returns:
Type Description
ndarray
Projected 2D points of shape (N, 1, 2).
Source code in sleap_io/model/camera.py
def project(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Project 3D points to 2D using camera matrix and distortion coefficients.\n\n Args:\n points: 3D points to project of shape (N, 3) or (N, 1, 3).\n\n Returns:\n Projected 2D points of shape (N, 1, 2).\n \"\"\"\n\n points = points.reshape(-1, 1, 3)\n out, _ = cv2.projectPoints(\n points,\n self.rvec,\n self.tvec,\n self.matrix,\n self.dist,\n )\n return out\n
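A small sketch of calling project with made-up calibration values; the intrinsics, rotation, and translation below are illustrative only, and the private _rvec/_tvec fields are assumed to be exposed as the rvec/tvec init arguments per standard attrs naming:
import numpy as np
from sleap_io.model.camera import Camera

camera = Camera(
    matrix=[[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]],  # made-up intrinsics
    rvec=[0.0, 0.0, 0.0],  # no rotation
    tvec=[0.0, 0.0, 0.0],  # no translation
    name="side",
)

points_3d = np.array([[0.0, 0.0, 2.0], [0.1, -0.1, 2.5]])  # shape (N, 3)
points_2d = camera.project(points_3d)                       # shape (N, 1, 2)
print(points_2d.reshape(-1, 2))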
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.undistort_points","title":"undistort_points(points)
","text":"Undistort points using camera matrix and distortion coefficients.
Parameters:
Name Type Description Default
points
ndarray
Points to undistort of shape (N, 2).
required
Returns:
Type Description
ndarray
Undistorted points of shape (N, 2).
Source code in sleap_io/model/camera.py
def undistort_points(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Undistort points using camera matrix and distortion coefficients.\n\n Args:\n points: Points to undistort of shape (N, 2).\n\n Returns:\n Undistorted points of shape (N, 2).\n \"\"\"\n\n shape = points.shape\n points = points.reshape(-1, 1, 2)\n out = cv2.undistortPoints(points, self.matrix, self.dist)\n return out.reshape(shape)\n
"},{"location":"reference/sleap_io/model/instance/","title":"instance","text":""},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance","title":"sleap_io.model.instance
","text":"Data structures for data associated with a single instance such as an animal.
The Instance
class is a SLEAP data structure that contains a collection of Point
s that correspond to landmarks within a Skeleton
.
PredictedInstance
additionally contains metadata associated with how the instance was estimated, such as confidence scores.
Classes:
Name Description
Instance
This class represents a ground truth instance such as an animal.
Point
A 2D spatial landmark and metadata associated with annotation.
PredictedInstance
A PredictedInstance
is an Instance
that was predicted using a model.
PredictedPoint
A predicted point with associated score generated by a prediction model.
Track
An object that represents the same animal/object across multiple detections.
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance","title":"Instance
","text":"This class represents a ground truth instance such as an animal.
An Instance
has a set of landmarks (Point
s) that correspond to the nodes defined in its Skeleton
.
It may also be associated with a Track
which links multiple instances together across frames or videos.
Attributes:
Name Type Description
points
Union[dict[Node, Point], dict[Node, PredictedPoint]]
A dictionary with keys as Node
s and values as Point
s containing all of the landmarks of the instance. This can also be specified as a dictionary with node names, a list of length n_nodes
, or a numpy array of shape (n_nodes, 2)
.
skeleton
Skeleton
The Skeleton
that describes the Node
s and Edge
s associated with this instance.
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
from_predicted
Optional[PredictedInstance]
The PredictedInstance
(if any) that this instance was initialized from. This is used with human-in-the-loop workflows.
Methods:
Name Description
__attrs_post_init__
Maintain point mappings between node and points after initialization.
__getitem__
Return the point associated with a node or None
if not set.
__len__
Return the number of points in the instance.
__repr__
Return a readable representation of the instance.
from_numpy
Create an instance object from a numpy array.
numpy
Return the instance points as a numpy array.
replace_skeleton
Replace the skeleton associated with the instance.
update_skeleton
Update the points dictionary to match the skeleton.
Attributes:
Name Type Description
is_empty
bool
Return True
if no points are visible on the instance.
n_visible
int
Return the number of visible points in the instance.
Source code in sleap_io/model/instance.py
@define(auto_attribs=True, slots=True, eq=True)\nclass Instance:\n \"\"\"This class represents a ground truth instance such as an animal.\n\n An `Instance` has a set of landmarks (`Point`s) that correspond to the nodes defined\n in its `Skeleton`.\n\n It may also be associated with a `Track` which links multiple instances together\n across frames or videos.\n\n Attributes:\n points: A dictionary with keys as `Node`s and values as `Point`s containing all\n of the landmarks of the instance. This can also be specified as a dictionary\n with node names, a list of length `n_nodes`, or a numpy array of shape\n `(n_nodes, 2)`.\n skeleton: The `Skeleton` that describes the `Node`s and `Edge`s associated with\n this instance.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: The `PredictedInstance` (if any) that this instance was\n initialized from. This is used with human-in-the-loop workflows.\n \"\"\"\n\n _POINT_TYPE = Point\n\n def _make_default_point(self, x, y):\n return self._POINT_TYPE(x, y, visible=not (math.isnan(x) or math.isnan(y)))\n\n def _convert_points(self, attr, points):\n \"\"\"Maintain points mappings between nodes and points.\"\"\"\n if type(points) == np.ndarray:\n points = points.tolist()\n\n if type(points) == list:\n if len(points) != len(self.skeleton):\n raise ValueError(\n \"If specifying points as a list, must provide as many points as \"\n \"nodes in the skeleton.\"\n )\n points = {node: pt for node, pt in zip(self.skeleton.nodes, points)}\n\n if type(points) == dict:\n keys = [\n node if type(node) == Node else self.skeleton[node]\n for node in points.keys()\n ]\n vals = [\n (\n point\n if type(point) == self._POINT_TYPE\n else self._make_default_point(*point)\n )\n for point in points.values()\n ]\n points = {k: v for k, v in zip(keys, vals)}\n\n missing_nodes = list(set(self.skeleton.nodes) - set(points.keys()))\n for node in missing_nodes:\n points[node] = self._make_default_point(x=np.nan, y=np.nan)\n\n return points\n\n points: Union[dict[Node, Point], dict[Node, PredictedPoint]] = field(\n on_setattr=_convert_points, eq=cmp_using(eq=_compare_points) # type: ignore\n )\n skeleton: Skeleton\n track: Optional[Track] = None\n from_predicted: Optional[PredictedInstance] = None\n\n def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n\n def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n\n def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n\n @property\n def n_visible(self) -> int:\n \"\"\"Return the number of visible points in the instance.\"\"\"\n return sum(pt.visible for pt in self.points.values())\n\n @property\n def is_empty(self) -> bool:\n \"\"\"Return `True` if no points are visible on the instance.\"\"\"\n return self.n_visible == 
0\n\n @classmethod\n def from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n ) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n\n def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. 
No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.is_empty","title":"is_empty: bool
property
","text":"Return True
if no points are visible on the instance.
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.n_visible","title":"n_visible: int
property
","text":"Return the number of visible points in the instance.
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Maintain point mappings between node and points after initialization.
Source code in sleap_io/model/instance.py
def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__getitem__","title":"__getitem__(node)
","text":"Return the point associated with a node or None
if not set.
sleap_io/model/instance.py
def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__len__","title":"__len__()
","text":"Return the number of points in the instance.
Source code in sleap_io/model/instance.py
def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code in sleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.from_numpy","title":"from_numpy(points, skeleton, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
Name Type Description Default
points
ndarray
A numpy array of shape (n_nodes, 2)
corresponding to the points of the skeleton. Values of np.nan
indicate \"missing\" nodes.
skeleton
Skeleton
The Skeleton
that this Instance
is associated with. It should have n_nodes
nodes.
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
None
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n
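A minimal sketch of building an instance this way; the node names and coordinates are made up, and it assumes Skeleton can be constructed directly from a list of node names:
import numpy as np
from sleap_io.model.instance import Instance
from sleap_io.model.skeleton import Skeleton

skeleton = Skeleton(["head", "thorax", "tail"])  # assumes node names are accepted directly
points = np.array(
    [
        [10.0, 20.0],      # head
        [15.0, 25.0],      # thorax
        [np.nan, np.nan],  # tail is "missing"
    ]
)

instance = Instance.from_numpy(points=points, skeleton=skeleton)
print(instance.n_visible)  # 2 visible points
print(instance.numpy())    # round-trips back to an (n_nodes, 2) array with NaNs for missing nodes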
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.numpy","title":"numpy()
","text":"Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.replace_skeleton","title":"replace_skeleton(new_skeleton, node_map=None, rev_node_map=None)
","text":"Replace the skeleton associated with the instance.
The points dictionary will be updated to match the new skeleton.
Parameters:
Name Type Description Defaultnew_skeleton
Skeleton
The new Skeleton
to associate with the instance.
node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node
objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed.
None
rev_node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the new skeleton to nodes in the old skeleton. This is used internally when calling from Labels.replace_skeleton()
as it is more efficient to compute this mapping once and pass it to all instances. No validation is done on this mapping, so nodes are expected to be Node
objects.
None
Source code in sleap_io/model/instance.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
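A sketch of skeleton replacement on a single instance; as noted above, Labels.replace_skeleton() is preferred for whole projects. The Skeleton import path and node names are assumptions:

import numpy as np
from sleap_io.model.instance import Instance
from sleap_io.model.skeleton import Skeleton  # module path assumed

old_skel = Skeleton(["head", "thorax", "abdomen"])  # hypothetical node names
new_skel = Skeleton(["head", "abdomen"])            # "thorax" dropped

inst = Instance.from_numpy(
    np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]), skeleton=old_skel
)
# With no node_map, nodes are matched by identical names; unmapped points are removed.
inst.replace_skeleton(new_skel)
print(inst.numpy())  # [[1.0, 1.0], [3.0, 3.0]]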
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.update_skeleton","title":"update_skeleton()
","text":"Update the points dictionary to match the skeleton.
Points associated with nodes that are no longer in the skeleton will be removed.
Additionally, the keys of the points dictionary will be ordered to match the order of the nodes in the skeleton.
Notes: This method is useful when the skeleton has been updated (e.g., nodes removed or reordered).
However, it is recommended to use Labels
-level methods (e.g., Labels.remove_nodes()
) when manipulating the skeleton as these will automatically call this method on every instance.
sleap_io/model/instance.py
def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point","title":"Point
","text":"A 2D spatial landmark and metadata associated with annotation.
Attributes:
Name Type Descriptionx
float
The horizontal pixel location of point in image coordinates.
y
float
The vertical pixel location of point in image coordinates.
visible
bool
Whether point is visible in the image or not.
complete
bool
Whether the point has been verified by the user labeler.
Class variables: eq_atol: Controls the absolute tolerance allowed in x and y when comparing two Points for equality. eq_rtol: Controls the relative tolerance allowed in x and y when comparing two Points for equality.
Methods:
Name Description__eq__
Compare self
and other
for equality.
numpy
Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
@define\nclass Point:\n \"\"\"A 2D spatial landmark and metadata associated with annotation.\n\n Attributes:\n x: The horizontal pixel location of point in image coordinates.\n y: The vertical pixel location of point in image coordinates.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n\n Class variables:\n eq_atol: Controls absolute tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n eq_rtol: Controls relative tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n\n \"\"\"\n\n eq_atol: ClassVar[float] = 1e-08\n eq_rtol: ClassVar[float] = 0\n\n x: float\n y: float\n visible: bool = True\n complete: bool = False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
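A small sketch of the tolerance-based equality described above; the coordinate values are arbitrary:

from sleap_io.model.instance import Point

a = Point(x=1.0, y=2.0)
b = Point(x=1.0 + 1e-9, y=2.0)
print(a == b)  # True: difference is within the default eq_atol of 1e-08

Point.eq_atol = 0      # disable the absolute tolerance
print(a == b)          # False: coordinates must now match exactly
Point.eq_atol = 1e-08  # restore the default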
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
Precision error between the respective x
and y
properties of two instances may be allowed or controlled via the Point.eq_atol
and Point.eq_rtol
class variables. Set to zero to disable their effect. Internally, numpy.isclose()
is used for the comparison: https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
Parameters:
Name Type Description Defaultother
object
Instance of Point
to compare to.
Returns:
Type Descriptionbool
Returns True if all attributes of self
and other
are identical (possibly allowing precision error for x
and y
attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point.numpy","title":"numpy()
","text":"Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance","title":"PredictedInstance
","text":" Bases: Instance
A PredictedInstance
is an Instance
that was predicted using a model.
Attributes:
Name Type Descriptionskeleton
The Skeleton
that this Instance
is associated with.
points
A dictionary where keys are Skeleton
nodes and values are Point
s.
track
An optional Track
associated with a unique animal/object across frames or videos.
from_predicted
Optional[PredictedInstance]
Not applicable in PredictedInstance
s (must be set to None
).
score
float
The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
tracking_score
Optional[float]
The score associated with the Track
assignment. This is typically the value from the score matrix used in an identity assignment.
Methods:
Name Description__repr__
Return a readable representation of the instance.
from_numpy
Create an instance object from a numpy array.
numpy
Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
@define\nclass PredictedInstance(Instance):\n \"\"\"A `PredictedInstance` is an `Instance` that was predicted using a model.\n\n Attributes:\n skeleton: The `Skeleton` that this `Instance` is associated with.\n points: A dictionary where keys are `Skeleton` nodes and values are `Point`s.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: Not applicable in `PredictedInstance`s (must be set to `None`).\n score: The instance detection or part grouping prediction score. This is a\n scalar that represents the confidence with which this entire instance was\n predicted. This may not always be applicable depending on the model type.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity assignment.\n \"\"\"\n\n _POINT_TYPE = PredictedPoint\n\n from_predicted: Optional[PredictedInstance] = field(\n default=None, validator=validators.instance_of(type(None))\n )\n score: float = 0.0\n tracking_score: Optional[float] = 0\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n\n @classmethod\n def from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n ) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n\n def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code in sleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.from_numpy","title":"from_numpy(points, point_scores, instance_score, skeleton, tracking_score=None, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
Name Type Description Defaultpoints
ndarray
A numpy array of shape (n_nodes, 2)
corresponding to the points of the skeleton. Values of np.nan
indicate \"missing\" nodes.
point_scores
ndarray
The point-level prediction scores. This is an array representing the confidence with which each point in the instance was predicted. This may not always be applicable depending on the model type.
requiredinstance_score
float
The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
requiredskeleton
Skeleton
The Skeleton
that this Instance
is associated with. It should have n_nodes
nodes.
tracking_score
Optional[float]
The score associated with the Track
assignment. This is typically the value from the score matrix used in an identity assignment.
None
track
Optional[Track]
An optional Track
associated with a unique animal/object across frames or videos.
None
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n
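A minimal sketch of constructing a prediction from arrays. The Skeleton import path, node names, and score values are assumptions for illustration:

import numpy as np
from sleap_io.model.instance import PredictedInstance
from sleap_io.model.skeleton import Skeleton  # module path assumed

skeleton = Skeleton(["head", "thorax", "abdomen"])  # hypothetical node names
points = np.array([[10.0, 20.0], [15.0, 25.0], [np.nan, np.nan]])
point_scores = np.array([0.98, 0.91, 0.0])

pred = PredictedInstance.from_numpy(
    points=points,
    point_scores=point_scores,
    instance_score=0.87,
    skeleton=skeleton,
)
print(pred.numpy(scores=True).shape)  # (3, 3): x, y, and per-point score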
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.numpy","title":"numpy(scores=False)
","text":"Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint","title":"PredictedPoint
","text":" Bases: Point
A predicted point with associated score generated by a prediction model.
It has all the properties of a labeled Point
, plus a score
.
Attributes:
Name Type Descriptionx
The horizontal pixel location of point within image frame.
y
The vertical pixel location of point within image frame.
visible
Whether point is visible in the image or not.
complete
Whether the point has been verified by the user labeler.
score
float
The point-level prediction score. This is typically a confidence value between 0 and 1.
Methods:
Name Description__eq__
Compare self
and other
for equality.
numpy
Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
@define\nclass PredictedPoint(Point):\n \"\"\"A predicted point with associated score generated by a prediction model.\n\n It has all the properties of a labeled `Point`, plus a `score`.\n\n Attributes:\n x: The horizontal pixel location of point within image frame.\n y: The vertical pixel location of point within image frame.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n score: The point-level prediction score. This is typically the confidence and\n set to a value between 0 and 1.\n \"\"\"\n\n score: float = 0.0\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
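A short sketch of PredictedPoint.numpy() returning coordinates plus score; the values are arbitrary:

from sleap_io.model.instance import PredictedPoint

pp = PredictedPoint(x=12.5, y=7.0, score=0.93)
print(pp.numpy())  # [12.5, 7.0, 0.93]

hidden = PredictedPoint(x=0.0, y=0.0, visible=False, score=0.1)
print(hidden.numpy())  # [nan, nan, nan]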
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
See Point.__eq__()
for important notes about point equality semantics!
Parameters:
Name Type Description Defaultother
object
Instance of PredictedPoint
to compare
Returns:
Type Descriptionbool
Returns True if all attributes of self
and other
are identical (possibly allowing precision error for x
and y
attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint.numpy","title":"numpy()
","text":"Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Track","title":"Track
","text":"An object that represents the same animal/object across multiple detections.
This allows tracking of unique entities in the video over time and space.
A Track
may also be used to refer to unique identity classes that span multiple videos, such as \"female mouse\"
.
Attributes:
Name Type Descriptionname
str
A name given to this track for identification purposes.
Notes: Tracks are compared by identity. This means that distinct track objects with the same name are considered different.
sleap_io/model/instance.py
@define(eq=False)\nclass Track:\n \"\"\"An object that represents the same animal/object across multiple detections.\n\n This allows tracking of unique entities in the video over time and space.\n\n A `Track` may also be used to refer to unique identity classes that span multiple\n videos, such as `\"female mouse\"`.\n\n Attributes:\n name: A name given to this track for identification purposes.\n\n Notes:\n `Track`s are compared by identity. This means that unique track objects with the\n same name are considered to be different.\n \"\"\"\n\n name: str = \"\"\n
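A short sketch of the identity-based comparison noted above; the track name is arbitrary:

from sleap_io.model.instance import Track

t1 = Track(name="female mouse")
t2 = Track(name="female mouse")
print(t1 == t2)  # False: distinct Track objects differ even with the same name
print(t1 == t1)  # True: a track is only equal to itself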
"},{"location":"reference/sleap_io/model/labeled_frame/","title":"labeled_frame","text":""},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame","title":"sleap_io.model.labeled_frame
","text":"Data structures for data contained within a single video frame.
The LabeledFrame
class is a data structure that contains Instance
s and PredictedInstance
s that are associated with a single frame within a video.
Classes:
Name DescriptionLabeledFrame
Labeled data for a single frame of a video.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame","title":"LabeledFrame
","text":"Labeled data for a single frame of a video.
Attributes:
Name Type Descriptionvideo
Video
The Video
associated with this LabeledFrame
.
frame_idx
int
The index of the LabeledFrame
in the Video
.
instances
list[Union[Instance, PredictedInstance]]
List of Instance
objects associated with this LabeledFrame
.
Instances of this class are hashed by identity, not by value. This means that two LabeledFrame
instances with the same attributes will NOT be considered equal in a set or dict.
Methods:
Name Description__getitem__
Return the Instance
at key
index in the instances
list.
__iter__
Iterate over Instance
s in instances
list.
__len__
Return the number of instances in the frame.
numpy
Return all instances in the frame as a numpy array.
remove_empty_instances
Remove all instances with no visible points.
remove_predictions
Remove all PredictedInstance
objects from the frame.
Attributes:
Name Type Descriptionhas_predicted_instances
bool
Return True if the frame has any predicted instances.
has_user_instances
bool
Return True if the frame has any user-labeled instances.
image
ndarray
Return the image of the frame as a numpy array.
predicted_instances
list[Instance]
Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions
list[Instance]
Return a list of \"unused\" PredictedInstance
objects in frame.
user_instances
list[Instance]
Frame instances that are user-labeled (Instance
objects).
sleap_io/model/labeled_frame.py
@define(eq=False)\nclass LabeledFrame:\n \"\"\"Labeled data for a single frame of a video.\n\n Attributes:\n video: The `Video` associated with this `LabeledFrame`.\n frame_idx: The index of the `LabeledFrame` in the `Video`.\n instances: List of `Instance` objects associated with this `LabeledFrame`.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `LabeledFrame` instances with the same attributes will NOT be considered\n equal in a set or dict.\n \"\"\"\n\n video: Video\n frame_idx: int = field(converter=int)\n instances: list[Union[Instance, PredictedInstance]] = field(factory=list)\n\n def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n\n def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n\n def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n\n @property\n def user_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are user-labeled (`Instance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == Instance]\n\n @property\n def has_user_instances(self) -> bool:\n \"\"\"Return True if the frame has any user-labeled instances.\"\"\"\n for inst in self.instances:\n if type(inst) == Instance:\n return True\n return False\n\n @property\n def predicted_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are predicted by a model (`PredictedInstance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == PredictedInstance]\n\n @property\n def has_predicted_instances(self) -> bool:\n \"\"\"Return True if the frame has any predicted instances.\"\"\"\n for inst in self.instances:\n if type(inst) == PredictedInstance:\n return True\n return False\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n\n @property\n def image(self) -> np.ndarray:\n \"\"\"Return the image of the frame as a numpy array.\"\"\"\n return self.video[self.frame_idx]\n\n @property\n def unused_predictions(self) -> list[Instance]:\n \"\"\"Return a list of \"unused\" `PredictedInstance` objects in frame.\n\n This is all of the `PredictedInstance` objects which do not have a corresponding\n `Instance` in the same track in the same frame.\n \"\"\"\n unused_predictions = []\n any_tracks = [inst.track for inst in self.instances if inst.track is not None]\n if len(any_tracks):\n # Use tracks to determine which predicted instances have been used\n used_tracks = [\n inst.track\n for inst in self.instances\n if type(inst) == Instance and inst.track is not None\n ]\n unused_predictions = [\n inst\n for inst in self.instances\n if inst.track not in used_tracks and type(inst) == PredictedInstance\n ]\n\n else:\n # Use from_predicted to determine which predicted instances have been used\n # TODO: should we always do this instead of using tracks?\n used_instances = [\n inst.from_predicted\n for inst in self.instances\n if inst.from_predicted is not None\n ]\n 
unused_predictions = [\n inst\n for inst in self.instances\n if type(inst) == PredictedInstance and inst not in used_instances\n ]\n\n return unused_predictions\n\n def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n\n def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
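A minimal sketch of building a frame with one user instance and one prediction. The Video and Skeleton module paths, the filename, and the coordinates are assumptions, and the video file is never read here:

import numpy as np
from sleap_io.model.instance import Instance, PredictedInstance
from sleap_io.model.labeled_frame import LabeledFrame
from sleap_io.model.skeleton import Skeleton  # module path assumed
from sleap_io.model.video import Video        # module path assumed

skeleton = Skeleton(["head", "tail"])  # hypothetical node names
video = Video(filename="session.mp4")  # hypothetical filename

user_inst = Instance.from_numpy(np.array([[5.0, 5.0], [9.0, 9.0]]), skeleton=skeleton)
pred_inst = PredictedInstance.from_numpy(
    points=np.array([[5.2, 5.1], [9.1, 8.9]]),
    point_scores=np.array([0.9, 0.8]),
    instance_score=0.85,
    skeleton=skeleton,
)

lf = LabeledFrame(video=video, frame_idx=0, instances=[user_inst, pred_inst])
print(len(lf), lf.has_user_instances, lf.has_predicted_instances)  # 2 True True
lf.remove_predictions()
print(len(lf))  # 1: only the user instance remains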
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.has_predicted_instances","title":"has_predicted_instances: bool
property
","text":"Return True if the frame has any predicted instances.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.has_user_instances","title":"has_user_instances: bool
property
","text":"Return True if the frame has any user-labeled instances.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.image","title":"image: np.ndarray
property
","text":"Return the image of the frame as a numpy array.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.predicted_instances","title":"predicted_instances: list[Instance]
property
","text":"Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions: list[Instance]
property
","text":"Return a list of \"unused\" PredictedInstance
objects in frame.
This is all of the PredictedInstance
objects which do not have a corresponding Instance
in the same track in the same frame.
user_instances: list[Instance]
property
","text":"Frame instances that are user-labeled (Instance
objects).
__getitem__(key)
","text":"Return the Instance
at key
index in the instances
list.
sleap_io/model/labeled_frame.py
def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.__iter__","title":"__iter__()
","text":"Iterate over Instance
s in instances
list.
sleap_io/model/labeled_frame.py
def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.__len__","title":"__len__()
","text":"Return the number of instances in the frame.
Source code in sleap_io/model/labeled_frame.py
def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.numpy","title":"numpy()
","text":"Return all instances in the frame as a numpy array.
Returns:
Type Descriptionndarray
Points as a numpy array of shape (n_instances, n_nodes, 2)
.
Note that the order of the instances is arbitrary.
Source code in sleap_io/model/labeled_frame.py
def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n
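Continuing the hypothetical frame from the sketch above, the stacked array has one row per instance:

arr = lf.numpy()
print(arr.shape)  # (n_instances, n_nodes, 2); here (1, 2, 2) after remove_predictions()
print(arr[0])     # the user instance's points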
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.remove_empty_instances","title":"remove_empty_instances()
","text":"Remove all instances with no visible points.
Source code in sleap_io/model/labeled_frame.py
def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.remove_predictions","title":"remove_predictions()
","text":"Remove all PredictedInstance
objects from the frame.
sleap_io/model/labeled_frame.py
def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n
"},{"location":"reference/sleap_io/model/labels/","title":"labels","text":""},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels","title":"sleap_io.model.labels
","text":"Data structure for the labels, a top-level container for pose data.
Label
s contain LabeledFrame
s, which in turn contain Instance
s, which contain Point
s.
This structure also maintains metadata that is common across all child objects such as Track
s, Video
s, Skeleton
s and others.
It is intended to be the entry point for deserialization and the main container used for serialization. It is designed to support both labeled data (used for training models) and predictions (inference results).
Classes:
Name DescriptionLabels
Pose data for a set of videos that have user labels and/or predictions.
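A minimal end-to-end sketch of assembling a Labels container and splitting it. The module paths for Skeleton and Video, the filename, and all coordinates are illustrative assumptions:

import numpy as np
from sleap_io.model.instance import Instance
from sleap_io.model.labeled_frame import LabeledFrame
from sleap_io.model.labels import Labels
from sleap_io.model.skeleton import Skeleton  # module path assumed
from sleap_io.model.video import Video        # module path assumed

skeleton = Skeleton(["head", "tail"])  # hypothetical node names
video = Video(filename="session.mp4")  # hypothetical filename; never read here

lfs = [
    LabeledFrame(
        video=video,
        frame_idx=i,
        instances=[Instance.from_numpy(np.full((2, 2), float(i)), skeleton=skeleton)],
    )
    for i in range(4)
]
labels = Labels(labeled_frames=lfs)

print(len(labels))            # 4
print(labels.video is video)  # True: videos are collected from the frames automatically
train, val = labels.split(0.75, seed=0)
print(len(train), len(val))   # 3 1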
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels","title":"Labels
","text":"Pose data for a set of videos that have user labels and/or predictions.
Attributes:
Name Type Descriptionlabeled_frames
list[LabeledFrame]
A list of LabeledFrame
s that are associated with this dataset.
videos
list[Video]
A list of Video
s that are associated with this dataset. Videos do not need to have corresponding LabeledFrame
s if they do not have any labels or predictions yet.
skeletons
list[Skeleton]
A list of Skeleton
s that are associated with this dataset. This should generally only contain a single skeleton.
tracks
list[Track]
A list of Track
s that are associated with this dataset.
suggestions
list[SuggestionFrame]
A list of SuggestionFrame
s that are associated with this dataset.
provenance
dict[str, Any]
Dictionary of arbitrary metadata providing additional information about where the dataset came from.
Notes: Videos in the contained LabeledFrames, as well as Skeletons and Tracks in the contained Instances, are added to the respective lists automatically.
Methods:
Name Description__attrs_post_init__
Append videos, skeletons, and tracks seen in labeled_frames
to Labels
.
__getitem__
Return one or more labeled frames based on indexing criteria.
__iter__
Iterate over labeled_frames
list when calling iter method on Labels
.
__len__
Return number of labeled frames.
__repr__
Return a readable representation of the labels.
__str__
Return a readable representation of the labels.
append
Append a labeled frame to the labels.
clean
Remove empty frames, unused skeletons, tracks and videos.
extend
Append a list of labeled frames to the labels.
extract
Extract a set of frames into a new Labels object.
find
Search for labeled frames given video and/or frame index.
make_training_splits
Make splits for training with embedded images.
numpy
Construct a numpy array from instance points.
remove_nodes
Remove nodes from the skeleton.
remove_predictions
Remove all predicted instances from the labels.
rename_nodes
Rename nodes in the skeleton.
reorder_nodes
Reorder nodes in the skeleton.
replace_filenames
Replace video filenames.
replace_skeleton
Replace the skeleton in the labels.
replace_videos
Replace videos and update all references.
save
Save labels to file in specified format.
split
Separate the labels into random splits.
trim
Trim the labels to a subset of frames and videos accordingly.
update
Update data structures based on contents.
Attributes:
Name Type Descriptioninstances
Iterator[Instance]
Return an iterator over all instances within all labeled frames.
skeleton
Skeleton
Return the skeleton if there is only a single skeleton in the labels.
user_labeled_frames
list[LabeledFrame]
Return all labeled frames with user (non-predicted) instances.
video
Video
Return the video if there is only a single video in the labels.
Source code in sleap_io/model/labels.py
@define\nclass Labels:\n \"\"\"Pose data for a set of videos that have user labels and/or predictions.\n\n Attributes:\n labeled_frames: A list of `LabeledFrame`s that are associated with this dataset.\n videos: A list of `Video`s that are associated with this dataset. Videos do not\n need to have corresponding `LabeledFrame`s if they do not have any\n labels or predictions yet.\n skeletons: A list of `Skeleton`s that are associated with this dataset. This\n should generally only contain a single skeleton.\n tracks: A list of `Track`s that are associated with this dataset.\n suggestions: A list of `SuggestionFrame`s that are associated with this dataset.\n provenance: Dictionary of arbitrary metadata providing additional information\n about where the dataset came from.\n\n Notes:\n `Video`s in contain `LabeledFrame`s, and `Skeleton`s and `Track`s in contained\n `Instance`s are added to the respective lists automatically.\n \"\"\"\n\n labeled_frames: list[LabeledFrame] = field(factory=list)\n videos: list[Video] = field(factory=list)\n skeletons: list[Skeleton] = field(factory=list)\n tracks: list[Track] = field(factory=list)\n suggestions: list[SuggestionFrame] = field(factory=list)\n provenance: dict[str, Any] = field(factory=dict)\n\n def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n\n def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n\n def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n ) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n\n def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n\n def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n 
f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n\n def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n ) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. 
Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n\n @property\n def video(self) -> Video:\n \"\"\"Return the video if there is only a single video in the labels.\"\"\"\n if len(self.videos) == 0:\n raise ValueError(\"There are no videos in the labels.\")\n elif len(self.videos) == 1:\n return self.videos[0]\n else:\n raise ValueError(\n \"Labels.video can only be used when there is only a single video saved \"\n \"in the labels. Use Labels.videos instead.\"\n )\n\n @property\n def skeleton(self) -> Skeleton:\n \"\"\"Return the skeleton if there is only a single skeleton in the labels.\"\"\"\n if len(self.skeletons) == 0:\n raise ValueError(\"There are no skeletons in the labels.\")\n elif len(self.skeletons) == 1:\n return self.skeletons[0]\n else:\n raise ValueError(\n \"Labels.skeleton can only be used when there is only a single skeleton \"\n \"saved in the labels. Use Labels.skeletons instead.\"\n )\n\n def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n ) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. 
If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n\n def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n ):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n\n def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n ):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n 
if frames:\n self.labeled_frames = kept_frames\n\n def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n\n @property\n def user_labeled_frames(self) -> list[LabeledFrame]:\n \"\"\"Return all labeled frames with user (non-predicted) instances.\"\"\"\n return [lf for lf in self.labeled_frames if lf.has_user_instances]\n\n @property\n def instances(self) -> Iterator[Instance]:\n \"\"\"Return an iterator over all instances within all labeled frames.\"\"\"\n return (instance for lf in self.labeled_frames for instance in lf.instances)\n\n def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n ):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n\n def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. 
If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n ):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. 
**All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n\n def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n ):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n\n def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n ):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. 
Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n\n def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n ) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n\n def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. 
If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n\n def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n ) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. 
If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n\n def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. 
Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.instances","title":"instances: Iterator[Instance]
property
","text":"Return an iterator over all instances within all labeled frames.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.skeleton","title":"skeleton: Skeleton
property
","text":"Return the skeleton if there is only a single skeleton in the labels.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.user_labeled_frames","title":"user_labeled_frames: list[LabeledFrame]
property
","text":"Return all labeled frames with user (non-predicted) instances.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.video","title":"video: Video
property
","text":"Return the video if there is only a single video in the labels.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Append videos, skeletons, and tracks seen in labeled_frames
to Labels
.
sleap_io/model/labels.py
def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__getitem__","title":"__getitem__(key)
","text":"Return one or more labeled frames based on indexing criteria.
Source code in sleap_io/model/labels.py
def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n
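As a minimal usage sketch (the "labels.slp" path and the frame index 42 are hypothetical placeholders):
import sleap_io as sio

labels = sio.load_slp("labels.slp")  # hypothetical project file
lf = labels[0]                       # integer -> a single LabeledFrame
first_ten = labels[0:10]            # slice -> list of LabeledFrame
in_video = labels[labels.video]     # Video -> all labeled frames in that video
exact = labels[(labels.video, 42)]  # (Video, frame_idx) -> that frame, or IndexError if unlabeled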
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__iter__","title":"__iter__()
","text":"Iterate over labeled_frames
list when calling iter method on Labels
.
sleap_io/model/labels.py
def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__len__","title":"__len__()
","text":"Return number of labeled frames.
Source code insleap_io/model/labels.py
def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__repr__","title":"__repr__()
","text":"Return a readable representation of the labels.
Source code insleap_io/model/labels.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__str__","title":"__str__()
","text":"Return a readable representation of the labels.
Source code insleap_io/model/labels.py
def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.append","title":"append(lf, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlf
LabeledFrame
A labeled frame to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
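A brief sketch, assuming labels is an existing Labels object and frame 100 is a hypothetical frame of interest:
from sleap_io import LabeledFrame

# Add an empty labeled frame for frame 100 of the project's only video.
# With update=True (the default), the videos/skeletons/tracks lists stay in sync.
lf = LabeledFrame(video=labels.video, frame_idx=100)
labels.append(lf)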
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.clean","title":"clean(frames=True, empty_instances=False, skeletons=True, tracks=True, videos=False)
","text":"Remove empty frames, unused skeletons, tracks and videos.
Parameters:
Name Type Description Defaultframes
bool
If True
(the default), remove empty frames.
True
empty_instances
bool
If True
(NOT default), remove instances that have no visible points.
False
skeletons
bool
If True
(the default), remove unused skeletons.
True
tracks
bool
If True
(the default), remove unused tracks.
True
videos
bool
If True
(NOT default), remove videos that have no labeled frames.
False
Source code in sleap_io/model/labels.py
def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n if frames:\n self.labeled_frames = kept_frames\n
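A sketch of one reasonable cleanup pass, assuming an existing labels object (the flag combination shown is illustrative):
# Remove empty frames and unused skeletons/tracks (the defaults), and additionally
# drop instances with no visible points and videos with no labeled frames.
labels.clean(empty_instances=True, videos=True)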
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.extend","title":"extend(lfs, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlfs
list[LabeledFrame]
A list of labeled frames to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.extract","title":"extract(inds, copy=True)
","text":"Extract a set of frames into a new Labels object.
Parameters:
Name Type Description Defaultinds
list[int] | list[tuple[Video, int]] | ndarray
Indices of labeled frames. Can be specified as a list or array of integer indices of labeled frames or tuples of Video and frame indices.
requiredcopy
bool
If True
(the default), return a copy of the frames and containing objects. Otherwise, return a reference to the data.
True
Returns:
Type DescriptionLabels
A new Labels
object containing the selected labels.
This copies the labeled frames and their associated data, including skeletons and tracks, and tries to maintain the relative ordering.
This also copies the provenance and inserts an extra key: \"source_labels\"
with the path to the current labels, if available.
It does NOT copy suggested frames.
Source code in sleap_io/model/labels.py
def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n
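A sketch, assuming an existing labels object (the indices are placeholders):
# Deep-copy the first three labeled frames into a standalone Labels object.
subset = labels.extract([0, 1, 2], copy=True)

# Frames can also be addressed as (Video, frame_idx) tuples.
pair = labels.extract([(labels.video, 10), (labels.video, 11)])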
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.find","title":"find(video, frame_idx=None, return_new=False)
","text":"Search for labeled frames given video and/or frame index.
Parameters:
Name Type Description Defaultvideo
Video
A Video
that is associated with the project.
frame_idx
int | list[int] | None
The frame index (or indices) which we want to find in the video. If a range is specified, we'll return all frames with indices in that range. If not specified, we'll return all labeled frames for the video.
None
return_new
bool
Whether to return a singleton of a new and empty LabeledFrame
if none are found in the project.
False
Returns:
Type Descriptionlist[LabeledFrame]
List of LabeledFrame
objects that match the criteria.
The list will be empty if no matches are found, unless return_new is True, in which case it contains new (empty) LabeledFrame
objects with video
and frame_index
set.
sleap_io/model/labels.py
def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n
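A sketch, assuming an existing labels object (frame indices are placeholders):
# All labeled frames for a given video.
lfs = labels.find(labels.video)

# Specific frame indices; with return_new=True, unlabeled frames are returned
# as new, empty LabeledFrame objects instead of being skipped.
lfs = labels.find(labels.video, frame_idx=[10, 20, 30], return_new=True)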
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.make_training_splits","title":"make_training_splits(n_train, n_val=None, n_test=None, save_dir=None, seed=None, embed=True)
","text":"Make splits for training with embedded images.
Parameters:
Name Type Description Defaultn_train
int | float
Size of the training split as integer or fraction.
requiredn_val
int | float | None
Size of the validation split as integer or fraction. If None
, this will be inferred based on the values of n_train
and n_test
. If n_test
is None
, this will be the remainder of the data after the training split.
None
n_test
int | float | None
Size of the testing split as integer or fraction. If None
, the test split will not be saved.
None
save_dir
str | Path | None
If specified, save splits to SLP files with embedded images.
None
seed
int | None
Optional integer seed to use for reproducibility.
None
embed
bool
If True
(the default), embed user labeled frame images in the saved files, which is useful for portability but can be slow for large projects. If False
, labels are saved with references to the source video files.
True
Returns:
Type Descriptiontuple[Labels, Labels] | tuple[Labels, Labels, Labels]
A tuple of labels_train, labels_val
or labels_train, labels_val, labels_test
if n_test
was specified.
Predictions and suggestions will be removed before saving, leaving only frames with user labeled data (the source labels are not affected).
Frames with user labeled data will be embedded in the resulting files.
If save_dir
is specified, this will save the randomly sampled splits to:
{save_dir}/train.pkg.slp
{save_dir}/val.pkg.slp
{save_dir}/test.pkg.slp
(if n_test
is specified)If embed
is False
, the files will be saved without embedded images to:
{save_dir}/train.slp
{save_dir}/val.slp
{save_dir}/test.slp
(if n_test
is specified)See also: Labels.split
sleap_io/model/labels.py
def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n
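A sketch of a common call, assuming an existing labels object (sizes, seed, and the output directory are illustrative):
# 80/10/10 split saved to splits/train.pkg.slp, val.pkg.slp, and test.pkg.slp,
# with user-labeled frame images embedded for portability.
train, val, test = labels.make_training_splits(
    n_train=0.8, n_val=0.1, n_test=0.1, save_dir="splits", seed=42, embed=True
)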
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.numpy","title":"numpy(video=None, all_frames=True, untracked=False, return_confidence=False)
","text":"Construct a numpy array from instance points.
Parameters:
Name Type Description Defaultvideo
Optional[Union[Video, int]]
Video or video index to convert to numpy arrays. If None
(the default), uses the first video.
None
untracked
bool
If False
(the default), include only instances that have a track assignment. If True
, includes all instances in each frame in arbitrary order.
False
return_confidence
bool
If False
(the default), only return points of nodes. If True
, return the points and scores of nodes.
False
Returns:
Type Descriptionndarray
An array of tracks of shape (n_frames, n_tracks, n_nodes, 2)
if return_confidence
is False
. Otherwise returned shape is (n_frames, n_tracks, n_nodes, 3)
if return_confidence
is True
.
Missing data will be replaced with np.nan
.
If this is a single instance project, a track does not need to be assigned.
Only predicted instances (NOT user instances) will be returned.
Notes This method assumes that instances have tracks assigned and is intended to function primarily for single-video prediction results.
Source code in sleap_io/model/labels.py
def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n
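A sketch, assuming an existing labels object with tracked predictions:
# (n_frames, n_tracks, n_nodes, 2) array of predicted points for the first video.
tracks = labels.numpy(video=0)

# Same, with prediction scores appended: (n_frames, n_tracks, n_nodes, 3).
tracks_with_scores = labels.numpy(video=0, return_confidence=True)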
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.remove_nodes","title":"remove_nodes(nodes, skeleton=None)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Defaultnodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the nodes are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method should always be used when removing nodes from the skeleton as it handles updating the lookup caches necessary for indexing nodes by name, and updating instances to reflect the changes made to the skeleton.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Source code in sleap_io/model/labels.py
def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
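A sketch for a single-skeleton project; the node names are hypothetical:
# Remove two nodes by name. Edges and symmetries touching them are removed,
# and every instance using this skeleton is updated accordingly.
labels.remove_nodes(["tail_tip", "left_ear"])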
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.remove_predictions","title":"remove_predictions(clean=True)
","text":"Remove all predicted instances from the labels.
Parameters:
Name Type Description Defaultclean
bool
If True
(the default), also remove any empty frames and unused tracks and skeletons. It does NOT remove videos that have no labeled frames or instances with no visible points.
True
See also: Labels.clean
sleap_io/model/labels.py
def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n
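A sketch, assuming an existing labels object:
# Drop all predicted instances, then prune frames, skeletons, and tracks that
# are left unused (clean=True is the default).
labels.remove_predictions()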
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.rename_nodes","title":"rename_nodes(name_map, skeleton=None)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Defaultname_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
requiredskeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new node names exist in the skeleton, if the old node names are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method is recommended over Skeleton.rename_nodes
as it will update all instances in the labels to reflect the new node names.
labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])]) labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"}) labels.skeleton.node_names [\"X\", \"Y\", \"Z\"] labels.rename_nodes([\"a\", \"b\", \"c\"]) labels.skeleton.node_names [\"a\", \"b\", \"c\"]
Source code in sleap_io/model/labels.py
def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.reorder_nodes","title":"reorder_nodes(new_order, skeleton=None)
","text":"Reorder nodes in the skeleton.
Parameters:
Name Type Description Defaultnew_order
list[NodeOrIndex]
A list of node names, indices, or Node
objects specifying the new order of the nodes.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new order of nodes is not the same length as the current nodes, or if there is more than one skeleton in the Labels
but it is not specified.
This method handles updating the lookup caches necessary for indexing nodes by name, as well as updating instances to reflect the changes made to the skeleton.
Source code in sleap_io/model/labels.py
def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
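A sketch for a single-skeleton project; the node names are hypothetical:
# Reorder the skeleton's nodes; instance point arrays are reordered to match.
labels.reorder_nodes(["head", "thorax", "abdomen"])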
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_filenames","title":"replace_filenames(new_filenames=None, filename_map=None, prefix_map=None)
","text":"Replace video filenames.
Parameters:
Name Type Description Defaultnew_filenames
list[str | Path] | None
List of new filenames. Must have the same length as the number of videos in the labels.
None
filename_map
dict[str | Path, str | Path] | None
Dictionary mapping old filenames (keys) to new filenames (values).
None
prefix_map
dict[str | Path, str | Path] | None
Dictionary mapping old prefixes (keys) to new prefixes (values).
None
Notes Only one of the argument types can be provided.
Source code in sleap_io/model/labels.py
def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n
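A sketch of the three mutually exclusive calling styles, assuming an existing labels object (all paths are hypothetical):
# 1. One new filename per video, in the same order as labels.videos.
labels.replace_filenames(new_filenames=["/data/session1.mp4"])

# 2. Exact old -> new filename mapping.
labels.replace_filenames(filename_map={"/old/session1.mp4": "/data/session1.mp4"})

# 3. Swap a path prefix, e.g. after moving data to a different machine.
labels.replace_filenames(prefix_map={"/old": "/data"})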
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_skeleton","title":"replace_skeleton(new_skeleton, old_skeleton=None, node_map=None)
","text":"Replace the skeleton in the labels.
Parameters:
Name Type Description Defaultnew_skeleton
Skeleton
The new Skeleton
to replace the old skeleton with.
old_skeleton
Skeleton | None
The old Skeleton
to replace. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node
objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed.
None
Raises:
Type DescriptionValueError
If there is more than one skeleton in the Labels
but it is not specified.
This method will replace the skeleton in all instances in the labels that have the old skeleton. All point data associated with nodes not in the node_map
will be lost.
sleap_io/model/labels.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. **All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n
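A sketch with a hypothetical new skeleton; as the warning above notes, points for unmapped nodes are lost:
from sleap_io import Skeleton

new_skel = Skeleton(["head", "thorax", "abdomen"])

# Map old node names to new ones where they differ; identically named nodes
# would be matched automatically if node_map were omitted.
labels.replace_skeleton(
    new_skel,
    node_map={"head": "head", "body": "thorax", "tail": "abdomen"},
)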
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_videos","title":"replace_videos(old_videos=None, new_videos=None, video_map=None)
","text":"Replace videos and update all references.
Parameters:
Name Type Description Defaultold_videos
list[Video] | None
List of videos to be replaced.
None
new_videos
list[Video] | None
List of videos to replace with.
None
video_map
dict[Video, Video] | None
Alternative input of dictionary where keys are the old videos and values are the new videos.
None
Source code in sleap_io/model/labels.py
def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n
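A sketch, assuming an existing labels object and a replacement file on disk (the path is hypothetical):
import sleap_io as sio

new_video = sio.load_video("recompressed/session1.mp4")
old_video = labels.videos[0]

# Swap the video everywhere it is referenced: labeled frames, suggestions,
# and the labels.videos list.
labels.replace_videos(video_map={old_video: new_video})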
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.save","title":"save(filename, format=None, embed=None, **kwargs)
","text":"Save labels to file in specified format.
Parameters:
Name Type Description Defaultfilename
str
Path to save labels to.
requiredformat
Optional[str]
The format to save the labels in. If None
, the format will be inferred from the file extension. Available formats are \"slp\"
, \"nwb\"
, \"labelstudio\"
, and \"jabs\"
.
None
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/model/labels.py
def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n
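A sketch of a few save variants (filenames are placeholders):
# Format inferred from the extension (SLP here).
labels.save("labels.slp")

# Self-contained package with user-labeled frame images embedded.
labels.save("labels.pkg.slp", embed="user")

# Strip embedded images and restore references to the source videos.
labels.save("labels_restored.slp", embed="source")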
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.split","title":"split(n, seed=None)
","text":"Separate the labels into random splits.
Parameters:
Name Type Description Defaultn
int | float
Size of the first split. If integer >= 1, assumes that this is the number of labeled frames in the first split. If < 1.0, this will be treated as a fraction of the total labeled frames.
requiredseed
int | None
Optional integer seed to use for reproducibility.
None
Returns:
Type Descriptiontuple[Labels, Labels]
A tuple of split1, split2
.
If an integer was specified, len(split1) == n
.
If a fraction was specified, len(split1) == int(n * len(labels))
.
The second split contains the remainder, i.e., len(split2) == len(labels) - len(split1)
.
If there are too few frames, a minimum of 1 frame will be kept in the second split.
If there is exactly 1 labeled frame in the labels, the same frame will be assigned to both splits.
Source code in sleap_io/model/labels.py
def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n
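A sketch, assuming an existing labels object (sizes and seed are arbitrary):
# Fractional split: ~90% of labeled frames in the first set, remainder in the second.
train, val = labels.split(0.9, seed=0)

# Integer split: exactly 100 labeled frames in the first set.
first_100, rest = labels.split(100)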
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.trim","title":"trim(save_path, frame_inds, video=None, video_kwargs=None)
","text":"Trim the labels to a subset of frames and videos accordingly.
Parameters:
Name Type Description Defaultsave_path
str | Path
Path to the trimmed labels SLP file. Video will be saved with the same base name but with .mp4 extension.
requiredframe_inds
list[int] | ndarray
Frame indices to save. Can be specified as a list or array of frame integers.
requiredvideo
Video | int | None
Video or integer index of the video to trim. Does not need to be specified for single-video projects.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type DescriptionLabels
The resulting labels object referencing the trimmed data.
Notes This will remove any data outside of the trimmed frames, save new videos, and adjust the frame indices to match the newly trimmed videos.
Source code in sleap_io/model/labels.py
def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
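A sketch for a single-video project (the output path and frame range are hypothetical):
import numpy as np

# Save frames 1000-1999 as clip.mp4 alongside clip.slp, with frame indices
# shifted so the trimmed clip starts at 0.
clip_labels = labels.trim("clip.slp", frame_inds=np.arange(1000, 2000))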
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.update","title":"update()
","text":"Update data structures based on contents.
This function will update the list of skeletons, videos and tracks from the labeled frames, instances and suggestions.
Source code in sleap_io/model/labels.py
def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n
"},{"location":"reference/sleap_io/model/skeleton/","title":"skeleton","text":""},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton","title":"sleap_io.model.skeleton
","text":"Data model for skeletons.
Skeletons are collections of nodes and edges which describe the landmarks associated with a pose model. The edges represent the connections between them and may be used differently depending on the underlying pose model.
Classes:
Name Description
Edge
A connection between two Node
objects within a Skeleton
.
Node
A landmark type within a Skeleton
.
Skeleton
A description of a set of landmark types and connections between them.
Symmetry
A relationship between a pair of nodes denoting their left/right pairing.
Functions:
Name Description
is_node_or_index
Check if an object is a Node
, string name or integer index.
Edge
","text":"A connection between two Node
objects within a Skeleton
.
This is a directed edge, representing the ordering of Node
s in the Skeleton
tree.
Attributes:
Name Type Description
source
Node
The origin Node
.
destination
Node
The destination Node
.
Methods:
Name Description
__getitem__
Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
sleap_io/model/skeleton.py
@define(frozen=True)\nclass Edge:\n \"\"\"A connection between two `Node` objects within a `Skeleton`.\n\n This is a directed edge, representing the ordering of `Node`s in the `Skeleton`\n tree.\n\n Attributes:\n source: The origin `Node`.\n destination: The destination `Node`.\n \"\"\"\n\n source: Node\n destination: Node\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
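A brief sketch of constructing and indexing an Edge directly (node names here are arbitrary; edges are usually created for you by Skeleton):

from sleap_io.model.skeleton import Edge, Node

head = Node("head")
thorax = Node("thorax")
edge = Edge(source=head, destination=thorax)

assert edge[0] is head    # source via __getitem__
assert edge[1] is thorax  # destination via __getitem__
# Any other index raises IndexError ("Edge only has 2 nodes...").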
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Edge.__getitem__","title":"__getitem__(idx)
","text":"Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Node","title":"Node
","text":"A landmark type within a Skeleton
.
This typically corresponds to a unique landmark within a skeleton, such as the \"left eye\".
Attributes:
Name Type Description
name
str
Descriptive label for the landmark.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Node:\n \"\"\"A landmark type within a `Skeleton`.\n\n This typically corresponds to a unique landmark within a skeleton, such as the \"left\n eye\".\n\n Attributes:\n name: Descriptive label for the landmark.\n \"\"\"\n\n name: str\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton","title":"Skeleton
","text":"A description of a set of landmark types and connections between them.
Skeletons are represented by a directed graph composed of a set of Node
s (landmark types such as body parts) and Edge
s (connections between parts).
Attributes:
Name Type Description
nodes
list[Node]
A list of Node
s. May be specified as a list of strings to create new nodes from their names.
edges
list[Edge]
A list of Edge
s. May be specified as a list of 2-tuples of string names or integer indices of nodes
. Each edge corresponds to a pair of source and destination nodes forming a directed edge.
symmetries
list[Symmetry]
A list of Symmetry
s. Each symmetry corresponds to symmetric body parts, such as \"left eye\", \"right eye\"
. This is used when applying flip (reflection) augmentation to images in order to appropriately swap the indices of symmetric landmarks.
name
str | None
A descriptive name for the Skeleton
.
Methods:
Name Description
__attrs_post_init__
Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
__contains__
Check if a node is in the skeleton.
__getitem__
Return a Node
when indexing by name or integer.
__len__
Return the number of nodes in the skeleton.
__repr__
Return a readable representation of the skeleton.
add_edge
Add an Edge
to the skeleton.
add_edges
Add multiple Edge
s to the skeleton.
add_node
Add a Node
to the skeleton.
add_nodes
Add multiple Node
s to the skeleton.
add_symmetry
Add a symmetry relationship to the skeleton.
get_flipped_node_inds
Returns node indices that should be switched when horizontally flipping.
index
Return the index of a node specified as a Node
or string name.
rebuild_cache
Rebuild the node name/index to Node
map caches.
remove_node
Remove a single node from the skeleton.
remove_nodes
Remove nodes from the skeleton.
rename_node
Rename a single node in the skeleton.
rename_nodes
Rename nodes in the skeleton.
reorder_nodes
Reorder nodes in the skeleton.
require_node
Return a Node
object, handling indexing and adding missing nodes.
Attributes:
Name Type Description
edge_inds
list[tuple[int, int]]
Edge indices as a list of 2-tuples.
edge_names
list[str, str]
Edge names as a list of 2-tuples with string node names.
node_names
list[str]
Names of the nodes associated with this skeleton as a list of strings.
symmetry_inds
list[tuple[int, int]]
Symmetry indices as a list of 2-tuples.
symmetry_names
list[str, str]
Symmetry names as a list of 2-tuples with string node names.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Skeleton:\n \"\"\"A description of a set of landmark types and connections between them.\n\n Skeletons are represented by a directed graph composed of a set of `Node`s (landmark\n types such as body parts) and `Edge`s (connections between parts).\n\n Attributes:\n nodes: A list of `Node`s. May be specified as a list of strings to create new\n nodes from their names.\n edges: A list of `Edge`s. May be specified as a list of 2-tuples of string names\n or integer indices of `nodes`. Each edge corresponds to a pair of source and\n destination nodes forming a directed edge.\n symmetries: A list of `Symmetry`s. Each symmetry corresponds to symmetric body\n parts, such as `\"left eye\", \"right eye\"`. This is used when applying flip\n (reflection) augmentation to images in order to appropriately swap the\n indices of symmetric landmarks.\n name: A descriptive name for the `Skeleton`.\n \"\"\"\n\n def _nodes_on_setattr(self, attr, new_nodes):\n \"\"\"Callback to update caches when nodes are set.\"\"\"\n self.rebuild_cache(nodes=new_nodes)\n return new_nodes\n\n nodes: list[Node] = field(\n factory=list,\n on_setattr=_nodes_on_setattr,\n )\n edges: list[Edge] = field(factory=list)\n symmetries: list[Symmetry] = field(factory=list)\n name: str | None = None\n _name_to_node_cache: dict[str, Node] = field(init=False, repr=False, eq=False)\n _node_to_ind_cache: dict[Node, int] = field(init=False, repr=False, eq=False)\n\n def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n\n def _convert_nodes(self):\n \"\"\"Convert nodes to `Node` objects if needed.\"\"\"\n if isinstance(self.nodes, np.ndarray):\n object.__setattr__(self, \"nodes\", self.nodes.tolist())\n for i, node in enumerate(self.nodes):\n if type(node) == str:\n self.nodes[i] = Node(node)\n\n def _convert_edges(self):\n \"\"\"Convert list of edge names or integers to `Edge` objects if needed.\"\"\"\n if isinstance(self.edges, np.ndarray):\n self.edges = self.edges.tolist()\n node_names = self.node_names\n for i, edge in enumerate(self.edges):\n if type(edge) == Edge:\n continue\n src, dst = edge\n if type(src) == str:\n try:\n src = node_names.index(src)\n except ValueError:\n raise ValueError(\n f\"Node '{src}' specified in the edge list is not in the nodes.\"\n )\n if type(src) == int or (\n np.isscalar(src) and np.issubdtype(src.dtype, np.integer)\n ):\n src = self.nodes[src]\n\n if type(dst) == str:\n try:\n dst = node_names.index(dst)\n except ValueError:\n raise ValueError(\n f\"Node '{dst}' specified in the edge list is not in the nodes.\"\n )\n if type(dst) == int or (\n np.isscalar(dst) and np.issubdtype(dst.dtype, np.integer)\n ):\n dst = self.nodes[dst]\n\n self.edges[i] = Edge(src, dst)\n\n def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. 
Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n\n @property\n def node_names(self) -> list[str]:\n \"\"\"Names of the nodes associated with this skeleton as a list of strings.\"\"\"\n return [node.name for node in self.nodes]\n\n @property\n def edge_inds(self) -> list[tuple[int, int]]:\n \"\"\"Edges indices as a list of 2-tuples.\"\"\"\n return [\n (self.nodes.index(edge.source), self.nodes.index(edge.destination))\n for edge in self.edges\n ]\n\n @property\n def edge_names(self) -> list[str, str]:\n \"\"\"Edge names as a list of 2-tuples with string node names.\"\"\"\n return [(edge.source.name, edge.destination.name) for edge in self.edges]\n\n @property\n def symmetry_inds(self) -> list[tuple[int, int]]:\n \"\"\"Symmetry indices as a list of 2-tuples.\"\"\"\n return [\n tuple(sorted((self.index(symmetry[0]), self.index(symmetry[1]))))\n for symmetry in self.symmetries\n ]\n\n @property\n def symmetry_names(self) -> list[str, str]:\n \"\"\"Symmetry names as a list of 2-tuples with string node names.\"\"\"\n return [\n (self.nodes[i].name, self.nodes[j].name) for (i, j) in self.symmetry_inds\n ]\n\n def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n\n def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n\n def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n\n def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return 
self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n\n def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n\n def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n\n def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n\n def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n\n def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n ):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n\n def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n\n def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n ):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. 
If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n\n def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n\n def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. 
Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n\n def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n\n def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n\n def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. 
This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
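Putting the attributes and methods above together, a minimal construction sketch (node, edge, and symmetry names here are arbitrary examples):

from sleap_io.model.skeleton import Skeleton

# Nodes may be given as strings; edges as 2-tuples of names (or indices).
skel = Skeleton(
    nodes=["head", "thorax", "abdomen", "left_wing", "right_wing"],
    edges=[("head", "thorax"), ("thorax", "abdomen")],
    name="fly",
)
skel.add_symmetry("left_wing", "right_wing")

print(skel.node_names)     # ['head', 'thorax', 'abdomen', 'left_wing', 'right_wing']
print(skel.edge_inds)      # [(0, 1), (1, 2)]
print(skel.symmetry_inds)  # [(3, 4)]
print(len(skel))           # 5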
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.edge_inds","title":"edge_inds: list[tuple[int, int]]
property
","text":"Edges indices as a list of 2-tuples.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.edge_names","title":"edge_names: list[str, str]
property
","text":"Edge names as a list of 2-tuples with string node names.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.node_names","title":"node_names: list[str]
property
","text":"Names of the nodes associated with this skeleton as a list of strings.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.symmetry_inds","title":"symmetry_inds: list[tuple[int, int]]
property
","text":"Symmetry indices as a list of 2-tuples.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.symmetry_names","title":"symmetry_names: list[str, str]
property
","text":"Symmetry names as a list of 2-tuples with string node names.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
sleap_io/model/skeleton.py
def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__contains__","title":"__contains__(node)
","text":"Check if a node is in the skeleton.
Source code in sleap_io/model/skeleton.py
def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__getitem__","title":"__getitem__(idx)
","text":"Return a Node
when indexing by name or integer.
sleap_io/model/skeleton.py
def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__len__","title":"__len__()
","text":"Return the number of nodes in the skeleton.
Source code in sleap_io/model/skeleton.py
def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__repr__","title":"__repr__()
","text":"Return a readable representation of the skeleton.
Source code insleap_io/model/skeleton.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_edge","title":"add_edge(src, dst=None)
","text":"Add an Edge
to the skeleton.
Parameters:
Name Type Description Default
src
NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex]
The source node specified as a Node
, name or index.
dst
NodeOrIndex | None
The destination node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n
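A short sketch of the accepted input forms (node names are arbitrary); nodes referenced by an edge but missing from the skeleton are added automatically via require_node:

from sleap_io.model.skeleton import Edge, Skeleton

skel = Skeleton(["a", "b", "c"])

skel.add_edge("a", "b")                     # by node name
skel.add_edge(1, 2)                         # by node index ("b" -> "c")
skel.add_edge(("c", "d"))                   # as a 2-tuple; "d" is created on the fly
skel.add_edge(Edge(skel["a"], skel["c"]))   # as a prebuilt Edge

print(skel.edge_names)
# [('a', 'b'), ('b', 'c'), ('c', 'd'), ('a', 'c')]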
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_edges","title":"add_edges(edges)
","text":"Add multiple Edge
s to the skeleton.
Parameters:
Name Type Description Default
edges
list[Edge | tuple[NodeOrIndex, NodeOrIndex]]
A list of Edge
objects or 2-tuples of source and destination nodes.
sleap_io/model/skeleton.py
def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_node","title":"add_node(node)
","text":"Add a Node
to the skeleton.
Parameters:
Name Type Description Default
node
Node | str
A Node
object or a string name to create a new node.
Raises:
Type Description
ValueError
If the node already exists in the skeleton or if the node is not specified as a Node
or string.
sleap_io/model/skeleton.py
def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_nodes","title":"add_nodes(nodes)
","text":"Add multiple Node
s to the skeleton.
Parameters:
Name Type Description Default
nodes
list[Node | str]
A list of Node
objects or string names to create new nodes.
sleap_io/model/skeleton.py
def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_symmetry","title":"add_symmetry(node1=None, node2=None)
","text":"Add a symmetry relationship to the skeleton.
Parameters:
Name Type Description Default
node1
Symmetry | NodeOrIndex
The first node specified as a Node
, name or index. If a Symmetry
object is provided, it will be added directly to the skeleton.
None
node2
NodeOrIndex | None
The second node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n
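A brief sketch (node names arbitrary); a prebuilt Symmetry can also be passed as the first argument:

from sleap_io.model.skeleton import Skeleton, Symmetry

skel = Skeleton(["left_eye", "right_eye", "left_ear", "right_ear"])

skel.add_symmetry("left_eye", "right_eye")                          # by name
skel.add_symmetry(Symmetry({skel["left_ear"], skel["right_ear"]}))  # prebuilt

print(skel.symmetry_inds)  # [(0, 1), (2, 3)]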
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.get_flipped_node_inds","title":"get_flipped_node_inds()
","text":"Returns node indices that should be switched when horizontally flipping.
This is useful as a lookup table for flipping the landmark coordinates when doing data augmentation.
Exampleskel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"]) skel.add_symmetry(\"B_left\", \"B_right\") skel.add_symmetry(\"D_left\", \"D_right\") skel.flipped_node_inds [0, 2, 1, 3, 5, 4] pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) pose[skel.flipped_node_inds] array([[0, 0], [2, 2], [1, 1], [3, 3], [5, 5], [4, 4]])
Source code in sleap_io/model/skeleton.py
def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.index","title":"index(node)
","text":"Return the index of a node specified as a Node
or string name.
sleap_io/model/skeleton.py
def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rebuild_cache","title":"rebuild_cache(nodes=None)
","text":"Rebuild the node name/index to Node
map caches.
Parameters:
Name Type Description Default
nodes
list[Node] | None
A list of Node
objects to update the cache with. If not provided, the cache will be updated with the current nodes in the skeleton. If nodes are provided, the cache will be updated with the provided nodes, but the current nodes in the skeleton will not be updated. Default is None
.
None
Notes This function should be called when nodes or node list is mutated to update the lookup caches for indexing nodes by name or Node
object.
This is done automatically when nodes are added or removed from the skeleton using the convenience methods in this class.
This method only needs to be used when manually mutating nodes or the node list directly.
Source code in sleap_io/model/skeleton.py
def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n
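A sketch of the manual-mutation case this method exists for (the convenience methods such as add_node handle this bookkeeping for you):

from sleap_io.model.skeleton import Node, Skeleton

skel = Skeleton(["head", "thorax"])

# Mutating the node list directly bypasses the cache bookkeeping...
skel.nodes.append(Node("abdomen"))

# ...so rebuild the name/index lookup caches before indexing by name.
skel.rebuild_cache()
assert skel.index("abdomen") == 2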
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.remove_node","title":"remove_node(node)
","text":"Remove a single node from the skeleton.
Parameters:
Name Type Description Default
node
NodeOrIndex
The node to remove. Can be specified as a string name, integer index, or Node
object.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed node will also be removed.
Warning
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.remove_nodes","title":"remove_nodes(nodes)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Default
nodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Warning
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call instance.update_nodes()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n
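A short sketch of removal and its effect on connected edges (node names arbitrary). As warned above, instances referencing this skeleton are not updated by this call.

from sleap_io.model.skeleton import Skeleton

skel = Skeleton(
    nodes=["head", "thorax", "abdomen"],
    edges=[("head", "thorax"), ("thorax", "abdomen")],
)

skel.remove_nodes(["thorax"])

print(skel.node_names)  # ['head', 'abdomen']
print(skel.edge_inds)   # [] -- both edges touched "thorax" and were dropped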
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rename_node","title":"rename_node(old_name, new_name)
","text":"Rename a single node in the skeleton.
Parameters:
Name Type Description Default
old_name
NodeOrIndex
The name of the node to rename. Can also be specified as an integer index or Node
object.
new_name
str
The new name for the node.
required
Source code in sleap_io/model/skeleton.py
def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rename_nodes","title":"rename_nodes(name_map)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Default
name_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
required
Raises:
Type Description
ValueError
If the new node names exist in the skeleton or if the old node names are not found in the skeleton.
Notes
This method should always be used when renaming nodes in the skeleton as it handles updating the lookup caches necessary for indexing nodes by name.
After renaming, instances using this skeleton do NOT need to be updated as the nodes are stored by reference in the skeleton, so changes are reflected automatically.
Exampleskel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")]) skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"}) skel.node_names [\"X\", \"Y\", \"Z\"] skel.rename_nodes([\"a\", \"b\", \"c\"]) skel.node_names [\"a\", \"b\", \"c\"]
Source code in sleap_io/model/skeleton.py
def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.reorder_nodes","title":"reorder_nodes(new_order)
","text":"Reorder nodes in the skeleton.
Parameters:
Name Type Description Default
new_order
list[NodeOrIndex]
A list of node names, indices, or Node
objects specifying the new order of the nodes.
Raises:
Type Description
ValueError
If the new order of nodes is not the same length as the current nodes.
Notes
This method handles updating the lookup caches necessary for indexing nodes by name.
Warning
After reordering, instances using this skeleton do not need to be updated as the nodes are stored by reference in the skeleton.
However, the order that points are stored in the instances will not be updated to match the new order of the nodes in the skeleton. This should not matter unless the ordering of the keys in the Instance.points
dictionary is used instead of relying on the skeleton node order.
To make sure these are aligned, it is recommended to use the Labels.reorder_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
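A brief sketch (node names arbitrary); note that edge indices are reported relative to the new node order:

from sleap_io.model.skeleton import Skeleton

skel = Skeleton(["head", "thorax", "abdomen"], edges=[("head", "thorax")])

skel.reorder_nodes(["abdomen", "thorax", "head"])

print(skel.node_names)  # ['abdomen', 'thorax', 'head']
print(skel.edge_inds)   # [(2, 1)] -- same Edge objects, re-indexed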
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.require_node","title":"require_node(node, add_missing=True)
","text":"Return a Node
object, handling indexing and adding missing nodes.
Parameters:
Name Type Description Default
node
NodeOrIndex
A Node
object, name or index.
add_missing
bool
If True
, missing nodes will be added to the skeleton. If False
, an error will be raised if the node is not found. Default is True
.
True
Returns:
Type Description
Node
The Node
object.
Raises:
Type Description
IndexError
If the node is not found in the skeleton and add_missing
is False
.
sleap_io/model/skeleton.py
def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n
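A brief sketch of the two behaviors (node names arbitrary):

from sleap_io.model.skeleton import Skeleton

skel = Skeleton(["head"])

node = skel.require_node("thorax")  # missing, so it is added and then returned
print(skel.node_names)              # ['head', 'thorax']

# skel.require_node("abdomen", add_missing=False)  # would raise IndexError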
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry","title":"Symmetry
","text":"A relationship between a pair of nodes denoting their left/right pairing.
Attributes:
Name Type Description
nodes
set[Node]
A set of two Node
s.
Methods:
Name Description
__getitem__
Return the first node.
__iter__
Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
@define\nclass Symmetry:\n \"\"\"A relationship between a pair of nodes denoting their left/right pairing.\n\n Attributes:\n nodes: A set of two `Node`s.\n \"\"\"\n\n nodes: set[Node] = field(converter=set, validator=lambda _, __, val: len(val) == 2)\n\n def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry.__getitem__","title":"__getitem__(idx)
","text":"Return the first node.
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry.__iter__","title":"__iter__()
","text":"Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.is_node_or_index","title":"is_node_or_index(obj)
","text":"Check if an object is a Node
, string name or integer index.
Parameters:
Name Type Description Default
obj
Any
The object to check.
required
Notes
This is mainly for backwards compatibility with Python versions < 3.10 where generics can't be used with isinstance
. In newer Python, this is equivalent to isinstance(obj, NodeOrIndex)
.
sleap_io/model/skeleton.py
def is_node_or_index(obj: typing.Any) -> bool:\n \"\"\"Check if an object is a `Node`, string name or integer index.\n\n Args:\n obj: The object to check.\n\n Notes:\n This is mainly for backwards compatibility with Python versions < 3.10 where\n generics can't be used with `isinstance`. In newer Python, this is equivalent\n to `isinstance(obj, NodeOrIndex)`.\n \"\"\"\n return isinstance(obj, (Node, str, int))\n
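A brief sketch of what passes the check:

from sleap_io.model.skeleton import Node, is_node_or_index

assert is_node_or_index(Node("head"))  # Node object
assert is_node_or_index("head")        # string name
assert is_node_or_index(3)             # integer index
assert not is_node_or_index(1.5)       # anything else fails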
"},{"location":"reference/sleap_io/model/suggestions/","title":"suggestions","text":""},{"location":"reference/sleap_io/model/suggestions/#sleap_io.model.suggestions","title":"sleap_io.model.suggestions
","text":"Data module for suggestions.
Classes:
Name Description
SuggestionFrame
Data structure for a single frame of suggestions.
"},{"location":"reference/sleap_io/model/suggestions/#sleap_io.model.suggestions.SuggestionFrame","title":"SuggestionFrame
","text":"Data structure for a single frame of suggestions.
Attributes:
Name Type Description
video
Video
The video associated with the frame.
frame_idx
int
The index of the frame in the video.
Source code in sleap_io/model/suggestions.py
@attrs.define(auto_attribs=True)\nclass SuggestionFrame:\n \"\"\"Data structure for a single frame of suggestions.\n\n Attributes:\n video: The video associated with the frame.\n frame_idx: The index of the frame in the video.\n \"\"\"\n\n video: Video\n frame_idx: int\n
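A brief sketch; the path is a placeholder, and the Video class is documented in the next section (its backend is only opened if the file actually exists):

from sleap_io.model.suggestions import SuggestionFrame
from sleap_io.model.video import Video

video = Video(filename="session1.mp4")  # placeholder path
suggestion = SuggestionFrame(video=video, frame_idx=120)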
"},{"location":"reference/sleap_io/model/video/","title":"video","text":""},{"location":"reference/sleap_io/model/video/#sleap_io.model.video","title":"sleap_io.model.video
","text":"Data model for videos.
The Video
class is a SLEAP data structure that stores information regarding a video and its components used in SLEAP.
Classes:
Name Description
Video
Video
class used by sleap to represent videos and data associated with them.
Video
","text":"Video
class used by sleap to represent videos and data associated with them.
This class is used to store information regarding a video and its components. It is used to store the video's filename
, shape
, and the video's backend
.
To create a Video
object, use the from_filename
method which will select the backend appropriately.
Attributes:
Name Type Description
filename
str | list[str]
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
backend
Optional[VideoBackend]
An object that implements the basic methods for reading and manipulating frames of a specific video type.
backend_metadata
dict[str, any]
A dictionary of metadata specific to the backend. This is useful for storing metadata that requires an open backend (e.g., shape information) without having access to the video file itself.
source_video
Optional[Video]
The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
open_backend
bool
Whether to open the backend when the video is available. If True
(the default), the backend will be automatically opened if the video exists. Set this to False
when you want to manually open the backend, or when you know the video file does not exist and you want to avoid trying to open the file.
Notes
Instances of this class are hashed by identity, not by value. This means that two Video
instances with the same attributes will NOT be considered equal in a set or dict.
See also: VideoBackend
Methods:
Name Description
__attrs_post_init__
Post init syntactic sugar.
__deepcopy__
Deep copy the video object.
__getitem__
Return the frames of the video at the given indices.
__len__
Return the length of the video as the number of frames.
__repr__
Informal string representation (for print or format).
__str__
Informal string representation (for print or format).
close
Close the video backend.
exists
Check if the video file exists and is accessible.
from_filename
Create a Video from a filename.
open
Open the video backend for reading.
replace_filename
Update the filename of the video, optionally opening the backend.
save
Save video frames to a new video file.
Attributes:
Name Type Description
grayscale
bool | None
Return whether the video is grayscale.
is_open
bool
Check if the video backend is open.
shape
Tuple[int, int, int, int] | None
Return the shape of the video as (num_frames, height, width, channels).
Source code in sleap_io/model/video.py
@attrs.define(eq=False)\nclass Video:\n \"\"\"`Video` class used by sleap to represent videos and data associated with them.\n\n This class is used to store information regarding a video and its components.\n It is used to store the video's `filename`, `shape`, and the video's `backend`.\n\n To create a `Video` object, use the `from_filename` method which will select the\n backend appropriately.\n\n Attributes:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n backend: An object that implements the basic methods for reading and\n manipulating frames of a specific video type.\n backend_metadata: A dictionary of metadata specific to the backend. This is\n useful for storing metadata that requires an open backend (e.g., shape\n information) without having access to the video file itself.\n source_video: The source video object if this is a proxy video. This is present\n when the video contains an embedded subset of frames from another video.\n open_backend: Whether to open the backend when the video is available. If `True`\n (the default), the backend will be automatically opened if the video exists.\n Set this to `False` when you want to manually open the backend, or when the\n you know the video file does not exist and you want to avoid trying to open\n the file.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `Video` instances with the same attributes will NOT be considered equal in a\n set or dict.\n\n See also: VideoBackend\n \"\"\"\n\n filename: str | list[str]\n backend: Optional[VideoBackend] = None\n backend_metadata: dict[str, any] = attrs.field(factory=dict)\n source_video: Optional[Video] = None\n open_backend: bool = True\n\n EXTS = MediaVideo.EXTS + HDF5Video.EXTS + ImageVideo.EXTS\n\n def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n\n def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. 
If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n\n @property\n def shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n If the video backend is not set or it cannot determine the shape of the video,\n this will return None.\n \"\"\"\n return self._get_shape()\n\n def _get_shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n This suppresses errors related to querying the backend for the video shape, such\n as when it has not been set or when the video file is not found.\n \"\"\"\n try:\n return self.backend.shape\n except:\n if \"shape\" in self.backend_metadata:\n return self.backend_metadata[\"shape\"]\n return None\n\n @property\n def grayscale(self) -> bool | None:\n \"\"\"Return whether the video is grayscale.\n\n If the video backend is not set or it cannot determine whether the video is\n grayscale, this will return None.\n \"\"\"\n shape = self.shape\n if shape is not None:\n return shape[-1] == 1\n else:\n grayscale = None\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n return grayscale\n\n @grayscale.setter\n def grayscale(self, value: bool):\n \"\"\"Set the grayscale value and adjust the backend.\"\"\"\n if self.backend is not None:\n self.backend.grayscale = value\n self.backend._cached_shape = None\n\n self.backend_metadata[\"grayscale\"] = value\n\n def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n\n def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n\n def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. 
Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n\n def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n\n @property\n def is_open(self) -> bool:\n \"\"\"Check if the video backend is open.\"\"\"\n return self.exists() and self.backend is not None\n\n def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n ):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. 
If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n\n def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n\n def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n ):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n\n def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. 
If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.grayscale","title":"grayscale: bool | None
property
writable
","text":"Return whether the video is grayscale.
If the video backend is not set or it cannot determine whether the video is grayscale, this will return None.
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.is_open","title":"is_open: bool
property
","text":"Check if the video backend is open.
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.shape","title":"shape: Tuple[int, int, int, int] | None
property
","text":"Return the shape of the video as (num_frames, height, width, channels).
If the video backend is not set or it cannot determine the shape of the video, this will return None.
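A minimal usage sketch (the filename below is hypothetical) showing how the shape can be read and unpacked:
import sleap_io as sio\n\nvideo = sio.load_video(\"video.mp4\")  # hypothetical path\n\n# None if the backend is not set or the shape cannot be determined.\nprint(video.shape)\n\n# Unpack when a backend is available.\nn_frames, height, width, channels = video.shape\n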
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Post init syntactic sugar.
Source code insleap_io/model/video.py
def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__deepcopy__","title":"__deepcopy__(memo)
","text":"Deep copy the video object.
Source code insleap_io/model/video.py
def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__getitem__","title":"__getitem__(inds)
","text":"Return the frames of the video at the given indices.
Parameters:
Name Type Description Defaultinds
int | list[int] | slice
Index or list of indices of frames to read.
requiredReturns:
Type Descriptionndarray
Frame or frames as a numpy array of shape (height, width, channels)
if a scalar index is provided, or (frames, height, width, channels)
if a list of indices is provided.
See also: VideoBackend.get_frame, VideoBackend.get_frames
Source code insleap_io/model/video.py
def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n
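As a usage sketch (the filename is hypothetical), indexing mirrors the shapes described above:
import sleap_io as sio\n\nvideo = sio.load_video(\"video.mp4\")  # hypothetical path\n\n# Scalar index -> array of shape (height, width, channels).\nframe = video[0]\n\n# List or slice of indices -> array of shape (frames, height, width, channels).\nclip = video[[0, 1, 2]]\nfirst_ten = video[:10]\n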
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__len__","title":"__len__()
","text":"Return the length of the video as the number of frames.
Source code insleap_io/model/video.py
def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__repr__","title":"__repr__()
","text":"Informal string representation (for print or format).
Source code insleap_io/model/video.py
def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__str__","title":"__str__()
","text":"Informal string representation (for print or format).
Source code insleap_io/model/video.py
def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.close","title":"close()
","text":"Close the video backend.
Source code insleap_io/model/video.py
def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.exists","title":"exists(check_all=False, dataset=None)
","text":"Check if the video file exists and is accessible.
Parameters:
Name Type Description Defaultcheck_all
bool
If True
, check that all filenames in a list exist. If False
(the default), check that the first filename exists.
False
dataset
str | None
Name of dataset in HDF5 file. If specified, this function will return False
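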
if the dataset does not exist.
None
Returns:
Type Descriptionbool
True
if the file exists and is accessible, False
otherwise.
sleap_io/model/video.py
def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n
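A small sketch of how this might be called; all filenames and the dataset name are hypothetical:
import sleap_io as sio\n\nvideo = sio.load_video(\"video.mp4\")  # hypothetical media file\nprint(video.exists())  # True if the file is readable\n\n# For videos backed by a list of image files, check every file.\nimgs = sio.load_video([\"frame0.png\", \"frame1.png\"])  # hypothetical images\nprint(imgs.exists(check_all=True))\n\n# For HDF5 videos, also require that the dataset is present.\nh5 = sio.load_video(\"frames.h5\", dataset=\"video0\")  # hypothetical file and dataset\nprint(h5.exists(dataset=\"video0\"))\n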
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, source_video=None, **kwargs)
classmethod
","text":"Create a Video from a filename.
Parameters:
Name Type Description Defaultfilename
str | list[str]
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are expected. If filename is a folder, it will be searched for images.
requireddataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[bool]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
source_video
Optional[Video]
The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
None
Returns:
Type DescriptionVideoBackend
Video instance with the appropriate backend instantiated.
Source code insleap_io/model/video.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n
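For example, a sketch of constructing videos from different sources; the filenames and the dataset name are hypothetical:
from sleap_io.model.video import Video\n\n# Media file (backend inferred from the extension).\nvideo = Video.from_filename(\"video.mp4\")\n\n# HDF5 file with an explicit dataset and forced grayscale.\nh5_video = Video.from_filename(\"frames.h5\", dataset=\"video0\", grayscale=True)\n\n# List of images treated as an image-sequence video.\nimg_video = Video.from_filename([\"frame0.png\", \"frame1.png\"])\n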
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.open","title":"open(filename=None, dataset=None, grayscale=None, keep_open=True)
","text":"Open the video backend for reading.
Parameters:
Name Type Description Defaultfilename
Optional[str]
Filename to open. If not specified, will use the filename set on the video object.
None
dataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[str]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
Notes This is useful for opening the video backend to read frames and then closing it after reading all the necessary frames.
If the backend was already open, it will be closed before opening a new one. Values for the HDF5 dataset and grayscale will be remembered if not specified.
Source code insleap_io/model/video.py
def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n
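A brief sketch of explicitly managing the backend (filenames hypothetical):
import sleap_io as sio\n\nvideo = sio.load_video(\"video.mp4\")  # hypothetical path\n\n# Close the reader, e.g. before pickling or sending across processes.\nvideo.close()\n\n# Reopen later; dataset and grayscale settings are remembered if not given.\nvideo.open()\nframe = video[0]\n\n# Open a different file on the same Video object.\nvideo.open(filename=\"video2.mp4\")  # hypothetical path\n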
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.replace_filename","title":"replace_filename(new_filename, open=True)
","text":"Update the filename of the video, optionally opening the backend.
Parameters:
Name Type Description Defaultnew_filename
str | Path | list[str] | list[Path]
New filename to set for the video.
requiredopen
bool
If True
(the default), open the backend with the new filename. If the new filename does not exist, no error is raised.
True
Source code in sleap_io/model/video.py
def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n
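For instance, a sketch of re-pointing a video at a moved file (filenames hypothetical):
import sleap_io as sio\n\n# Load labels without opening the (possibly moved) video files.\nlabels = sio.load_file(\"labels.slp\", open_videos=False)  # hypothetical file\n\n# Point the first video at its new location and reopen the backend if it exists.\nlabels.videos[0].replace_filename(\"/new/location/video.mp4\")  # hypothetical path\n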
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.save","title":"save(save_path, frame_inds=None, video_kwargs=None)
","text":"Save video frames to a new video file.
Parameters:
Name Type Description Defaultsave_path
str | Path
Path to the new video file. Should end in MP4.
requiredframe_inds
list[int] | ndarray | None
Frame indices to save. Can be specified as a list or array of frame integers. If not specified, saves all video frames.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type DescriptionVideo
A new Video
object pointing to the new video file.
sleap_io/model/video.py
def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
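As an illustrative sketch (filenames hypothetical), saving a subset of frames to a new MP4:
import sleap_io as sio\n\nvideo = sio.load_video(\"video.mp4\")  # hypothetical path\n\n# Re-encode the first 100 frames into a new clip with a custom CRF.\nclip = video.save(\"clip.mp4\", frame_inds=list(range(100)), video_kwargs={\"crf\": 23})\n\nprint(clip.shape)  # shape of the newly written video\n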
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"sleap-io","text":"Standalone utilities for working with animal pose tracking data.
This is intended to be a complement to the core SLEAP package that aims to provide functionality for interacting with pose tracking-related data structures and file formats with minimal dependencies. This package does not have any functionality related to labeling, training, or inference.
"},{"location":"#features","title":"Features","text":"The main purpose of this library is to provide utilities to load/save from different formats for pose data and standardize them into our common Data Model.
This enables ease-of-use through format-agnostic operations that make it easy to work with pose data, including utilities for common tasks. Some of these include:
See Examples for more usage examples and recipes.
"},{"location":"#installation","title":"Installation","text":"pip install sleap-io\n
or
conda install -c conda-forge sleap-io\n
For development, use one of the following syntaxes:
conda env create -f environment.yml\n
pip install -e .[dev]\n
"},{"location":"#support","title":"Support","text":"For technical inquiries specific to this package, please open an Issue with a description of your problem or request.
For general SLEAP usage, see the main website.
Other questions? Reach out to talmo@salk.edu
.
This package is distributed under a BSD 3-Clause License and can be used without restrictions. See LICENSE
for details.
sio.VideoWriter
: basic imageio-ffmpeg
video writer with sensible H264 presets. This can be used as a context manager: with sio.VideoWriter(\"video.mp4\") as vw:\n for frame in video:\n vw(frame)
sio.save_video
: high-level video writing. This can be used to quickly write a set of frames or even a whole Video
for easy (if inefficient) re-encoding: bad_video = sio.load_video(\"unseekable.avi\")\nsio.save_video(bad_video, \"seekable.mp4\")
IndexError
in VideoBackend
to enable sequence protocol for iteration over Video
s: for frame in video:\n pass
sio.io.video
to sio.io.video_reading
.Skeleton
__contains__(node: NodeOrIndex)
: Returns True
if a node exists in the skeleton.rebuild_cache()
: Method allowing explicit regeneration of the caching attributes from the nodes._name_to_node_cache
and _node_to_ind_cache
, better reflecting the mapping directionality.require_node(node: NodeOrIndex, add_missing: bool = True)
: Returns a Node
given a Node
, int
or str
. If add_missing
is True
, the node is added or created, otherwise an IndexError
is raised. This is helpful for flexibly converting between node representations with convenient existence handling.add_nodes(list[Node | str])
: Convenience method to add a list of nodes.add_edges(edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]])
: Convenience method to add a list of edges.rename_nodes(name_map: dict[NodeOrIndex, str] | list[str])
: Method to rename nodes either by specifying a potentially partial mapping from node(s) to new name(s), or a list of new names. Handles updating both the Node.name
attributes and the cache.rename_node(old_name: NodeOrIndex, new_name: str)
: Shorter syntax for renaming a single node.remove_nodes(nodes: list[NodeOrIndex])
: Method for removing nodes from the skeleton and updating caches. Does NOT update corresponding instances.remove_node(node: NodeOrIndex)
: Shorter syntax for removing a single node.reorder_nodes(new_order: list[NodeOrIndex])
: Method for setting the order of the nodes within the skeleton with cache updating. Does NOT update corresponding instances.Instance
/PredictedInstance
update_skeleton()
: Updates the points
attribute on the instance to reflect changes in the associated skeleton (removed nodes and reordering). This is called internally after updating the skeleton from the Labels
level, but also exposed for more complex data manipulation workflows.replace_skeleton(new_skeleton: Skeleton, node_map: dict[NodeOrIndex, NodeOrIndex] | None = None, rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None)
: Method to replace the skeleton on the instance with optional capability to specify a node mapping so that data stored in the points
attribute is retained and associated with the right nodes in the new skeleton. Mapping is specified in node_map
from old to new nodes and defaults to mapping between node objects with the same name. rev_node_map
maps new nodes to old nodes and is used internally when calling from the Labels
level as it bypasses validation.Labels
instances
: Convenience property that returns a generator that loops over all labeled frames and returns all instances. This can be lazily iterated over without having to construct a huge list of all the instances.rename_nodes(name_map: dict[NodeOrIndex, str] | list[str], skeleton: Skeleton | None = None)
: Method to rename nodes in a specified skeleton within the labels.remove_nodes(nodes: list[NodeOrIndex], skeleton: Skeleton | None = None)
: Method to remove nodes in a specified skeleton within the labels. This also updates all instances associated with the skeleton, removing point data for the removed nodes.reorder_nodes(new_order: list[NodeOrIndex], skeleton: Skeleton | None = None)
: Method to reorder nodes in a specified skeleton within the labels. This also updates all instances associated with the skeleton, reordering point data for the nodes.replace_skeleton(new_skeleton: Skeleton, old_skeleton: Skeleton | None = None, node_map: dict[NodeOrIndex, NodeOrIndex] | None = None)
: Method to replace a skeleton entirely within the labels, updating all instances associated with the old skeleton to use the new skeleton, optionally with node remapping to retain previous point data.HDF5Video
edge cases by @talmo in #137Labels.extract
, Labels.trim
and Video.save
by @talmo in #140 LabeledFrame.frame_idx
: Now always converted to int
type.Video.close()
: Now caches backend metadata to Video.backend_metadata
to persist metadata on close.copy.deepcopy()
now works on Video
objects even if backend is open.Video.save(save_path: str | Path, frame_inds: list[int] | np.ndarray | None = None, video_kwargs: dict[str, Any] | None = None)
: Method to save a video file to an MP4 using VideoWriter
with an optional subset of frames.Labels.extract(inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True)
: Add method to extract a subset of frames from the labels, optionally making a copy, and return a new Labels
object.Labels.trim(save_path: str | Path, frame_inds: list[int] | np.ndarray, video: Video | int | None = None, video_kwargs: dict[str, Any] | None = None)
: Add method to extract a subset of the labels, write a video clip with the extracted frames, and adjust frame indices to match the clip.Full Changelog: v0.1.10...v0.2.0
"},{"location":"changelog/#v0110","title":"v0.1.10What's Changed","text":"Full Changelog: v0.1.9...v0.1.10
"},{"location":"changelog/#v019","title":"v0.1.9What's Changed","text":"av
as a dependency since it's still a little buggy and doesn't have broad enough platform compatibility.ndx-pose
< 0.2.0 until #104 is merged in.sio.io.utils.is_file_accessible
to check for readability by actually reading a byte. This catches permission and other esoteric filesystem errors (addresses #116).sio.load_slp(..., open_videos=False)
Video
objects with Video(..., open_backend=False)
.1.0
after taking out the train split.Labels.make_training_splits(..., embed=False)
. Previously, the function would always embed the images, which could be slow for large projects. With this change, the embed
parameter is introduced, allowing the user to choose whether to embed the images or save the labels with references to the source video files.Full Changelog: v0.1.8...v0.1.9
"},{"location":"changelog/#v018","title":"v0.1.8What's ChangedNew Contributors","text":"Full Changelog: v0.1.7...v0.1.8
"},{"location":"changelog/#v017","title":"v0.1.7What's Changed","text":"Full Changelog: v0.1.6...v0.1.7
"},{"location":"changelog/#v016","title":"v0.1.6What's Changed","text":"Full Changelog: v0.1.5...v0.1.6
"},{"location":"changelog/#v015","title":"v0.1.5What's Changed","text":"Labels.split
and Labels.make_training_splits
by @talmo in #98Full Changelog: v0.1.4...v0.1.5
"},{"location":"changelog/#v014","title":"v0.1.4What's Changed","text":"labels.save(\"labels.pkg.slp\", embed=\"user\")
to embed frames with user-labeled instances (Instance
)labels.save(\"labels.pkg.slp\", embed=\"user+suggestion\")
to embed frames with user-labeled instances and suggestion frames (useful for inference after training)labels.save(\"labels.pkg.slp\", embed=\"source\")
to restore the source video (\"unembed\")__repr__
s for Skeleton
, LabeledFrame
, Labels
, Instance
, PredictedInstance
Labels.append()
and Labels.extend()
to add LabeledFrame
s now will update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents.Labels.update()
to manually update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents of Labels.labeled_frames
and Labels.suggestions
.Labels.replace_filenames()
: multiple methods for replacing all video filenames across the project (#85).Skeleton.edge_names
to return list of edges as tuples of string namessio.load_video
and related high level Video
APIs to clarify supported file formats.Video(filename)
construction (#94)Note: This is a re-release of v0.1.3 which had a borked deployment.
Full Changelog: v0.1.2...v0.1.4
"},{"location":"changelog/#v013","title":"v0.1.3What's Changed","text":"labels.save(\"labels.pkg.slp\", embed=\"user\")
to embed frames with user-labeled instances (Instance
)labels.save(\"labels.pkg.slp\", embed=\"user+suggestion\")
to embed frames with user-labeled instances and suggestion frames (useful for inference after training)labels.save(\"labels.pkg.slp\", embed=\"source\")
to restore the source video (\"unembed\")__repr__
s for Skeleton
, LabeledFrame
, Labels
, Instance
, PredictedInstance
Labels.append()
and Labels.extend()
to add LabeledFrame
s now will update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents.Labels.update()
to manually update Labels.tracks
, Labels.skeletons
and Labels.videos
with contents of Labels.labeled_frames
and Labels.suggestions
.Labels.replace_filenames()
: multiple methods for replacing all video filenames across the project (#85).Skeleton.edge_names
to return list of edges as tuples of string namessio.load_video
and related high level Video
APIs to clarify supported file formats.Video(filename)
construction (#94)Full Changelog: v0.1.2...v0.1.3
"},{"location":"changelog/#v012","title":"v0.1.2What's Changed","text":"Full Changelog: v0.1.1...v0.1.2
"},{"location":"changelog/#v011","title":"v0.1.1What's Changed","text":"ImageVideo
backend by @talmo in #88SuggestionFrame
by @talmo in #89ImageVideo
support in SLP by @talmo in #90Full Changelog: v0.1.0...v0.1.1
"},{"location":"changelog/#v010","title":"v0.1.0What's ChangedNotes on dependency pins","text":"Add skeleton utilities by @talmo in #76
Skeleton.add_node
: Add a node by name or object.Skeleton.add_edge
: Add an edge by lists of names or objects.Skeleton.add_symmetry
: Add a symmetry edge by lists of names or objects.Update CI and versions by @talmo in #77
Bump to v0.1.0 by @talmo in #78
Fix multi-skeleton loading by @talmo in #79
Add high level APIs by @talmo in #80
load_video
and load_file
high level APIs (#48)Labels QOL enhancements by @talmo in #81
LabeledFrame.remove_predictions
: Remove predicted instances from a labeled frame.LabeledFrame.remove_empty_instances
: Remove instances with no visible points from a labeled frame.Labels.save
: Instance-level convenience wrapper for sio.save_file
.Labels.clean
: Remove unused or empty frames, instances, videos, skeletons and tracks.Labels.remove_predictions
: Remove predicted instances from all labeled frames (#69).Labels.__getitem__
: Now supports lists, slices, numpy arrays, tuples of (Video, frame_idx)
and Video
.Video QOL enhancements by @talmo in #82
Video.is_open
: Checks if the video exists and the backend is set.Video.open
: Opens or restarts the backend for reading.Video.close
: Closes the backend for reading.Video.exists
: Check if the filename for the video exists.Video.replace_filename
: Replace the filename and restart the backend.ffmpeg < 6.1
due to imageio/imageio-ffmpeg#99h5py >= 3.8.0
due to h5py/h5py#2118python >= 3.8
due to h5py >= 3.8.0
(we still support python==3.7
via pip but this is no longer in CI)Full Changelog: v0.0.14...v0.1.0
"},{"location":"changelog/#v0014","title":"v0.0.14What's Changed","text":"Full Changelog: v0.0.13...v0.0.14
"},{"location":"changelog/#v0013","title":"v0.0.13What's Changed","text":"Full Changelog: v0.0.12...v0.0.13
"},{"location":"changelog/#v0012","title":"v0.0.12What's ChangedNew Contributors","text":"Full Changelog: v0.0.11...v0.0.12
"},{"location":"changelog/#v0011","title":"v0.0.11What's Changed","text":"Full Changelog: v0.0.10...v0.0.11
"},{"location":"changelog/#v0010","title":"v0.0.10What's Changed","text":"This is a hotfix to get around installing in older environments with numpy <1.20.
Full Changelog: v0.0.9...v0.0.10
"},{"location":"changelog/#v009","title":"v0.0.9What's Changed","text":"Full Changelog: v0.0.8...v0.0.9
"},{"location":"changelog/#v008","title":"v0.0.8What's Changed","text":"Full Changelog: v0.0.7...v0.0.8
"},{"location":"changelog/#v007","title":"v0.0.7What's Changed","text":"Full Changelog: v0.0.6...v0.0.7
"},{"location":"changelog/#v006","title":"v0.0.6What's Changed","text":"Full Changelog: v0.0.5...v0.0.6
"},{"location":"changelog/#v005","title":"v0.0.5What's Changed","text":"Full Changelog: v0.0.4...v0.0.5
"},{"location":"changelog/#v004","title":"v0.0.4What's Changed","text":"Full Changelog: v0.0.3...v0.0.4
"},{"location":"changelog/#v003","title":"v0.0.3What's Changed","text":"pyproject.toml
alone instead of setup.cfg
.mypy
type enforcement -- this is too strict for a library intended to be this flexible.Full Changelog: v0.0.2...v0.0.3
"},{"location":"changelog/#v002","title":"v0.0.2What's ChangedNew Contributors","text":"load_nwb
, save_nwb
, load_labelstudio
, save_labelstudio
Full Changelog: v0.0.1...v0.0.2
"},{"location":"changelog/#v001","title":"v0.0.1What's ChangedNew Contributors","text":"Initial stable release of the package.
__repr__
to labels object by @h-mayorquin in #8Full Changelog: https://github.com/talmolab/sleap-io/commits/v0.0.1
"},{"location":"examples/","title":"Examples","text":""},{"location":"examples/#load-and-save-in-different-formats","title":"Load and save in different formats","text":"import sleap_io as sio\n\n# Load from SLEAP file.\nlabels = sio.load_file(\"predictions.slp\")\n\n# Save to NWB file.\nlabels.save(\"predictions.nwb\")\n
See also: Labels.save
and Formats
import sleap_io as sio\n\nlabels = sio.load_slp(\"tests/data/slp/centered_pair_predictions.slp\")\n\n# Convert predictions to point coordinates in a single array.\ntrx = labels.numpy()\nn_frames, n_tracks, n_nodes, xy = trx.shape\nassert xy == 2\n\n# Convert to array with confidence scores appended.\ntrx_with_scores = labels.numpy(return_confidence=True)\nn_frames, n_tracks, n_nodes, xy_score = trx.shape \nassert xy_score == 3\n
See also: Labels.numpy
import sleap_io as sio\n\nvideo = sio.load_video(\"test.mp4\")\nn_frames, height, width, channels = video.shape\n\nframe = video[0]\nheight, width, channels = frame.shape\n
See also: sio.load_video
and Video
import sleap_io as sio\nimport numpy as np\n\n# Create skeleton.\nskeleton = sio.Skeleton(\n nodes=[\"head\", \"thorax\", \"abdomen\"],\n edges=[(\"head\", \"thorax\"), (\"thorax\", \"abdomen\")]\n)\n\n# Create video.\nvideo = sio.load_video(\"test.mp4\")\n\n# Create instance.\ninstance = sio.Instance.from_numpy(\n points=np.array([\n [10.2, 20.4],\n [5.8, 15.1],\n [0.3, 10.6],\n ]),\n skeleton=skeleton\n)\n\n# Create labeled frame.\nlf = sio.LabeledFrame(video=video, frame_idx=0, instances=[instance])\n\n# Create labels.\nlabels = sio.Labels(videos=[video], skeletons=[skeleton], labeled_frames=[lf])\n\n# Save.\nlabels.save(\"labels.slp\")\n
See also: Model, Labels
, LabeledFrame
, Instance
, PredictedInstance
, Skeleton
, Video
, Track
, SuggestionFrame
import sleap_io as sio\n\n# Load labels without trying to open the video files.\nlabels = sio.load_file(\"labels.v001.slp\", open_videos=False)\n\n# Fix paths using prefix replacement.\nlabels.replace_filenames(prefix_map={\n \"D:/data/sleap_projects\": \"/home/user/sleap_projects\",\n \"C:/Users/sleaper/Desktop/test\": \"/home/user/sleap_projects\",\n})\n\n# Save labels with updated paths.\nlabels.save(\"labels.v002.slp\")\n
See also: Labels.replace_filenames
import sleap_io as sio\n\n# Load source labels.\nlabels = sio.load_file(\"labels.v001.slp\")\n\n# Save with embedded images for frames with user labeled data and suggested frames.\nlabels.save(\"labels.v001.pkg.slp\", embed=\"user+suggestions\")\n
See also: Labels.save
import sleap_io as sio\n\n# Load source labels.\nlabels = sio.load_file(\"labels.v001.slp\")\n\n# Make splits and export with embedded images.\nlabels.make_training_splits(n_train=0.8, n_val=0.1, n_test=0.1, save_dir=\"split1\", seed=42)\n\n# Splits will be saved as self-contained SLP package files with images and labels.\nlabels_train = sio.load_file(\"split1/train.pkg.slp\")\nlabels_val = sio.load_file(\"split1/val.pkg.slp\")\nlabels_test = sio.load_file(\"split1/test.pkg.slp\")\n
See also: Labels.make_training_splits
Some video formats are not readily seekable at frame-level accuracy. Re-encoding them with the default settings in our video writer makes them reliably seekable with minimal loss of quality, and this can be done in a single line:
import sleap_io as sio\n\nsio.save_video(sio.load_video(\"input.mp4\"), \"output.mp4\")\n
See also: save_video
It can sometimes be useful to pull out a short clip of frames, either for sharing or for generating data on only a subset of the video. We can do this with the following recipe:
import sleap_io as sio\n\n# Load existing data.\nlabels = sio.load_file(\"labels.slp\")\n\n# Create a new labels file with data from frames 1000-2000 in video 0.\n# Note: a new video will be saved with filename \"clip.mp4\" and frame indices adjusted in\n# the labels.\nclip = labels.trim(\"clip.slp\", list(range(1_000, 2_000)), video=0)\n
See also: Labels.trim
Skeleton
objects hold metadata about the keypoints, their ordering, names and connections. When converting between different annotation formats, it can be useful to change skeletons while retaining as much information as possible. We can do this as follows:
import sleap_io as sio\n\n# Load existing labels with skeleton with nodes: \"head\", \"trunk\", \"tti\"\nlabels = sio.load_file(\"labels.slp\")\n\n# Create a new skeleton with different nodes.\nnew_skeleton = sio.Skeleton([\"HEAD\", \"CENTROID\", \"TAIL_BASE\", \"TAIL_TIP\"])\n\n# Replace the skeleton with correspondences where possible.\nlabels.replace_skeleton(\n new_skeleton,\n node_map={\n \"head\": \"HEAD\",\n \"trunk\": \"CENTROID\",\n \"tti\": \"TAIL_BASE\"\n }\n)\n\n# Save with the new skeleton format.\nlabels.save(\"labels_with_new_skeleton.slp\")\n
See also: Labels.replace_skeleton
sleap_io.load_file(filename, format=None, **kwargs)
","text":"Load a file and return the appropriate object.
Parameters:
Name Type Description Defaultfilename
str | Path
Path to a file.
requiredformat
Optional[str]
Optional format to load as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\" and \"video\".
None
Returns:
Type DescriptionUnion[Labels, Video]
A Labels
or Video
object.
sleap_io/io/main.py
def load_file(\n filename: str | Path, format: Optional[str] = None, **kwargs\n) -> Union[Labels, Video]:\n \"\"\"Load a file and return the appropriate object.\n\n Args:\n filename: Path to a file.\n format: Optional format to load as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\"\n and \"video\".\n\n Returns:\n A `Labels` or `Video` object.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"json\"\n elif filename.endswith(\".h5\"):\n format = \"jabs\"\n else:\n for vid_ext in Video.EXTS:\n if filename.endswith(vid_ext):\n format = \"video\"\n break\n if format is None:\n raise ValueError(f\"Could not infer format from filename: '{filename}'.\")\n\n if filename.endswith(\".slp\"):\n return load_slp(filename, **kwargs)\n elif filename.endswith(\".nwb\"):\n return load_nwb(filename, **kwargs)\n elif filename.endswith(\".json\"):\n return load_labelstudio(filename, **kwargs)\n elif filename.endswith(\".h5\"):\n return load_jabs(filename, **kwargs)\n elif format == \"video\":\n return load_video(filename, **kwargs)\n
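As a usage sketch (all filenames hypothetical), the format is normally inferred from the extension:
import sleap_io as sio\n\n# The format is inferred from the file extension.\nlabels = sio.load_file(\"predictions.slp\")   # SLEAP labels file\nvideo = sio.load_file(\"video.mp4\")          # video extensions dispatch to load_video\nnwb_labels = sio.load_file(\"session.nwb\")   # NWB pose data\n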
"},{"location":"formats/#sleap_io.save_file","title":"sleap_io.save_file(labels, filename, format=None, **kwargs)
","text":"Save a file based on the extension.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str | Path
Path to save labels to.
requiredformat
Optional[str]
Optional format to save as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and \"jabs\".
None
Source code in sleap_io/io/main.py
def save_file(\n labels: Labels, filename: str | Path, format: Optional[str] = None, **kwargs\n):\n \"\"\"Save a file based on the extension.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to.\n format: Optional format to save as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and\n \"jabs\".\n \"\"\"\n if isinstance(filename, Path):\n filename = str(filename)\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"labelstudio\"\n elif \"pose_version\" in kwargs:\n format = \"jabs\"\n\n if format == \"slp\":\n save_slp(labels, filename, **kwargs)\n elif format == \"nwb\":\n save_nwb(labels, filename, **kwargs)\n elif format == \"labelstudio\":\n save_labelstudio(labels, filename, **kwargs)\n elif format == \"jabs\":\n pose_version = kwargs.pop(\"pose_version\", 5)\n root_folder = kwargs.pop(\"root_folder\", filename)\n save_jabs(labels, pose_version=pose_version, root_folder=root_folder)\n else:\n raise ValueError(f\"Unknown format '{format}' for filename: '{filename}'.\")\n
"},{"location":"formats/#sleap_io.load_video","title":"sleap_io.load_video(filename, **kwargs)
","text":"Load a video file.
Parameters:
Name Type Description Defaultfilename
str
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are expected. If filename is a folder, it will be searched for images.
requiredReturns:
Type DescriptionVideo
A Video
object.
sleap_io/io/main.py
def load_video(filename: str, **kwargs) -> Video:\n \"\"\"Load a video file.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n\n Returns:\n A `Video` object.\n \"\"\"\n return Video.from_filename(filename, **kwargs)\n
"},{"location":"formats/#sleap_io.save_video","title":"sleap_io.save_video(frames, filename, fps=30, pixelformat='yuv420p', codec='libx264', crf=25, preset='superfast', output_params=None)
","text":"Write a list of frames to a video file.
Parameters:
Name Type Description Defaultframes
ndarray | Video
Sequence of frames to write to video. Each frame should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
requiredfilename
str | Path
Path to output video file.
requiredfps
float
Frames per second. Defaults to 30.
30
pixelformat
str
Pixel format for video. Defaults to \"yuv420p\".
'yuv420p'
codec
str
Codec to use for encoding. Defaults to \"libx264\".
'libx264'
crf
int
Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not \"libx264\".
25
preset
str
H264 encoding preset. Defaults to \"superfast\". No effect if codec is not \"libx264\".
'superfast'
output_params
list | None
Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use ffmpeg -h encoder=libx264
to see all options for libx264 output_params.
None
See also: sio.VideoWriter
sleap_io/io/main.py
def save_video(\n frames: np.ndarray | Video,\n filename: str | Path,\n fps: float = 30,\n pixelformat: str = \"yuv420p\",\n codec: str = \"libx264\",\n crf: int = 25,\n preset: str = \"superfast\",\n output_params: list | None = None,\n):\n \"\"\"Write a list of frames to a video file.\n\n Args:\n frames: Sequence of frames to write to video. Each frame should be a 2D or 3D\n numpy array with dimensions (height, width) or (height, width, channels).\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n See also: `sio.VideoWriter`\n \"\"\"\n if output_params is None:\n output_params = []\n\n with video_writing.VideoWriter(\n filename,\n fps=fps,\n pixelformat=pixelformat,\n codec=codec,\n crf=crf,\n preset=preset,\n output_params=output_params,\n ) as writer:\n for frame in frames:\n writer(frame)\n
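A small sketch of writing frames with custom compression settings (paths hypothetical):
import sleap_io as sio\n\nvideo = sio.load_video(\"input.mp4\")  # hypothetical path\n\n# Re-encode with a lower CRF (higher quality) and a slower preset.\nsio.save_video(video, \"output.mp4\", fps=30, crf=18, preset=\"medium\")\n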
"},{"location":"formats/#sleap_io.load_slp","title":"sleap_io.load_slp(filename, open_videos=True)
","text":"Load a SLEAP dataset.
Parameters:
Name Type Description Defaultfilename
str
Path to a SLEAP labels file (.slp
).
open_videos
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_slp(filename: str, open_videos: bool = True) -> Labels:\n \"\"\"Load a SLEAP dataset.\n\n Args:\n filename: Path to a SLEAP labels file (`.slp`).\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return slp.read_labels(filename, open_videos=open_videos)\n
"},{"location":"formats/#sleap_io.save_slp","title":"sleap_io.save_slp(labels, filename, embed=None)
","text":"Save a SLEAP dataset to a .slp
file.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .slp
.
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/io/main.py
def save_slp(\n labels: Labels,\n filename: str,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Save a SLEAP dataset to a `.slp` file.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.slp`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n return slp.write_labels(filename, labels, embed=embed)\n
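As a sketch of the embedding options (filenames hypothetical):
import sleap_io as sio\n\nlabels = sio.load_file(\"labels.slp\")  # hypothetical file\n\n# Default: re-save any already-embedded frames, otherwise keep video references.\nsio.save_slp(labels, \"labels.copy.slp\")\n\n# Save a self-contained package with user-labeled and suggested frames embedded.\nsio.save_slp(labels, \"labels.pkg.slp\", embed=\"user+suggestions\")\n\n# Restore references to the original source videos when re-saving a package.\nsio.save_slp(labels, \"labels.unembedded.slp\", embed=\"source\")\n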
"},{"location":"formats/#sleap_io.load_nwb","title":"sleap_io.load_nwb(filename)
","text":"Load an NWB dataset as a SLEAP Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to a NWB file (.nwb
).
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_nwb(filename: str) -> Labels:\n \"\"\"Load an NWB dataset as a SLEAP `Labels` object.\n\n Args:\n filename: Path to a NWB file (`.nwb`).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return nwb.read_nwb(filename)\n
"},{"location":"formats/#sleap_io.save_nwb","title":"sleap_io.save_nwb(labels, filename, append=True)
","text":"Save a SLEAP dataset to NWB format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to NWB file to save to. Must end in .nwb
.
append
bool
If True
(the default), append to existing NWB file. File will be created if it does not exist.
True
See also: nwb.write_nwb, nwb.append_nwb
Source code insleap_io/io/main.py
def save_nwb(labels: Labels, filename: str, append: bool = True):\n \"\"\"Save a SLEAP dataset to NWB format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to NWB file to save to. Must end in `.nwb`.\n append: If `True` (the default), append to existing NWB file. File will be\n created if it does not exist.\n\n See also: nwb.write_nwb, nwb.append_nwb\n \"\"\"\n if append and Path(filename).exists():\n nwb.append_nwb(labels, filename)\n else:\n nwb.write_nwb(labels, filename)\n
"},{"location":"formats/#sleap_io.load_jabs","title":"sleap_io.load_jabs(filename, skeleton=None)
","text":"Read JABS-style predictions from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the jabs h5 pose file.
requiredskeleton
Optional[Skeleton]
An optional Skeleton
object.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_jabs(filename: str, skeleton: Optional[Skeleton] = None) -> Labels:\n \"\"\"Read JABS-style predictions from a file and return a `Labels` object.\n\n Args:\n filename: Path to the jabs h5 pose file.\n skeleton: An optional `Skeleton` object.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return jabs.read_labels(filename, skeleton=skeleton)\n
"},{"location":"formats/#sleap_io.save_jabs","title":"sleap_io.save_jabs(labels, pose_version, root_folder=None)
","text":"Save a SLEAP dataset to JABS pose file format.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
object.
pose_version
int
The JABS pose version to write data out.
requiredroot_folder
Optional[str]
Optional root folder where the files should be saved.
None
Note Filenames for JABS poses are based on video filenames.
Source code insleap_io/io/main.py
def save_jabs(labels: Labels, pose_version: int, root_folder: Optional[str] = None):\n \"\"\"Save a SLEAP dataset to JABS pose file format.\n\n Args:\n labels: SLEAP `Labels` object.\n pose_version: The JABS pose version to write data out.\n root_folder: Optional root folder where the files should be saved.\n\n Note:\n Filenames for JABS poses are based on video filenames.\n \"\"\"\n jabs.write_labels(labels, pose_version, root_folder)\n
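A minimal sketch (the labels file and output folder are hypothetical); note that output filenames are derived from the video filenames:
import sleap_io as sio\n\nlabels = sio.load_file(\"labels.slp\")  # hypothetical file\n\n# Write JABS v5 pose files into a target folder.\nsio.save_jabs(labels, pose_version=5, root_folder=\"jabs_out\")\n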
"},{"location":"formats/#sleap_io.load_labelstudio","title":"sleap_io.load_labelstudio(filename, skeleton=None)
","text":"Read Label Studio-style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the label-studio annotation file in JSON format.
requiredskeleton
Optional[Union[Skeleton, list[str]]]
An optional Skeleton
object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_labelstudio(\n filename: str, skeleton: Optional[Union[Skeleton, list[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio-style annotations from a file and return a `Labels` object.\n\n Args:\n filename: Path to the label-studio annotation file in JSON format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return labelstudio.read_labels(filename, skeleton=skeleton)\n
"},{"location":"formats/#sleap_io.save_labelstudio","title":"sleap_io.save_labelstudio(labels, filename)
","text":"Save a SLEAP dataset to Label Studio format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .json
.
sleap_io/io/main.py
def save_labelstudio(labels: Labels, filename: str):\n \"\"\"Save a SLEAP dataset to Label Studio format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.json`.\n \"\"\"\n labelstudio.write_labels(labels, filename)\n
"},{"location":"model/","title":"Data model","text":"sleap-io
implements the core data structures used in SLEAP for storing data related to multi-instance pose tracking, including for annotation, training and inference.
sleap_io.Labels
","text":"Pose data for a set of videos that have user labels and/or predictions.
Attributes:
labeled_frames (list[LabeledFrame]): A list of LabeledFrames that are associated with this dataset.
videos (list[Video]): A list of Videos that are associated with this dataset. Videos do not need to have corresponding LabeledFrames if they do not have any labels or predictions yet.
skeletons (list[Skeleton]): A list of Skeletons that are associated with this dataset. This should generally only contain a single skeleton.
tracks (list[Track]): A list of Tracks that are associated with this dataset.
suggestions (list[SuggestionFrame]): A list of SuggestionFrames that are associated with this dataset.
provenance (dict[str, Any]): Dictionary of arbitrary metadata providing additional information about where the dataset came from.
Notes: Videos in contained LabeledFrames, and Skeletons and Tracks in contained Instances, are added to the respective lists automatically.
Methods:
__attrs_post_init__: Append videos, skeletons, and tracks seen in labeled_frames to Labels.
__getitem__: Return one or more labeled frames based on indexing criteria.
__iter__: Iterate over the labeled_frames list when calling iter method on Labels.
__len__: Return number of labeled frames.
__repr__: Return a readable representation of the labels.
__str__: Return a readable representation of the labels.
append: Append a labeled frame to the labels.
clean: Remove empty frames, unused skeletons, tracks and videos.
extend: Append a list of labeled frames to the labels.
extract: Extract a set of frames into a new Labels object.
find: Search for labeled frames given video and/or frame index.
make_training_splits: Make splits for training with embedded images.
numpy: Construct a numpy array from instance points.
remove_nodes: Remove nodes from the skeleton.
remove_predictions: Remove all predicted instances from the labels.
rename_nodes: Rename nodes in the skeleton.
reorder_nodes: Reorder nodes in the skeleton.
replace_filenames: Replace video filenames.
replace_skeleton: Replace the skeleton in the labels.
replace_videos: Replace videos and update all references.
save: Save labels to file in specified format.
split: Separate the labels into random splits.
trim: Trim the labels to a subset of frames and videos accordingly.
update: Update data structures based on contents.
Attributes:
instances (Iterator[Instance]): Return an iterator over all instances within all labeled frames.
skeleton (Skeleton): Return the skeleton if there is only a single skeleton in the labels.
user_labeled_frames (list[LabeledFrame]): Return all labeled frames with user (non-predicted) instances.
video (Video): Return the video if there is only a single video in the labels.
Source code in sleap_io/model/labels.py
@define\nclass Labels:\n \"\"\"Pose data for a set of videos that have user labels and/or predictions.\n\n Attributes:\n labeled_frames: A list of `LabeledFrame`s that are associated with this dataset.\n videos: A list of `Video`s that are associated with this dataset. Videos do not\n need to have corresponding `LabeledFrame`s if they do not have any\n labels or predictions yet.\n skeletons: A list of `Skeleton`s that are associated with this dataset. This\n should generally only contain a single skeleton.\n tracks: A list of `Track`s that are associated with this dataset.\n suggestions: A list of `SuggestionFrame`s that are associated with this dataset.\n provenance: Dictionary of arbitrary metadata providing additional information\n about where the dataset came from.\n\n Notes:\n `Video`s in contain `LabeledFrame`s, and `Skeleton`s and `Track`s in contained\n `Instance`s are added to the respective lists automatically.\n \"\"\"\n\n labeled_frames: list[LabeledFrame] = field(factory=list)\n videos: list[Video] = field(factory=list)\n skeletons: list[Skeleton] = field(factory=list)\n tracks: list[Track] = field(factory=list)\n suggestions: list[SuggestionFrame] = field(factory=list)\n provenance: dict[str, Any] = field(factory=dict)\n\n def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n\n def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n\n def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n ) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n\n def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n\n def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n 
f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n\n def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n ) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. 
Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n\n @property\n def video(self) -> Video:\n \"\"\"Return the video if there is only a single video in the labels.\"\"\"\n if len(self.videos) == 0:\n raise ValueError(\"There are no videos in the labels.\")\n elif len(self.videos) == 1:\n return self.videos[0]\n else:\n raise ValueError(\n \"Labels.video can only be used when there is only a single video saved \"\n \"in the labels. Use Labels.videos instead.\"\n )\n\n @property\n def skeleton(self) -> Skeleton:\n \"\"\"Return the skeleton if there is only a single skeleton in the labels.\"\"\"\n if len(self.skeletons) == 0:\n raise ValueError(\"There are no skeletons in the labels.\")\n elif len(self.skeletons) == 1:\n return self.skeletons[0]\n else:\n raise ValueError(\n \"Labels.skeleton can only be used when there is only a single skeleton \"\n \"saved in the labels. Use Labels.skeletons instead.\"\n )\n\n def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n ) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. 
If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n\n def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n ):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n\n def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n ):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n 
if frames:\n self.labeled_frames = kept_frames\n\n def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n\n @property\n def user_labeled_frames(self) -> list[LabeledFrame]:\n \"\"\"Return all labeled frames with user (non-predicted) instances.\"\"\"\n return [lf for lf in self.labeled_frames if lf.has_user_instances]\n\n @property\n def instances(self) -> Iterator[Instance]:\n \"\"\"Return an iterator over all instances within all labeled frames.\"\"\"\n return (instance for lf in self.labeled_frames for instance in lf.instances)\n\n def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n ):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n\n def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. 
If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n ):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. 
**All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n\n def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n ):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n\n def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n ):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. 
Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n\n def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n ) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n\n def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. 
If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n\n def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n ) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. 
If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n\n def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. 
Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
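A brief, illustrative sketch of the container-level behavior documented above; the .slp path is hypothetical and load_slp is the SLP reader referenced in the docstrings:

```python
import sleap_io as sio

labels = sio.load_slp("labels.v001.slp")  # hypothetical project file

print(labels)       # Labels(labeled_frames=..., videos=..., skeletons=..., ...)
print(len(labels))  # number of labeled frames

# Convenience properties; these raise ValueError if the project has more
# than one video or skeleton.
video = labels.video
skeleton = labels.skeleton

# Iterate over labeled frames and over all contained instances.
for lf in labels:
    pass
n_instances = sum(1 for _ in labels.instances)
```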
"},{"location":"model/#sleap_io.Labels.instances","title":"instances: Iterator[Instance]
property
","text":"Return an iterator over all instances within all labeled frames.
"},{"location":"model/#sleap_io.Labels.skeleton","title":"skeleton: Skeleton
property
","text":"Return the skeleton if there is only a single skeleton in the labels.
"},{"location":"model/#sleap_io.Labels.user_labeled_frames","title":"user_labeled_frames: list[LabeledFrame]
property
","text":"Return all labeled frames with user (non-predicted) instances.
"},{"location":"model/#sleap_io.Labels.video","title":"video: Video
property
","text":"Return the video if there is only a single video in the labels.
"},{"location":"model/#sleap_io.Labels.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Append videos, skeletons, and tracks seen in labeled_frames
to Labels
.
sleap_io/model/labels.py
def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n
"},{"location":"model/#sleap_io.Labels.__getitem__","title":"__getitem__(key)
","text":"Return one or more labeled frames based on indexing criteria.
Source code in sleap_io/model/labels.py
def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n
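The indexing forms supported by __getitem__ can be exercised like this (a sketch; labels is assumed to be an existing Labels object):

```python
lf = labels[0]               # integer index -> LabeledFrame
some = labels[0:10]          # slice -> list[LabeledFrame]
picked = labels[[0, 3, 7]]   # list of indices -> list[LabeledFrame]

video = labels.videos[0]
frames_in_video = labels[video]   # Video -> all labeled frames in that video
one_frame = labels[(video, 42)]   # (Video, frame_idx) -> single LabeledFrame;
                                  # raises IndexError if no match exists
```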
"},{"location":"model/#sleap_io.Labels.__iter__","title":"__iter__()
","text":"Iterate over labeled_frames
list when calling iter method on Labels
.
sleap_io/model/labels.py
def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n
"},{"location":"model/#sleap_io.Labels.__len__","title":"__len__()
","text":"Return number of labeled frames.
Source code in sleap_io/model/labels.py
def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n
"},{"location":"model/#sleap_io.Labels.__repr__","title":"__repr__()
","text":"Return a readable representation of the labels.
Source code in sleap_io/model/labels.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n
"},{"location":"model/#sleap_io.Labels.__str__","title":"__str__()
","text":"Return a readable representation of the labels.
Source code in sleap_io/model/labels.py
def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n
"},{"location":"model/#sleap_io.Labels.append","title":"append(lf, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
lf (LabeledFrame): A labeled frame to add to the labels. Required.
update (bool): If True (the default), update list of videos, tracks and skeletons from the contents. Default: True.
Source code in sleap_io/model/labels.py
def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
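A small sketch of appending a new (empty) frame, assuming LabeledFrame is importable from the package root and that labels already has at least one video; the frame indices are arbitrary:

```python
from sleap_io import LabeledFrame

lf = LabeledFrame(video=labels.videos[0], frame_idx=100)

# update=True (the default) also registers any new videos, skeletons, and tracks.
labels.append(lf)

# With update=False, call labels.update() afterwards to refresh those lists.
labels.append(LabeledFrame(video=labels.videos[0], frame_idx=101), update=False)
labels.update()
```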
"},{"location":"model/#sleap_io.Labels.clean","title":"clean(frames=True, empty_instances=False, skeletons=True, tracks=True, videos=False)
","text":"Remove empty frames, unused skeletons, tracks and videos.
Parameters:
frames (bool): If True (the default), remove empty frames. Default: True.
empty_instances (bool): If True (NOT default), remove instances that have no visible points. Default: False.
skeletons (bool): If True (the default), remove unused skeletons. Default: True.
tracks (bool): If True (the default), remove unused tracks. Default: True.
videos (bool): If True (NOT default), remove videos that have no labeled frames. Default: False.
Source code in sleap_io/model/labels.py
def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n if frames:\n self.labeled_frames = kept_frames\n
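A sketch of a typical cleanup pass; every flag is optional and is spelled out here only to contrast with the defaults described above:

```python
# Drop empty frames and instances with no visible points, and prune
# unused skeletons, tracks, and videos.
labels.clean(
    frames=True,
    empty_instances=True,
    skeletons=True,
    tracks=True,
    videos=True,
)
```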
"},{"location":"model/#sleap_io.Labels.extend","title":"extend(lfs, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlfs
list[LabeledFrame]
A list of labeled frames to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
"},{"location":"model/#sleap_io.Labels.extract","title":"extract(inds, copy=True)
","text":"Extract a set of frames into a new Labels object.
Parameters:
inds (list[int] | list[tuple[Video, int]] | ndarray): Indices of labeled frames. Can be specified as a list or array of integer indices of labeled frames, or tuples of Video and frame indices. Required.
copy (bool): If True (the default), return a copy of the frames and containing objects. Otherwise, return a reference to the data. Default: True.
Returns:
Labels: A new Labels object containing the selected labels.
Notes: This copies the labeled frames and their associated data, including skeletons and tracks, and tries to maintain the relative ordering.
This also copies the provenance and inserts an extra key: "source_labels" with the path to the current labels, if available.
It does NOT copy suggested frames.
Source code in sleap_io/model/labels.py
def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n
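For example (a sketch; the indices are arbitrary), pulling a handful of frames into a standalone Labels object:

```python
# Copy labeled frames 0, 5, and 10 into a new, independent Labels object.
subset = labels.extract([0, 5, 10], copy=True)

# Or reference the same underlying objects instead of copying them.
subset_view = labels.extract([0, 5, 10], copy=False)
```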
"},{"location":"model/#sleap_io.Labels.find","title":"find(video, frame_idx=None, return_new=False)
","text":"Search for labeled frames given video and/or frame index.
Parameters:
video (Video): A Video that is associated with the project. Required.
frame_idx (int | list[int] | None): The frame index (or indices) which we want to find in the video. If a range is specified, we'll return all frames with indices in that range. If not specified, we'll return all labeled frames for the video. Default: None.
return_new (bool): Whether to return a singleton of a new and empty LabeledFrame if none are found in the project. Default: False.
Returns:
list[LabeledFrame]: List of LabeledFrame objects that match the criteria.
The list will be empty if no matches are found, unless return_new is True, in which case it contains new (empty) LabeledFrame objects with video and frame_idx set.
sleap_io/model/labels.py
def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n
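A short sketch of searching by video and frame index (assumes labels has at least one video):

```python
video = labels.videos[0]

# All labeled frames in this video.
all_in_video = labels.find(video)

# Specific frame indices; with return_new=True, missing ones come back as
# new, empty LabeledFrame objects instead of being skipped.
frames = labels.find(video, frame_idx=[0, 10, 20], return_new=True)
```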
"},{"location":"model/#sleap_io.Labels.make_training_splits","title":"make_training_splits(n_train, n_val=None, n_test=None, save_dir=None, seed=None, embed=True)
","text":"Make splits for training with embedded images.
Parameters:
n_train (int | float): Size of the training split as integer or fraction. Required.
n_val (int | float | None): Size of the validation split as integer or fraction. If None, this will be inferred based on the values of n_train and n_test. If n_test is None, this will be the remainder of the data after the training split. Default: None.
n_test (int | float | None): Size of the testing split as integer or fraction. If None, the test split will not be saved. Default: None.
save_dir (str | Path | None): If specified, save splits to SLP files with embedded images. Default: None.
seed (int | None): Optional integer seed to use for reproducibility. Default: None.
embed (bool): If True (the default), embed user labeled frame images in the saved files, which is useful for portability but can be slow for large projects. If False, labels are saved with references to the source video files. Default: True.
Returns:
tuple[Labels, Labels] | tuple[Labels, Labels, Labels]: A tuple of labels_train, labels_val, or labels_train, labels_val, labels_test if n_test was specified.
Notes: Predictions and suggestions will be removed before saving, leaving only frames with user labeled data (the source labels are not affected).
Frames with user labeled data will be embedded in the resulting files.
If save_dir is specified, this will save the randomly sampled splits to:
- {save_dir}/train.pkg.slp
- {save_dir}/val.pkg.slp
- {save_dir}/test.pkg.slp (if n_test is specified)
If embed is False, the files will be saved without embedded images to:
- {save_dir}/train.slp
- {save_dir}/val.slp
- {save_dir}/test.slp (if n_test is specified)
See also: Labels.split
sleap_io/model/labels.py
def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n
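A hedged sketch of producing train/val/test splits with embedded images; the fractions, seed, and output directory below are made up:

```python
labels_train, labels_val, labels_test = labels.make_training_splits(
    n_train=0.8,
    n_val=0.1,
    n_test=0.1,
    save_dir="splits",  # writes train.pkg.slp, val.pkg.slp, test.pkg.slp here
    seed=42,
)
```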
"},{"location":"model/#sleap_io.Labels.numpy","title":"numpy(video=None, all_frames=True, untracked=False, return_confidence=False)
","text":"Construct a numpy array from instance points.
Parameters:
video (Optional[Union[Video, int]]): Video or video index to convert to numpy arrays. If None (the default), uses the first video. Default: None.
untracked (bool): If False (the default), include only instances that have a track assignment. If True, includes all instances in each frame in arbitrary order. Default: False.
return_confidence (bool): If False (the default), only return points of nodes. If True, return the points and scores of nodes. Default: False.
Returns:
ndarray: An array of tracks of shape (n_frames, n_tracks, n_nodes, 2) if return_confidence is False. Otherwise returned shape is (n_frames, n_tracks, n_nodes, 3) if return_confidence is True.
Missing data will be replaced with np.nan.
If this is a single instance project, a track does not need to be assigned.
Only predicted instances (NOT user instances) will be returned.
Notes: This method assumes that instances have tracks assigned and is intended to function primarily for single-video prediction results.
Source code in sleap_io/model/labels.py
def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n
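For example (a sketch), converting single-video predictions to an array:

```python
import numpy as np

# Tracked predicted instances from the first video, with confidence scores.
tracks = labels.numpy(video=0, return_confidence=True)
# tracks.shape == (n_frames, n_tracks, n_nodes, 3); missing data is np.nan.

# Mean x/y position per track and node, ignoring missing frames.
mean_xy = np.nanmean(tracks[..., :2], axis=0)
```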
"},{"location":"model/#sleap_io.Labels.remove_nodes","title":"remove_nodes(nodes, skeleton=None)
","text":"Remove nodes from the skeleton.
Parameters:
nodes (list[NodeOrIndex]): A list of node names, indices, or Node objects to remove. Required.
skeleton (Skeleton | None): Skeleton to update. If None (the default), assumes there is only one skeleton in the labels and raises ValueError otherwise. Default: None.
Raises:
ValueError: If the nodes are not found in the skeleton, or if there is more than one skeleton in the Labels but it is not specified.
Notes: This method should always be used when removing nodes from the skeleton as it handles updating the lookup caches necessary for indexing nodes by name, and updating instances to reflect the changes made to the skeleton.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Source code in sleap_io/model/labels.py
def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
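A sketch of removing nodes from a single-skeleton project; the node names here are hypothetical:

```python
# Removes the nodes, any edges/symmetries touching them, and updates all
# instances that use this skeleton.
labels.remove_nodes(["tail_tip", "left_ear"])
```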
"},{"location":"model/#sleap_io.Labels.remove_predictions","title":"remove_predictions(clean=True)
","text":"Remove all predicted instances from the labels.
Parameters:
clean (bool): If True (the default), also remove any empty frames and unused tracks and skeletons. It does NOT remove videos that have no labeled frames or instances with no visible points. Default: True.
See also: Labels.clean
sleap_io/model/labels.py
def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n
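A usage sketch (assumes labels was loaded elsewhere):
# Drop all model predictions; keep empty frames and unused tracks/skeletons.
labels.remove_predictions(clean=False)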
"},{"location":"model/#sleap_io.Labels.rename_nodes","title":"rename_nodes(name_map, skeleton=None)
","text":"Rename nodes in the skeleton.
Parameters:
name_map (dict[NodeOrIndex, str] | list[str]): A dictionary mapping old node names to new node names. Keys can be specified as Node objects, integer indices, or string names. Values must be specified as string names. If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order. Required.
skeleton (Skeleton | None): Skeleton to update. If None (the default), assumes there is only one skeleton in the labels and raises ValueError otherwise. Default: None.
Raises:
ValueError: If the new node names exist in the skeleton, if the old node names are not found in the skeleton, or if there is more than one skeleton in the Labels but it is not specified.
This method is recommended over Skeleton.rename_nodes
as it will update all instances in the labels to reflect the new node names.
Example:
>>> labels = Labels(skeletons=[Skeleton(["A", "B", "C"])])
>>> labels.rename_nodes({"A": "X", "B": "Y", "C": "Z"})
>>> labels.skeleton.node_names
["X", "Y", "Z"]
>>> labels.rename_nodes(["a", "b", "c"])
>>> labels.skeleton.node_names
["a", "b", "c"]
Source code insleap_io/model/labels.py
def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n
"},{"location":"model/#sleap_io.Labels.reorder_nodes","title":"reorder_nodes(new_order, skeleton=None)
","text":"Reorder nodes in the skeleton.
Parameters:
new_order (list[NodeOrIndex]): A list of node names, indices, or Node objects specifying the new order of the nodes.
skeleton (Skeleton | None): Skeleton to update. If None (the default), assumes there is only one skeleton in the labels and raises ValueError otherwise. Default: None.
Raises:
ValueError: If the new order of nodes is not the same length as the current nodes, or if there is more than one skeleton in the Labels but it is not specified.
This method handles updating the lookup caches necessary for indexing nodes by name, as well as updating instances to reflect the changes made to the skeleton.
Source code insleap_io/model/labels.py
def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
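A usage sketch (node names are illustrative and must match the existing skeleton):
# Put nodes into a new order; instances are updated so their point data
# follows the new node order.
labels.reorder_nodes(["head", "thorax", "abdomen"])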
"},{"location":"model/#sleap_io.Labels.replace_filenames","title":"replace_filenames(new_filenames=None, filename_map=None, prefix_map=None)
","text":"Replace video filenames.
Parameters:
new_filenames (list[str | Path] | None): List of new filenames. Must have the same length as the number of videos in the labels. Default: None.
filename_map (dict[str | Path, str | Path] | None): Dictionary mapping old filenames (keys) to new filenames (values). Default: None.
prefix_map (dict[str | Path, str | Path] | None): Dictionary mapping old prefixes (keys) to new prefixes (values). Default: None.
Notes: Only one of the argument types can be provided.
Source code insleap_io/model/labels.py
def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n
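A usage sketch (paths are illustrative):
# Videos moved to a new location: rewrite matching path prefixes.
labels.replace_filenames(prefix_map={"/old/data": "/new/data"})
# Or map individual files explicitly.
labels.replace_filenames(
    filename_map={"/old/data/session1.mp4": "/new/data/session1.mp4"}
)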
"},{"location":"model/#sleap_io.Labels.replace_skeleton","title":"replace_skeleton(new_skeleton, old_skeleton=None, node_map=None)
","text":"Replace the skeleton in the labels.
Parameters:
new_skeleton (Skeleton): The new Skeleton to replace the old skeleton with.
old_skeleton (Skeleton | None): The old Skeleton to replace. If None (the default), assumes there is only one skeleton in the labels and raises ValueError otherwise. Default: None.
node_map (dict[NodeOrIndex, NodeOrIndex] | None): Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed. Default: None.
Raises:
ValueError: If there is more than one skeleton in the Labels but it is not specified.
This method will replace the skeleton in all instances in the labels that have the old skeleton. All point data associated with nodes not in the node_map
will be lost.
sleap_io/model/labels.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. **All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n
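A usage sketch (the skeleton and node names are illustrative):
from sleap_io import Skeleton

new_skeleton = Skeleton(["head", "thorax", "abdomen"])
# Map old node names to new ones; points for unmapped nodes are dropped.
labels.replace_skeleton(
    new_skeleton,
    node_map={"head": "head", "body": "thorax", "tail": "abdomen"},
)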
"},{"location":"model/#sleap_io.Labels.replace_videos","title":"replace_videos(old_videos=None, new_videos=None, video_map=None)
","text":"Replace videos and update all references.
Parameters:
old_videos (list[Video] | None): List of videos to be replaced. Default: None.
new_videos (list[Video] | None): List of videos to replace with. Default: None.
video_map (dict[Video, Video] | None): Alternative input of dictionary where keys are the old videos and values are the new videos. Default: None.
Source code in sleap_io/model/labels.py
def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n
"},{"location":"model/#sleap_io.Labels.save","title":"save(filename, format=None, embed=None, **kwargs)
","text":"Save labels to file in specified format.
Parameters:
filename (str): Path to save labels to. Required.
format (Optional[str]): The format to save the labels in. If None, the format will be inferred from the file extension. Available formats are "slp", "nwb", "labelstudio", and "jabs". Default: None.
embed (bool | str | list[tuple[Video, int]] | None): Frames to embed in the saved labels file. One of None, True, "all", "user", "suggestions", "user+suggestions", "source", or a list of tuples of (video, frame_idx).
If None is specified (the default) and the labels contain embedded frames, those embedded frames will be re-saved to the new file.
If True or "all", all labeled frames and suggested frames will be embedded.
If "source" is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend. Default: None.
Source code in sleap_io/model/labels.py
def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n
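A usage sketch (filenames are illustrative):
# Self-contained package with all labeled and suggested frames embedded.
labels.save("train.pkg.slp", embed="all")
# Save without images, restoring references to the source videos if available.
labels.save("predictions.slp", embed="source")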
"},{"location":"model/#sleap_io.Labels.split","title":"split(n, seed=None)
","text":"Separate the labels into random splits.
Parameters:
n (int | float): Size of the first split. If integer >= 1, assumes that this is the number of labeled frames in the first split. If < 1.0, this will be treated as a fraction of the total labeled frames. Required.
seed (int | None): Optional integer seed to use for reproducibility. Default: None.
Returns:
tuple[Labels, Labels]: A tuple of split1, split2. If an integer was specified, len(split1) == n. If a fraction was specified, len(split1) == int(n * len(labels)). The second split contains the remainder, i.e., len(split2) == len(labels) - len(split1). If there are too few frames, a minimum of 1 frame will be kept in the second split. If there is exactly 1 labeled frame in the labels, the same frame will be assigned to both splits.
Source code insleap_io/model/labels.py
def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n
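A usage sketch:
# 80/20 split of labeled frames with a fixed seed for reproducibility.
train, val = labels.split(0.8, seed=42)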
"},{"location":"model/#sleap_io.Labels.trim","title":"trim(save_path, frame_inds, video=None, video_kwargs=None)
","text":"Trim the labels to a subset of frames and videos accordingly.
Parameters:
save_path (str | Path): Path to the trimmed labels SLP file. Video will be saved with the same base name but with .mp4 extension. Required.
frame_inds (list[int] | ndarray): Frame indices to save. Can be specified as a list or array of frame integers. Required.
video (Video | int | None): Video or integer index of the video to trim. Does not need to be specified for single-video projects. Default: None.
video_kwargs (dict[str, Any] | None): A dictionary of keyword arguments to provide to sio.save_video for video compression. Default: None.
Returns:
Labels: The resulting labels object referencing the trimmed data.
Notes: This will remove any data outside of the trimmed frames, save new videos, and adjust the frame indices to match the newly trimmed videos.
Source code insleap_io/model/labels.py
def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
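A usage sketch (frame range and paths are illustrative):
import numpy as np

# Keep frames 1000-2000; a trimmed clip.mp4 is written next to clip.slp and
# frame indices in the returned labels are shifted to start at 0.
clip_labels = labels.trim("clip.slp", frame_inds=np.arange(1000, 2001))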
"},{"location":"model/#sleap_io.Labels.update","title":"update()
","text":"Update data structures based on contents.
This function will update the list of skeletons, videos and tracks from the labeled frames, instances and suggestions.
Source code insleap_io/model/labels.py
def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n
"},{"location":"model/#sleap_io.LabeledFrame","title":"sleap_io.LabeledFrame
","text":"Labeled data for a single frame of a video.
Attributes:
video (Video): The Video associated with this LabeledFrame.
frame_idx (int): The index of the LabeledFrame in the Video.
instances (list[Union[Instance, PredictedInstance]]): List of Instance objects associated with this LabeledFrame.
Instances of this class are hashed by identity, not by value. This means that two LabeledFrame
instances with the same attributes will NOT be considered equal in a set or dict.
Methods:
__getitem__: Return the Instance at key index in the instances list.
__iter__: Iterate over Instances in the instances list.
__len__: Return the number of instances in the frame.
numpy: Return all instances in the frame as a numpy array.
remove_empty_instances: Remove all instances with no visible points.
remove_predictions: Remove all PredictedInstance objects from the frame.
Attributes:
has_predicted_instances (bool): Return True if the frame has any predicted instances.
has_user_instances (bool): Return True if the frame has any user-labeled instances.
image (ndarray): Return the image of the frame as a numpy array.
predicted_instances (list[Instance]): Frame instances that are predicted by a model (PredictedInstance objects).
unused_predictions (list[Instance]): Return a list of "unused" PredictedInstance objects in frame.
user_instances (list[Instance]): Frame instances that are user-labeled (Instance objects).
sleap_io/model/labeled_frame.py
@define(eq=False)\nclass LabeledFrame:\n \"\"\"Labeled data for a single frame of a video.\n\n Attributes:\n video: The `Video` associated with this `LabeledFrame`.\n frame_idx: The index of the `LabeledFrame` in the `Video`.\n instances: List of `Instance` objects associated with this `LabeledFrame`.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `LabeledFrame` instances with the same attributes will NOT be considered\n equal in a set or dict.\n \"\"\"\n\n video: Video\n frame_idx: int = field(converter=int)\n instances: list[Union[Instance, PredictedInstance]] = field(factory=list)\n\n def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n\n def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n\n def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n\n @property\n def user_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are user-labeled (`Instance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == Instance]\n\n @property\n def has_user_instances(self) -> bool:\n \"\"\"Return True if the frame has any user-labeled instances.\"\"\"\n for inst in self.instances:\n if type(inst) == Instance:\n return True\n return False\n\n @property\n def predicted_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are predicted by a model (`PredictedInstance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == PredictedInstance]\n\n @property\n def has_predicted_instances(self) -> bool:\n \"\"\"Return True if the frame has any predicted instances.\"\"\"\n for inst in self.instances:\n if type(inst) == PredictedInstance:\n return True\n return False\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n\n @property\n def image(self) -> np.ndarray:\n \"\"\"Return the image of the frame as a numpy array.\"\"\"\n return self.video[self.frame_idx]\n\n @property\n def unused_predictions(self) -> list[Instance]:\n \"\"\"Return a list of \"unused\" `PredictedInstance` objects in frame.\n\n This is all of the `PredictedInstance` objects which do not have a corresponding\n `Instance` in the same track in the same frame.\n \"\"\"\n unused_predictions = []\n any_tracks = [inst.track for inst in self.instances if inst.track is not None]\n if len(any_tracks):\n # Use tracks to determine which predicted instances have been used\n used_tracks = [\n inst.track\n for inst in self.instances\n if type(inst) == Instance and inst.track is not None\n ]\n unused_predictions = [\n inst\n for inst in self.instances\n if inst.track not in used_tracks and type(inst) == PredictedInstance\n ]\n\n else:\n # Use from_predicted to determine which predicted instances have been used\n # TODO: should we always do this instead of using tracks?\n used_instances = [\n inst.from_predicted\n for inst in self.instances\n if inst.from_predicted is not None\n ]\n 
unused_predictions = [\n inst\n for inst in self.instances\n if type(inst) == PredictedInstance and inst not in used_instances\n ]\n\n return unused_predictions\n\n def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n\n def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
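A construction sketch (assumes video and instance objects were created elsewhere):
from sleap_io import LabeledFrame

lf = LabeledFrame(video=video, frame_idx=0, instances=[instance])
print(len(lf), lf.has_user_instances)  # instance count, True if any user-labeled
pts = lf.numpy()  # (n_instances, n_nodes, 2), arbitrary instance order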
"},{"location":"model/#sleap_io.LabeledFrame.has_predicted_instances","title":"has_predicted_instances: bool
property
","text":"Return True if the frame has any predicted instances.
"},{"location":"model/#sleap_io.LabeledFrame.has_user_instances","title":"has_user_instances: bool
property
","text":"Return True if the frame has any user-labeled instances.
"},{"location":"model/#sleap_io.LabeledFrame.image","title":"image: np.ndarray
property
","text":"Return the image of the frame as a numpy array.
"},{"location":"model/#sleap_io.LabeledFrame.predicted_instances","title":"predicted_instances: list[Instance]
property
","text":"Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions: list[Instance]
property
","text":"Return a list of \"unused\" PredictedInstance
objects in frame.
This is all of the PredictedInstance
objects which do not have a corresponding Instance
in the same track in the same frame.
user_instances: list[Instance]
property
","text":"Frame instances that are user-labeled (Instance
objects).
__getitem__(key)
","text":"Return the Instance
at key
index in the instances
list.
sleap_io/model/labeled_frame.py
def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n
"},{"location":"model/#sleap_io.LabeledFrame.__iter__","title":"__iter__()
","text":"Iterate over Instance
s in instances
list.
sleap_io/model/labeled_frame.py
def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n
"},{"location":"model/#sleap_io.LabeledFrame.__len__","title":"__len__()
","text":"Return the number of instances in the frame.
Source code insleap_io/model/labeled_frame.py
def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n
"},{"location":"model/#sleap_io.LabeledFrame.numpy","title":"numpy()
","text":"Return all instances in the frame as a numpy array.
Returns:
Type Descriptionndarray
Points as a numpy array of shape (n_instances, n_nodes, 2)
.
Note that the order of the instances is arbitrary.
Source code insleap_io/model/labeled_frame.py
def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n
"},{"location":"model/#sleap_io.LabeledFrame.remove_empty_instances","title":"remove_empty_instances()
","text":"Remove all instances with no visible points.
Source code insleap_io/model/labeled_frame.py
def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
"},{"location":"model/#sleap_io.LabeledFrame.remove_predictions","title":"remove_predictions()
","text":"Remove all PredictedInstance
objects from the frame.
sleap_io/model/labeled_frame.py
def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n
"},{"location":"model/#sleap_io.Instance","title":"sleap_io.Instance
","text":"This class represents a ground truth instance such as an animal.
An Instance
has a set of landmarks (Point
s) that correspond to the nodes defined in its Skeleton
.
It may also be associated with a Track
which links multiple instances together across frames or videos.
Attributes:
points (Union[dict[Node, Point], dict[Node, PredictedPoint]]): A dictionary with keys as Nodes and values as Points containing all of the landmarks of the instance. This can also be specified as a dictionary with node names, a list of length n_nodes, or a numpy array of shape (n_nodes, 2).
skeleton (Skeleton): The Skeleton that describes the Nodes and Edges associated with this instance.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos.
from_predicted (Optional[PredictedInstance]): The PredictedInstance (if any) that this instance was initialized from. This is used with human-in-the-loop workflows.
Methods:
__attrs_post_init__: Maintain point mappings between node and points after initialization.
__getitem__: Return the point associated with a node or None if not set.
__len__: Return the number of points in the instance.
__repr__: Return a readable representation of the instance.
from_numpy: Create an instance object from a numpy array.
numpy: Return the instance points as a numpy array.
replace_skeleton: Replace the skeleton associated with the instance.
update_skeleton: Update the points dictionary to match the skeleton.
Attributes:
is_empty (bool): Return True if no points are visible on the instance.
n_visible (int): Return the number of visible points in the instance.
Source code insleap_io/model/instance.py
@define(auto_attribs=True, slots=True, eq=True)\nclass Instance:\n \"\"\"This class represents a ground truth instance such as an animal.\n\n An `Instance` has a set of landmarks (`Point`s) that correspond to the nodes defined\n in its `Skeleton`.\n\n It may also be associated with a `Track` which links multiple instances together\n across frames or videos.\n\n Attributes:\n points: A dictionary with keys as `Node`s and values as `Point`s containing all\n of the landmarks of the instance. This can also be specified as a dictionary\n with node names, a list of length `n_nodes`, or a numpy array of shape\n `(n_nodes, 2)`.\n skeleton: The `Skeleton` that describes the `Node`s and `Edge`s associated with\n this instance.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: The `PredictedInstance` (if any) that this instance was\n initialized from. This is used with human-in-the-loop workflows.\n \"\"\"\n\n _POINT_TYPE = Point\n\n def _make_default_point(self, x, y):\n return self._POINT_TYPE(x, y, visible=not (math.isnan(x) or math.isnan(y)))\n\n def _convert_points(self, attr, points):\n \"\"\"Maintain points mappings between nodes and points.\"\"\"\n if type(points) == np.ndarray:\n points = points.tolist()\n\n if type(points) == list:\n if len(points) != len(self.skeleton):\n raise ValueError(\n \"If specifying points as a list, must provide as many points as \"\n \"nodes in the skeleton.\"\n )\n points = {node: pt for node, pt in zip(self.skeleton.nodes, points)}\n\n if type(points) == dict:\n keys = [\n node if type(node) == Node else self.skeleton[node]\n for node in points.keys()\n ]\n vals = [\n (\n point\n if type(point) == self._POINT_TYPE\n else self._make_default_point(*point)\n )\n for point in points.values()\n ]\n points = {k: v for k, v in zip(keys, vals)}\n\n missing_nodes = list(set(self.skeleton.nodes) - set(points.keys()))\n for node in missing_nodes:\n points[node] = self._make_default_point(x=np.nan, y=np.nan)\n\n return points\n\n points: Union[dict[Node, Point], dict[Node, PredictedPoint]] = field(\n on_setattr=_convert_points, eq=cmp_using(eq=_compare_points) # type: ignore\n )\n skeleton: Skeleton\n track: Optional[Track] = None\n from_predicted: Optional[PredictedInstance] = None\n\n def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n\n def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n\n def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n\n @property\n def n_visible(self) -> int:\n \"\"\"Return the number of visible points in the instance.\"\"\"\n return sum(pt.visible for pt in self.points.values())\n\n @property\n def is_empty(self) -> bool:\n \"\"\"Return `True` if no points are visible on the instance.\"\"\"\n return self.n_visible == 
0\n\n @classmethod\n def from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n ) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n\n def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. 
No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
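A construction sketch (node names and values are illustrative):
import numpy as np
from sleap_io import Instance, Skeleton

skeleton = Skeleton(["head", "tail"])
inst = Instance.from_numpy(np.array([[10.0, 20.0], [np.nan, np.nan]]), skeleton=skeleton)
inst.n_visible  # 1 -- NaN rows become non-visible points
inst.numpy()    # (n_nodes, 2) array with NaN for the non-visible node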
"},{"location":"model/#sleap_io.Instance.is_empty","title":"is_empty: bool
property
","text":"Return True
if no points are visible on the instance.
n_visible: int
property
","text":"Return the number of visible points in the instance.
"},{"location":"model/#sleap_io.Instance.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Maintain point mappings between node and points after initialization.
Source code insleap_io/model/instance.py
def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n
"},{"location":"model/#sleap_io.Instance.__getitem__","title":"__getitem__(node)
","text":"Return the point associated with a node or None
if not set.
sleap_io/model/instance.py
def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n
"},{"location":"model/#sleap_io.Instance.__len__","title":"__len__()
","text":"Return the number of points in the instance.
Source code insleap_io/model/instance.py
def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n
"},{"location":"model/#sleap_io.Instance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code insleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n
"},{"location":"model/#sleap_io.Instance.from_numpy","title":"from_numpy(points, skeleton, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
points (ndarray): A numpy array of shape (n_nodes, 2) corresponding to the points of the skeleton. Values of np.nan indicate "missing" nodes.
skeleton (Skeleton): The Skeleton that this Instance is associated with. It should have n_nodes nodes.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos. Default: None.
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n
"},{"location":"model/#sleap_io.Instance.numpy","title":"numpy()
","text":"Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n
"},{"location":"model/#sleap_io.Instance.replace_skeleton","title":"replace_skeleton(new_skeleton, node_map=None, rev_node_map=None)
","text":"Replace the skeleton associated with the instance.
The points dictionary will be updated to match the new skeleton.
Parameters:
new_skeleton (Skeleton): The new Skeleton to associate with the instance.
node_map (dict[NodeOrIndex, NodeOrIndex] | None): Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed. Default: None.
rev_node_map (dict[NodeOrIndex, NodeOrIndex] | None): Dictionary mapping nodes in the new skeleton to nodes in the old skeleton. This is used internally when calling from Labels.replace_skeleton() as it is more efficient to compute this mapping once and pass it to all instances. No validation is done on this mapping, so nodes are expected to be Node objects. Default: None.
Source code in sleap_io/model/instance.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
"},{"location":"model/#sleap_io.Instance.update_skeleton","title":"update_skeleton()
","text":"Update the points dictionary to match the skeleton.
Points associated with nodes that are no longer in the skeleton will be removed.
Additionally, the keys of the points dictionary will be ordered to match the order of the nodes in the skeleton.
Notes: This method is useful when the skeleton has been updated (e.g., nodes removed or reordered).
However, it is recommended to use Labels
-level methods (e.g., Labels.remove_nodes()
) when manipulating the skeleton as these will automatically call this method on every instance.
sleap_io/model/instance.py
def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n
"},{"location":"model/#sleap_io.PredictedInstance","title":"sleap_io.PredictedInstance
","text":" Bases: Instance
A PredictedInstance
is an Instance
that was predicted using a model.
Attributes:
skeleton: The Skeleton that this Instance is associated with.
points: A dictionary where keys are Skeleton nodes and values are Points.
track: An optional Track associated with a unique animal/object across frames or videos.
from_predicted (Optional[PredictedInstance]): Not applicable in PredictedInstances (must be set to None).
score (float): The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
tracking_score (Optional[float]): The score associated with the Track assignment. This is typically the value from the score matrix used in an identity assignment.
Methods:
__repr__: Return a readable representation of the instance.
from_numpy: Create an instance object from a numpy array.
numpy: Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
@define\nclass PredictedInstance(Instance):\n \"\"\"A `PredictedInstance` is an `Instance` that was predicted using a model.\n\n Attributes:\n skeleton: The `Skeleton` that this `Instance` is associated with.\n points: A dictionary where keys are `Skeleton` nodes and values are `Point`s.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: Not applicable in `PredictedInstance`s (must be set to `None`).\n score: The instance detection or part grouping prediction score. This is a\n scalar that represents the confidence with which this entire instance was\n predicted. This may not always be applicable depending on the model type.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity assignment.\n \"\"\"\n\n _POINT_TYPE = PredictedPoint\n\n from_predicted: Optional[PredictedInstance] = field(\n default=None, validator=validators.instance_of(type(None))\n )\n score: float = 0.0\n tracking_score: Optional[float] = 0\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n\n @classmethod\n def from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n ) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n\n def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
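A construction sketch (values and node names are illustrative):
import numpy as np
from sleap_io import PredictedInstance, Skeleton

pred = PredictedInstance.from_numpy(
    points=np.array([[10.0, 20.0], [30.0, 40.0]]),
    point_scores=np.array([0.9, 0.8]),
    instance_score=0.85,
    skeleton=Skeleton(["head", "tail"]),
)
pred.numpy(scores=True)  # (n_nodes, 3): x, y, point score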
"},{"location":"model/#sleap_io.PredictedInstance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code insleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n
"},{"location":"model/#sleap_io.PredictedInstance.from_numpy","title":"from_numpy(points, point_scores, instance_score, skeleton, tracking_score=None, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
points (ndarray): A numpy array of shape (n_nodes, 2) corresponding to the points of the skeleton. Values of np.nan indicate "missing" nodes.
point_scores (ndarray): The points-level prediction score. This is an array that represents the confidence with which each point in the instance was predicted. This may not always be applicable depending on the model type. Required.
instance_score (float): The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type. Required.
skeleton (Skeleton): The Skeleton that this Instance is associated with. It should have n_nodes nodes.
tracking_score (Optional[float]): The score associated with the Track assignment. This is typically the value from the score matrix used in an identity assignment. Default: None.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos. Default: None.
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n
"},{"location":"model/#sleap_io.PredictedInstance.numpy","title":"numpy(scores=False)
","text":"Return the instance points as a numpy array.
Source code insleap_io/model/instance.py
def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"model/#sleap_io.Point","title":"sleap_io.Point
","text":"A 2D spatial landmark and metadata associated with annotation.
Attributes:
x (float): The horizontal pixel location of point in image coordinates.
y (float): The vertical pixel location of point in image coordinates.
visible (bool): Whether point is visible in the image or not.
complete (bool): Whether the point has been verified by the user labeler.
Class variables:
eq_atol: Controls absolute tolerance allowed in x and y when comparing two Points for equality.
eq_rtol: Controls relative tolerance allowed in x and y when comparing two Points for equality.
Methods:
__eq__: Compare self and other for equality.
numpy: Return the coordinates as a numpy array of shape (2,).
sleap_io/model/instance.py
@define\nclass Point:\n \"\"\"A 2D spatial landmark and metadata associated with annotation.\n\n Attributes:\n x: The horizontal pixel location of point in image coordinates.\n y: The vertical pixel location of point in image coordinates.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n\n Class variables:\n eq_atol: Controls absolute tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n eq_rtol: Controls relative tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n\n \"\"\"\n\n eq_atol: ClassVar[float] = 1e-08\n eq_rtol: ClassVar[float] = 0\n\n x: float\n y: float\n visible: bool = True\n complete: bool = False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"model/#sleap_io.Point.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
Precision error between the respective x
and y
properties of two instances may be allowed or controlled via the Point.eq_atol
and Point.eq_rtol
class variables. Set to zero to disable their effect. Internally, numpy.isclose()
is used for the comparison: https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
Parameters:
other (object, required): Instance of Point to compare to.
Returns:
bool: Returns True if all attributes of self and other are identical (possibly allowing precision error for x and y attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n
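A short sketch (illustrative values, not from the library docs) of the tolerance-controlled comparison described above:
from sleap_io import Point

a = Point(x=1.0, y=2.0)
b = Point(x=1.0 + 1e-10, y=2.0)  # within the default absolute tolerance (eq_atol=1e-08)
c = Point(x=1.1, y=2.0)          # outside the default tolerance

assert a == b
assert a != c

Point.eq_atol = 0.2  # class-level change: affects all Point comparisons
assert a == c
Point.eq_atol = 1e-08  # restore the default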
"},{"location":"model/#sleap_io.Point.numpy","title":"numpy()
","text":"Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"model/#sleap_io.PredictedPoint","title":"sleap_io.PredictedPoint
","text":" Bases: Point
A predicted point with associated score generated by a prediction model.
It has all the properties of a labeled Point
, plus a score
.
Attributes:
x: The horizontal pixel location of point within image frame.
y: The vertical pixel location of point within image frame.
visible: Whether point is visible in the image or not.
complete: Has the point been verified by the user labeler.
score (float): The point-level prediction score. This is typically the confidence and set to a value between 0 and 1.
Methods:
__eq__: Compare self and other for equality.
numpy: Return the coordinates and score as a numpy array of shape (3,).
sleap_io/model/instance.py
@define\nclass PredictedPoint(Point):\n \"\"\"A predicted point with associated score generated by a prediction model.\n\n It has all the properties of a labeled `Point`, plus a `score`.\n\n Attributes:\n x: The horizontal pixel location of point within image frame.\n y: The vertical pixel location of point within image frame.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n score: The point-level prediction score. This is typically the confidence and\n set to a value between 0 and 1.\n \"\"\"\n\n score: float = 0.0\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
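An illustrative sketch (values made up) of the score-bearing behavior shown in the source above:
from sleap_io import PredictedPoint

pt = PredictedPoint(x=10.0, y=20.0, score=0.9)
pt.numpy()  # array([10. , 20. ,  0.9]) -- shape (3,)

hidden = PredictedPoint(x=10.0, y=20.0, visible=False, score=0.9)
hidden.numpy()  # array([nan, nan, nan]) when not visible

# Equality also compares the score (see __eq__ below).
assert pt != PredictedPoint(x=10.0, y=20.0, score=0.5)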
"},{"location":"model/#sleap_io.PredictedPoint.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
See Point.__eq__()
for important notes about point equality semantics!
Parameters:
other (object, required): Instance of PredictedPoint to compare.
Returns:
bool: Returns True if all attributes of self and other are identical (possibly allowing precision error for x and y attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
"},{"location":"model/#sleap_io.PredictedPoint.numpy","title":"numpy()
","text":"Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n
"},{"location":"model/#sleap_io.Skeleton","title":"sleap_io.Skeleton
","text":"A description of a set of landmark types and connections between them.
Skeletons are represented by a directed graph composed of a set of Nodes (landmark types such as body parts) and Edges (connections between parts).
Attributes:
nodes (list[Node]): A list of Nodes. May be specified as a list of strings to create new nodes from their names.
edges (list[Edge]): A list of Edges. May be specified as a list of 2-tuples of string names or integer indices of nodes. Each edge corresponds to a pair of source and destination nodes forming a directed edge.
symmetries (list[Symmetry]): A list of Symmetry objects. Each symmetry corresponds to symmetric body parts, such as \"left eye\", \"right eye\". This is used when applying flip (reflection) augmentation to images in order to appropriately swap the indices of symmetric landmarks.
name (str | None): A descriptive name for the Skeleton.
Methods:
__attrs_post_init__: Ensure nodes are Nodes, edges are Edges, and Node map is updated.
__contains__: Check if a node is in the skeleton.
__getitem__: Return a Node when indexing by name or integer.
__len__: Return the number of nodes in the skeleton.
__repr__: Return a readable representation of the skeleton.
add_edge: Add an Edge to the skeleton.
add_edges: Add multiple Edges to the skeleton.
add_node: Add a Node to the skeleton.
add_nodes: Add multiple Nodes to the skeleton.
add_symmetry: Add a symmetry relationship to the skeleton.
get_flipped_node_inds: Returns node indices that should be switched when horizontally flipping.
index: Return the index of a node specified as a Node or string name.
rebuild_cache: Rebuild the node name/index to Node map caches.
remove_node: Remove a single node from the skeleton.
remove_nodes: Remove nodes from the skeleton.
rename_node: Rename a single node in the skeleton.
rename_nodes: Rename nodes in the skeleton.
reorder_nodes: Reorder nodes in the skeleton.
require_node: Return a Node object, handling indexing and adding missing nodes.
Attributes:
edge_inds (list[tuple[int, int]]): Edge indices as a list of 2-tuples.
edge_names (list[str, str]): Edge names as a list of 2-tuples with string node names.
node_names (list[str]): Names of the nodes associated with this skeleton as a list of strings.
symmetry_inds (list[tuple[int, int]]): Symmetry indices as a list of 2-tuples.
symmetry_names (list[str, str]): Symmetry names as a list of 2-tuples with string node names.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Skeleton:\n \"\"\"A description of a set of landmark types and connections between them.\n\n Skeletons are represented by a directed graph composed of a set of `Node`s (landmark\n types such as body parts) and `Edge`s (connections between parts).\n\n Attributes:\n nodes: A list of `Node`s. May be specified as a list of strings to create new\n nodes from their names.\n edges: A list of `Edge`s. May be specified as a list of 2-tuples of string names\n or integer indices of `nodes`. Each edge corresponds to a pair of source and\n destination nodes forming a directed edge.\n symmetries: A list of `Symmetry`s. Each symmetry corresponds to symmetric body\n parts, such as `\"left eye\", \"right eye\"`. This is used when applying flip\n (reflection) augmentation to images in order to appropriately swap the\n indices of symmetric landmarks.\n name: A descriptive name for the `Skeleton`.\n \"\"\"\n\n def _nodes_on_setattr(self, attr, new_nodes):\n \"\"\"Callback to update caches when nodes are set.\"\"\"\n self.rebuild_cache(nodes=new_nodes)\n return new_nodes\n\n nodes: list[Node] = field(\n factory=list,\n on_setattr=_nodes_on_setattr,\n )\n edges: list[Edge] = field(factory=list)\n symmetries: list[Symmetry] = field(factory=list)\n name: str | None = None\n _name_to_node_cache: dict[str, Node] = field(init=False, repr=False, eq=False)\n _node_to_ind_cache: dict[Node, int] = field(init=False, repr=False, eq=False)\n\n def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n\n def _convert_nodes(self):\n \"\"\"Convert nodes to `Node` objects if needed.\"\"\"\n if isinstance(self.nodes, np.ndarray):\n object.__setattr__(self, \"nodes\", self.nodes.tolist())\n for i, node in enumerate(self.nodes):\n if type(node) == str:\n self.nodes[i] = Node(node)\n\n def _convert_edges(self):\n \"\"\"Convert list of edge names or integers to `Edge` objects if needed.\"\"\"\n if isinstance(self.edges, np.ndarray):\n self.edges = self.edges.tolist()\n node_names = self.node_names\n for i, edge in enumerate(self.edges):\n if type(edge) == Edge:\n continue\n src, dst = edge\n if type(src) == str:\n try:\n src = node_names.index(src)\n except ValueError:\n raise ValueError(\n f\"Node '{src}' specified in the edge list is not in the nodes.\"\n )\n if type(src) == int or (\n np.isscalar(src) and np.issubdtype(src.dtype, np.integer)\n ):\n src = self.nodes[src]\n\n if type(dst) == str:\n try:\n dst = node_names.index(dst)\n except ValueError:\n raise ValueError(\n f\"Node '{dst}' specified in the edge list is not in the nodes.\"\n )\n if type(dst) == int or (\n np.isscalar(dst) and np.issubdtype(dst.dtype, np.integer)\n ):\n dst = self.nodes[dst]\n\n self.edges[i] = Edge(src, dst)\n\n def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. 
Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n\n @property\n def node_names(self) -> list[str]:\n \"\"\"Names of the nodes associated with this skeleton as a list of strings.\"\"\"\n return [node.name for node in self.nodes]\n\n @property\n def edge_inds(self) -> list[tuple[int, int]]:\n \"\"\"Edges indices as a list of 2-tuples.\"\"\"\n return [\n (self.nodes.index(edge.source), self.nodes.index(edge.destination))\n for edge in self.edges\n ]\n\n @property\n def edge_names(self) -> list[str, str]:\n \"\"\"Edge names as a list of 2-tuples with string node names.\"\"\"\n return [(edge.source.name, edge.destination.name) for edge in self.edges]\n\n @property\n def symmetry_inds(self) -> list[tuple[int, int]]:\n \"\"\"Symmetry indices as a list of 2-tuples.\"\"\"\n return [\n tuple(sorted((self.index(symmetry[0]), self.index(symmetry[1]))))\n for symmetry in self.symmetries\n ]\n\n @property\n def symmetry_names(self) -> list[str, str]:\n \"\"\"Symmetry names as a list of 2-tuples with string node names.\"\"\"\n return [\n (self.nodes[i].name, self.nodes[j].name) for (i, j) in self.symmetry_inds\n ]\n\n def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n\n def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n\n def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n\n def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return 
self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n\n def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n\n def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n\n def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n\n def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n\n def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n ):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n\n def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n\n def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n ):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. 
If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n\n def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n\n def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. 
Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n\n def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n\n def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n\n def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. 
This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
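A minimal construction sketch (node and edge names are illustrative, not from the library docs) tying the attributes and indexing helpers above together:
from sleap_io import Skeleton

skel = Skeleton(
    nodes=["head", "thorax", "abdomen"],
    edges=[("head", "thorax"), ("thorax", "abdomen")],
    name="insect",
)
skel.node_names       # ['head', 'thorax', 'abdomen']
skel.edge_inds        # [(0, 1), (1, 2)]
skel.index("thorax")  # 1
"abdomen" in skel     # True
len(skel)             # 3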
"},{"location":"model/#sleap_io.Skeleton.edge_inds","title":"edge_inds: list[tuple[int, int]]
property
","text":"Edges indices as a list of 2-tuples.
"},{"location":"model/#sleap_io.Skeleton.edge_names","title":"edge_names: list[str, str]
property
","text":"Edge names as a list of 2-tuples with string node names.
"},{"location":"model/#sleap_io.Skeleton.node_names","title":"node_names: list[str]
property
","text":"Names of the nodes associated with this skeleton as a list of strings.
"},{"location":"model/#sleap_io.Skeleton.symmetry_inds","title":"symmetry_inds: list[tuple[int, int]]
property
","text":"Symmetry indices as a list of 2-tuples.
"},{"location":"model/#sleap_io.Skeleton.symmetry_names","title":"symmetry_names: list[str, str]
property
","text":"Symmetry names as a list of 2-tuples with string node names.
"},{"location":"model/#sleap_io.Skeleton.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
sleap_io/model/skeleton.py
def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n
"},{"location":"model/#sleap_io.Skeleton.__contains__","title":"__contains__(node)
","text":"Check if a node is in the skeleton.
Source code in sleap_io/model/skeleton.py
def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n
"},{"location":"model/#sleap_io.Skeleton.__getitem__","title":"__getitem__(idx)
","text":"Return a Node
when indexing by name or integer.
sleap_io/model/skeleton.py
def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n
"},{"location":"model/#sleap_io.Skeleton.__len__","title":"__len__()
","text":"Return the number of nodes in the skeleton.
Source code in sleap_io/model/skeleton.py
def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n
"},{"location":"model/#sleap_io.Skeleton.__repr__","title":"__repr__()
","text":"Return a readable representation of the skeleton.
Source code in sleap_io/model/skeleton.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n
"},{"location":"model/#sleap_io.Skeleton.add_edge","title":"add_edge(src, dst=None)
","text":"Add an Edge
to the skeleton.
Parameters:
src (NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex], required): The source node specified as a Node, name or index.
dst (NodeOrIndex | None, default None): The destination node specified as a Node, name or index.
Source code in sleap_io/model/skeleton.py
def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n
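A small sketch (node names illustrative) of the behaviors shown in the source above:
from sleap_io import Skeleton

skel = Skeleton(["A", "B"])
skel.add_edge("A", "B")    # by name
skel.add_edge("B", "C")    # "C" is not in the skeleton yet, so require_node() adds it
skel.add_edge(("A", "C"))  # a 2-tuple of nodes also works
skel.edge_names            # [('A', 'B'), ('B', 'C'), ('A', 'C')]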
"},{"location":"model/#sleap_io.Skeleton.add_edges","title":"add_edges(edges)
","text":"Add multiple Edge
s to the skeleton.
Parameters:
edges (list[Edge | tuple[NodeOrIndex, NodeOrIndex]], required): A list of Edge objects or 2-tuples of source and destination nodes.
sleap_io/model/skeleton.py
def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n
"},{"location":"model/#sleap_io.Skeleton.add_node","title":"add_node(node)
","text":"Add a Node
to the skeleton.
Parameters:
node (Node | str, required): A Node object or a string name to create a new node.
Raises:
ValueError: If the node already exists in the skeleton or if the node is not specified as a Node or string.
sleap_io/model/skeleton.py
def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n
"},{"location":"model/#sleap_io.Skeleton.add_nodes","title":"add_nodes(nodes)
","text":"Add multiple Node
s to the skeleton.
Parameters:
nodes (list[Node | str], required): A list of Node objects or string names to create new nodes.
sleap_io/model/skeleton.py
def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n
"},{"location":"model/#sleap_io.Skeleton.add_symmetry","title":"add_symmetry(node1=None, node2=None)
","text":"Add a symmetry relationship to the skeleton.
Parameters:
node1 (Symmetry | NodeOrIndex, default None): The first node specified as a Node, name or index. If a Symmetry object is provided, it will be added directly to the skeleton.
node2 (NodeOrIndex | None, default None): The second node specified as a Node, name or index.
Source code in sleap_io/model/skeleton.py
def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n
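A brief sketch (illustrative node names) of registering a left/right pair; see get_flipped_node_inds below for how symmetries are used during flip augmentation:
from sleap_io import Skeleton

skel = Skeleton(["head", "left_ear", "right_ear"])
skel.add_symmetry("left_ear", "right_ear")
skel.symmetry_inds   # [(1, 2)]
skel.symmetry_names  # [('left_ear', 'right_ear')]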
"},{"location":"model/#sleap_io.Skeleton.get_flipped_node_inds","title":"get_flipped_node_inds()
","text":"Returns node indices that should be switched when horizontally flipping.
This is useful as a lookup table for flipping the landmark coordinates when doing data augmentation.
Example:
>>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])
>>> skel.add_symmetry(\"B_left\", \"B_right\")
>>> skel.add_symmetry(\"D_left\", \"D_right\")
>>> skel.flipped_node_inds
[0, 2, 1, 3, 5, 4]
>>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> pose[skel.flipped_node_inds]
array([[0, 0],
       [2, 2],
       [1, 1],
       [3, 3],
       [5, 5],
       [4, 4]])
Source code in sleap_io/model/skeleton.py
def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n
"},{"location":"model/#sleap_io.Skeleton.index","title":"index(node)
","text":"Return the index of a node specified as a Node
or string name.
sleap_io/model/skeleton.py
def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n
"},{"location":"model/#sleap_io.Skeleton.rebuild_cache","title":"rebuild_cache(nodes=None)
","text":"Rebuild the node name/index to Node
map caches.
Parameters:
nodes (list[Node] | None, default None): A list of Node objects to update the cache with. If not provided, the cache will be updated with the current nodes in the skeleton. If nodes are provided, the cache will be updated with the provided nodes, but the current nodes in the skeleton will not be updated. Default is None.
Notes:
This function should be called when nodes or node list is mutated to update the lookup caches for indexing nodes by name or Node object.
This is done automatically when nodes are added or removed from the skeleton using the convenience methods in this class.
This method only needs to be used when manually mutating nodes or the node list directly.
Source code in sleap_io/model/skeleton.py
def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n
"},{"location":"model/#sleap_io.Skeleton.remove_node","title":"remove_node(node)
","text":"Remove a single node from the skeleton.
Parameters:
node (NodeOrIndex, required): The node to remove. Can be specified as a string name, integer index, or Node object.
Notes:
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed node will also be removed.
Warning:
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes() method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton() on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n
"},{"location":"model/#sleap_io.Skeleton.remove_nodes","title":"remove_nodes(nodes)
","text":"Remove nodes from the skeleton.
Parameters:
nodes (list[NodeOrIndex], required): A list of node names, indices, or Node objects to remove.
Notes:
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Warning:
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes() method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call instance.update_nodes() on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n
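A short sketch (names illustrative) of the edge cleanup described above; note that instances are not updated automatically:
from sleap_io import Skeleton

skel = Skeleton(["A", "B", "C"], edges=[("A", "B"), ("B", "C")])
skel.remove_nodes(["B"])
skel.node_names  # ['A', 'C']
skel.edge_inds   # [] -- both edges touched "B" and were dropped with it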
"},{"location":"model/#sleap_io.Skeleton.rename_node","title":"rename_node(old_name, new_name)
","text":"Rename a single node in the skeleton.
Parameters:
old_name (NodeOrIndex, required): The name of the node to rename. Can also be specified as an integer index or Node object.
new_name (str, required): The new name for the node.
Source code in sleap_io/model/skeleton.py
def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n
"},{"location":"model/#sleap_io.Skeleton.rename_nodes","title":"rename_nodes(name_map)
","text":"Rename nodes in the skeleton.
Parameters:
name_map (dict[NodeOrIndex, str] | list[str], required): A dictionary mapping old node names to new node names. Keys can be specified as Node objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
Raises:
ValueError: If the new node names exist in the skeleton or if the old node names are not found in the skeleton.
Notes:
This method should always be used when renaming nodes in the skeleton as it handles updating the lookup caches necessary for indexing nodes by name.
After renaming, instances using this skeleton do NOT need to be updated as the nodes are stored by reference in the skeleton, so changes are reflected automatically.
Example:
>>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])
>>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})
>>> skel.node_names
[\"X\", \"Y\", \"Z\"]
>>> skel.rename_nodes([\"a\", \"b\", \"c\"])
>>> skel.node_names
[\"a\", \"b\", \"c\"]
Source code in sleap_io/model/skeleton.py
def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n
"},{"location":"model/#sleap_io.Skeleton.reorder_nodes","title":"reorder_nodes(new_order)
","text":"Reorder nodes in the skeleton.
Parameters:
new_order (list[NodeOrIndex], required): A list of node names, indices, or Node objects specifying the new order of the nodes.
Raises:
ValueError: If the new order of nodes is not the same length as the current nodes.
Notes:
This method handles updating the lookup caches necessary for indexing nodes by name.
Warning:
After reordering, instances using this skeleton do not need to be updated as the nodes are stored by reference in the skeleton.
However, the order that points are stored in the instances will not be updated to match the new order of the nodes in the skeleton. This should not matter unless the ordering of the keys in the Instance.points
dictionary is used instead of relying on the skeleton node order.
To make sure these are aligned, it is recommended to use the Labels.reorder_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
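An illustrative sketch of reordering; edge indices are recomputed from the new node order:
from sleap_io import Skeleton

skel = Skeleton(["A", "B", "C"], edges=[("A", "B")])
skel.reorder_nodes(["C", "A", "B"])
skel.node_names  # ['C', 'A', 'B']
skel.edge_inds   # [(1, 2)] -- same Edge objects, indices follow the new order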
"},{"location":"model/#sleap_io.Skeleton.require_node","title":"require_node(node, add_missing=True)
","text":"Return a Node
object, handling indexing and adding missing nodes.
Parameters:
node (NodeOrIndex, required): A Node object, name or index.
add_missing (bool, default True): If True, missing nodes will be added to the skeleton. If False, an error will be raised if the node is not found. Default is True.
Returns:
Node: The Node object.
Raises:
IndexError: If the node is not found in the skeleton and add_missing is False.
sleap_io/model/skeleton.py
def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n
"},{"location":"model/#sleap_io.Node","title":"sleap_io.Node
","text":"A landmark type within a Skeleton
.
This typically corresponds to a unique landmark within a skeleton, such as the \"left eye\".
Attributes:
name (str): Descriptive label for the landmark.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Node:\n \"\"\"A landmark type within a `Skeleton`.\n\n This typically corresponds to a unique landmark within a skeleton, such as the \"left\n eye\".\n\n Attributes:\n name: Descriptive label for the landmark.\n \"\"\"\n\n name: str\n
"},{"location":"model/#sleap_io.Edge","title":"sleap_io.Edge
","text":"A connection between two Node
objects within a Skeleton
.
This is a directed edge, representing the ordering of Node
s in the Skeleton
tree.
Attributes:
source (Node): The origin Node.
destination (Node): The destination Node.
Methods:
__getitem__: Return the source Node (idx is 0) or destination Node (idx is 1).
sleap_io/model/skeleton.py
@define(frozen=True)\nclass Edge:\n \"\"\"A connection between two `Node` objects within a `Skeleton`.\n\n This is a directed edge, representing the ordering of `Node`s in the `Skeleton`\n tree.\n\n Attributes:\n source: The origin `Node`.\n destination: The destination `Node`.\n \"\"\"\n\n source: Node\n destination: Node\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
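A tiny sketch (node names illustrative) of the tuple-like indexing described above:
from sleap_io import Edge, Node

edge = Edge(source=Node("head"), destination=Node("thorax"))
edge[0].name  # 'head'
edge[1].name  # 'thorax'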
"},{"location":"model/#sleap_io.Edge.__getitem__","title":"__getitem__(idx)
","text":"Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
"},{"location":"model/#sleap_io.Symmetry","title":"sleap_io.Symmetry
","text":"A relationship between a pair of nodes denoting their left/right pairing.
Attributes:
nodes (set[Node]): A set of two Nodes.
Methods:
__getitem__: Return the first node.
__iter__: Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
@define\nclass Symmetry:\n \"\"\"A relationship between a pair of nodes denoting their left/right pairing.\n\n Attributes:\n nodes: A set of two `Node`s.\n \"\"\"\n\n nodes: set[Node] = field(converter=set, validator=lambda _, __, val: len(val) == 2)\n\n def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
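A tiny sketch (illustrative names) of constructing and iterating a symmetry pair:
from sleap_io import Node, Symmetry

left, right = Node("left_eye"), Node("right_eye")
sym = Symmetry([left, right])  # converted to a set of exactly two nodes
{node.name for node in sym}    # {'left_eye', 'right_eye'}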
"},{"location":"model/#sleap_io.Symmetry.__getitem__","title":"__getitem__(idx)
","text":"Return the first node.
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
"},{"location":"model/#sleap_io.Symmetry.__iter__","title":"__iter__()
","text":"Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n
"},{"location":"model/#sleap_io.Track","title":"sleap_io.Track
","text":"An object that represents the same animal/object across multiple detections.
This allows tracking of unique entities in the video over time and space.
A Track
may also be used to refer to unique identity classes that span multiple videos, such as \"female mouse\"
.
Attributes:
name (str): A name given to this track for identification purposes.
Notes:
Tracks are compared by identity. This means that unique track objects with the same name are considered to be different.
sleap_io/model/instance.py
@define(eq=False)\nclass Track:\n \"\"\"An object that represents the same animal/object across multiple detections.\n\n This allows tracking of unique entities in the video over time and space.\n\n A `Track` may also be used to refer to unique identity classes that span multiple\n videos, such as `\"female mouse\"`.\n\n Attributes:\n name: A name given to this track for identification purposes.\n\n Notes:\n `Track`s are compared by identity. This means that unique track objects with the\n same name are considered to be different.\n \"\"\"\n\n name: str = \"\"\n
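A quick sketch of the identity-based comparison noted above:
from sleap_io import Track

t1 = Track(name="female mouse")
t2 = Track(name="female mouse")
t1 == t2  # False -- tracks are compared by identity, not by name
t1 == t1  # True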
"},{"location":"model/#sleap_io.Video","title":"sleap_io.Video
","text":"Video
class used by sleap to represent videos and data associated with them.
This class is used to store information regarding a video and its components. It is used to store the video's filename
, shape
, and the video's backend
.
To create a Video
object, use the from_filename
method which will select the backend appropriately.
Attributes:
filename (str | list[str]): The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are expected. If filename is a folder, it will be searched for images.
backend (Optional[VideoBackend]): An object that implements the basic methods for reading and manipulating frames of a specific video type.
backend_metadata (dict[str, any]): A dictionary of metadata specific to the backend. This is useful for storing metadata that requires an open backend (e.g., shape information) without having access to the video file itself.
source_video (Optional[Video]): The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
open_backend (bool): Whether to open the backend when the video is available. If True (the default), the backend will be automatically opened if the video exists. Set this to False when you want to manually open the backend, or when you know the video file does not exist and you want to avoid trying to open the file.
Notes:
Instances of this class are hashed by identity, not by value. This means that two Video instances with the same attributes will NOT be considered equal in a set or dict.
See also: VideoBackend
Methods:
__attrs_post_init__: Post init syntactic sugar.
__deepcopy__: Deep copy the video object.
__getitem__: Return the frames of the video at the given indices.
__len__: Return the length of the video as the number of frames.
__repr__: Informal string representation (for print or format).
__str__: Informal string representation (for print or format).
close: Close the video backend.
exists: Check if the video file exists and is accessible.
from_filename: Create a Video from a filename.
open: Open the video backend for reading.
replace_filename: Update the filename of the video, optionally opening the backend.
save: Save video frames to a new video file.
Attributes:
grayscale (bool | None): Return whether the video is grayscale.
is_open (bool): Check if the video backend is open.
shape (Tuple[int, int, int, int] | None): Return the shape of the video as (num_frames, height, width, channels).
Source code insleap_io/model/video.py
@attrs.define(eq=False)\nclass Video:\n \"\"\"`Video` class used by sleap to represent videos and data associated with them.\n\n This class is used to store information regarding a video and its components.\n It is used to store the video's `filename`, `shape`, and the video's `backend`.\n\n To create a `Video` object, use the `from_filename` method which will select the\n backend appropriately.\n\n Attributes:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n backend: An object that implements the basic methods for reading and\n manipulating frames of a specific video type.\n backend_metadata: A dictionary of metadata specific to the backend. This is\n useful for storing metadata that requires an open backend (e.g., shape\n information) without having access to the video file itself.\n source_video: The source video object if this is a proxy video. This is present\n when the video contains an embedded subset of frames from another video.\n open_backend: Whether to open the backend when the video is available. If `True`\n (the default), the backend will be automatically opened if the video exists.\n Set this to `False` when you want to manually open the backend, or when the\n you know the video file does not exist and you want to avoid trying to open\n the file.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `Video` instances with the same attributes will NOT be considered equal in a\n set or dict.\n\n See also: VideoBackend\n \"\"\"\n\n filename: str | list[str]\n backend: Optional[VideoBackend] = None\n backend_metadata: dict[str, any] = attrs.field(factory=dict)\n source_video: Optional[Video] = None\n open_backend: bool = True\n\n EXTS = MediaVideo.EXTS + HDF5Video.EXTS + ImageVideo.EXTS\n\n def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n\n def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. 
If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n\n @property\n def shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n If the video backend is not set or it cannot determine the shape of the video,\n this will return None.\n \"\"\"\n return self._get_shape()\n\n def _get_shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n This suppresses errors related to querying the backend for the video shape, such\n as when it has not been set or when the video file is not found.\n \"\"\"\n try:\n return self.backend.shape\n except:\n if \"shape\" in self.backend_metadata:\n return self.backend_metadata[\"shape\"]\n return None\n\n @property\n def grayscale(self) -> bool | None:\n \"\"\"Return whether the video is grayscale.\n\n If the video backend is not set or it cannot determine whether the video is\n grayscale, this will return None.\n \"\"\"\n shape = self.shape\n if shape is not None:\n return shape[-1] == 1\n else:\n grayscale = None\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n return grayscale\n\n @grayscale.setter\n def grayscale(self, value: bool):\n \"\"\"Set the grayscale value and adjust the backend.\"\"\"\n if self.backend is not None:\n self.backend.grayscale = value\n self.backend._cached_shape = None\n\n self.backend_metadata[\"grayscale\"] = value\n\n def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n\n def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n\n def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. 
Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n\n def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n\n @property\n def is_open(self) -> bool:\n \"\"\"Check if the video backend is open.\"\"\"\n return self.exists() and self.backend is not None\n\n def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n ):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. 
If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n\n def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n\n def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n ):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n\n def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. 
If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
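Example (a minimal sketch of creating a Video with from_filename; the media filename is hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("session1.mp4")  # hypothetical media file

print(video.shape)    # (num_frames, height, width, channels), or None if unknown
print(len(video))     # number of frames (0 if the shape cannot be determined)
print(video.is_open)  # True if the file exists and a backend was instantiated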
"},{"location":"model/#sleap_io.Video.grayscale","title":"grayscale: bool | None
property
writable
","text":"Return whether the video is grayscale.
If the video backend is not set or it cannot determine whether the video is grayscale, this will return None.
"},{"location":"model/#sleap_io.Video.is_open","title":"is_open: bool
property
","text":"Check if the video backend is open.
"},{"location":"model/#sleap_io.Video.shape","title":"shape: Tuple[int, int, int, int] | None
property
","text":"Return the shape of the video as (num_frames, height, width, channels).
If the video backend is not set or it cannot determine the shape of the video, this will return None.
"},{"location":"model/#sleap_io.Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Post init syntactic sugar.
Source code insleap_io/model/video.py
def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n
"},{"location":"model/#sleap_io.Video.__deepcopy__","title":"__deepcopy__(memo)
","text":"Deep copy the video object.
Source code insleap_io/model/video.py
def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n
"},{"location":"model/#sleap_io.Video.__getitem__","title":"__getitem__(inds)
","text":"Return the frames of the video at the given indices.
Parameters:
Name Type Description Defaultinds
int | list[int] | slice
Index or list of indices of frames to read.
requiredReturns:
Type Descriptionndarray
Frame or frames as a numpy array of shape (height, width, channels)
if a scalar index is provided, or (frames, height, width, channels)
if a list of indices is provided.
See also: VideoBackend.get_frame, VideoBackend.get_frames
Source code insleap_io/model/video.py
def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n
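Example (a sketch of frame indexing as described in the docstring above; the filename is hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("session1.mp4")  # hypothetical media file

frame = video[0]          # single frame: (height, width, channels)
clip = video[[0, 5, 10]]  # multiple frames: (frames, height, width, channels)

print(frame.shape, clip.shape)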
"},{"location":"model/#sleap_io.Video.__len__","title":"__len__()
","text":"Return the length of the video as the number of frames.
Source code insleap_io/model/video.py
def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n
"},{"location":"model/#sleap_io.Video.__repr__","title":"__repr__()
","text":"Informal string representation (for print or format).
Source code insleap_io/model/video.py
def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n
"},{"location":"model/#sleap_io.Video.__str__","title":"__str__()
","text":"Informal string representation (for print or format).
Source code insleap_io/model/video.py
def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n
"},{"location":"model/#sleap_io.Video.close","title":"close()
","text":"Close the video backend.
Source code insleap_io/model/video.py
def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n
"},{"location":"model/#sleap_io.Video.exists","title":"exists(check_all=False, dataset=None)
","text":"Check if the video file exists and is accessible.
Parameters:
Name Type Description Defaultcheck_all
bool
If True
, check that all filenames in a list exist. If False
(the default), check that the first filename exists.
False
dataset
str | None
Name of dataset in HDF5 file. If specified, this function will return False
if the dataset does not exist.
None
Returns:
Type Descriptionbool
True
if the file exists and is accessible, False
otherwise.
sleap_io/model/video.py
def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n
"},{"location":"model/#sleap_io.Video.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, source_video=None, **kwargs)
classmethod
","text":"Create a Video from a filename.
Parameters:
Name Type Description Defaultfilename
str | list[str]
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
requireddataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[bool]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
source_video
Optional[Video]
The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
None
Returns:
Type DescriptionVideoBackend
Video instance with the appropriate backend instantiated.
Source code insleap_io/model/video.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n
"},{"location":"model/#sleap_io.Video.open","title":"open(filename=None, dataset=None, grayscale=None, keep_open=True)
","text":"Open the video backend for reading.
Parameters:
Name Type Description Defaultfilename
Optional[str]
Filename to open. If not specified, will use the filename set on the video object.
None
dataset
Optional[str]
Name of dataset in HDF5 file.
None
grayscale
Optional[str]
Whether to force grayscale. If None, autodetect on first frame load.
None
keep_open
bool
Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
True
Notes This is useful for opening the video backend to read frames and then closing it after reading all the necessary frames.
If the backend was already open, it will be closed before opening a new one. Values for the HDF5 dataset and grayscale will be remembered if not specified.
Source code insleap_io/model/video.py
def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n
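Example (a sketch of explicitly managing the backend with open and close; the filename is hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("session1.mp4")  # hypothetical media file

video.open()          # re-opens the backend, remembering dataset/grayscale settings
frames = video[:100]  # read a batch of frames while the backend is open
video.close()         # caches shape/grayscale/dataset in backend_metadata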
"},{"location":"model/#sleap_io.Video.replace_filename","title":"replace_filename(new_filename, open=True)
","text":"Update the filename of the video, optionally opening the backend.
Parameters:
Name Type Description Defaultnew_filename
str | Path | list[str] | list[Path]
New filename to set for the video.
requiredopen
bool
If True
(the default), open the backend with the new filename. If the new filename does not exist, no error is raised.
True
Source code in sleap_io/model/video.py
def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n
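Example (a sketch of re-pointing a Video after files have moved; both paths are hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("/old/path/session1.mp4")  # hypothetical path

# Update the path; by default the backend is re-opened if the new file exists.
video.replace_filename("/new/path/session1.mp4")

# Pass open=False to just update the filename without touching the backend.
video.replace_filename("/new/path/session1.mp4", open=False)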
"},{"location":"model/#sleap_io.Video.save","title":"save(save_path, frame_inds=None, video_kwargs=None)
","text":"Save video frames to a new video file.
Parameters:
Name Type Description Defaultsave_path
str | Path
Path to the new video file. Should end in MP4.
requiredframe_inds
list[int] | ndarray | None
Frame indices to save. Can be specified as a list or array of frame integers. If not specified, saves all video frames.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type DescriptionVideo
A new Video
object pointing to the new video file.
sleap_io/model/video.py
def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
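Example (a sketch of re-encoding a subset of frames to a new MP4; filenames are hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("session1.mp4")  # hypothetical media file

# Save the first 100 frames and get a new Video pointing at the clip.
clip = video.save("clip.mp4", frame_inds=list(range(100)))
print(clip.filename)  # "clip.mp4"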
"},{"location":"model/#sleap_io.SuggestionFrame","title":"sleap_io.SuggestionFrame
","text":"Data structure for a single frame of suggestions.
Attributes:
Name Type Descriptionvideo
Video
The video associated with the frame.
frame_idx
int
The index of the frame in the video.
Source code insleap_io/model/suggestions.py
@attrs.define(auto_attribs=True)\nclass SuggestionFrame:\n \"\"\"Data structure for a single frame of suggestions.\n\n Attributes:\n video: The video associated with the frame.\n frame_idx: The index of the frame in the video.\n \"\"\"\n\n video: Video\n frame_idx: int\n
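Example (a minimal sketch; assumes SuggestionFrame is exposed at the package top level as in the section heading, and the video filename is hypothetical):
import sleap_io as sio

video = sio.Video.from_filename("session1.mp4")  # hypothetical media file

# Mark frame 42 of this video as a suggested frame for labeling.
suggestion = sio.SuggestionFrame(video=video, frame_idx=42)
print(suggestion.frame_idx)  # 42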
"},{"location":"reference/SUMMARY/","title":"SUMMARY","text":"sleap_io
","text":"This module exposes all high level APIs for sleap-io.
Modules:
Name Descriptionio
This sub-package contains I/O-related modules such as specific format backends.
model
This subpackage contains data model interfaces.
version
This module defines the package version.
"},{"location":"reference/sleap_io/version/","title":"version","text":""},{"location":"reference/sleap_io/version/#sleap_io.version","title":"sleap_io.version
","text":"This module defines the package version.
"},{"location":"reference/sleap_io/io/","title":"io","text":""},{"location":"reference/sleap_io/io/#sleap_io.io","title":"sleap_io.io
","text":"This sub-package contains I/O-related modules such as specific format backends.
Modules:
Name Descriptionjabs
This module handles direct I/O operations for working with JABS files.
labelstudio
This module handles direct I/O operations for working with Labelstudio files.
main
This module contains high-level wrappers for utilizing different I/O backends.
nwb
Functions to write and read from the neurodata without borders (NWB) format.
slp
This module handles direct I/O operations for working with .slp files.
utils
Miscellaneous utilities for working with different I/O formats.
video
Backends for reading videos.
video_reading
Backends for reading videos.
video_writing
Utilities for writing videos.
"},{"location":"reference/sleap_io/io/jabs/","title":"jabs","text":""},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs","title":"sleap_io.io.jabs
","text":"This module handles direct I/O operations for working with JABS files.
Functions:
Name Descriptionconvert_labels
Convert a Labels
object into JABS-formatted annotations.
get_max_ids_in_video
Determine the maximum number of identities that exist at the same time.
make_simple_skeleton
Create a Skeleton
with a requested number of nodes attached in a line.
prediction_to_instance
Create an Instance
from prediction data.
read_labels
Read JABS style pose from a file and return a Labels
object.
tracklets_to_v3
Changes identity tracklets to the v3 format specifications.
write_jabs_v2
Write JABS pose file v2 data to file.
write_jabs_v3
Write JABS pose file v3 data to file.
write_jabs_v4
Write JABS pose file v4 data to file.
write_jabs_v5
Write JABS pose file v5 data to file.
write_labels
Convert and save a SLEAP Labels
object to a JABS pose file.
convert_labels(all_labels, video)
","text":"Convert a Labels
object into JABS-formatted annotations.
Parameters:
Name Type Description Defaultall_labels
Labels
SLEAP Labels
to be converted to JABS format.
video
Video
name of video to be converted
requiredReturns:
Type Descriptiondict
Dictionary of JABS data of the Labels
data.
sleap_io/io/jabs.py
def convert_labels(all_labels: Labels, video: Video) -> dict:\n \"\"\"Convert a `Labels` object into JABS-formatted annotations.\n\n Args:\n all_labels: SLEAP `Labels` to be converted to JABS format.\n video: name of video to be converted\n\n Returns:\n Dictionary of JABS data of the `Labels` data.\n \"\"\"\n labels = all_labels.find(video=video)\n\n # Determine shape of output\n # Low estimate of last frame labeled\n num_frames = max([x.frame_idx for x in labels]) + 1\n # If there is metadata available for the video, use that\n if video.shape:\n num_frames = max(num_frames, video.shape[0])\n if len(all_labels.skeletons) == 1:\n skeleton = all_labels.skeleton\n elif len(all_labels.skeletons) > 1:\n skeleton = [x for x in all_labels.skeletons if x.name == \"Mouse\"]\n if len(skeleton) == 0:\n raise ValueError(\"No mouse skeleton found in labels.\")\n skeleton = skeleton[0]\n num_keypoints = len(skeleton.nodes)\n num_mice = get_max_ids_in_video(labels, key=\"Mouse\")\n # Note that this 1-indexes identities\n track_2_idx = {\n key: val + 1\n for key, val in zip(all_labels.tracks, range(len(all_labels.tracks)))\n }\n last_unassigned_id = num_mice\n\n keypoint_mat = np.zeros([num_frames, num_mice, num_keypoints, 2], dtype=np.uint16)\n confidence_mat = np.zeros([num_frames, num_mice, num_keypoints], dtype=np.float32)\n identity_mat = np.zeros([num_frames, num_mice], dtype=np.uint32)\n instance_vector = np.zeros([num_frames], dtype=np.uint8)\n static_objects = {}\n\n # Populate the matrices with data\n for label in labels:\n assigned_instances = 0\n for instance_idx, instance in enumerate(label.instances):\n # Static objects just get added to the object dict\n # This will clobber data if more than one frame is annotated\n if instance.skeleton.name != \"Mouse\":\n static_objects[instance.skeleton.name] = instance.numpy()\n continue\n pose = instance.numpy()\n if pose.shape[0] != len(JABS_DEFAULT_KEYPOINTS):\n warnings.warn(\n f\"JABS format only supports 12 keypoints for mice. Skipping storage of instance on frame {label.frame_idx} with {len(instance.points)} keypoints.\"\n )\n continue\n missing_points = np.isnan(pose[:, 0])\n pose[np.isnan(pose)] = 0\n # JABS stores y,x for poses\n pose = np.flip(pose.astype(np.uint16), axis=-1)\n keypoint_mat[label.frame_idx, instance_idx, :, :] = pose\n confidence_mat[label.frame_idx, instance_idx, ~missing_points] = 1.0\n if instance.track:\n identity_mat[label.frame_idx, instance_idx] = track_2_idx[\n instance.track\n ]\n else:\n warnings.warn(\n f\"Pose with unassigned track found on {label.video.filename} frame {label.frame_idx} instance {instance_idx}. Assigning ID {last_unassigned_id}.\"\n )\n identity_mat[label.frame_idx, instance_idx] = last_unassigned_id\n last_unassigned_id += 1\n assigned_instances += 1\n instance_vector[label.frame_idx] = assigned_instances\n\n # Return the data as a dict\n return {\n \"keypoints\": keypoint_mat.astype(np.uint16),\n \"confidence\": confidence_mat.astype(np.float32),\n \"identity\": identity_mat.astype(np.uint32),\n \"num_identities\": instance_vector.astype(np.uint16),\n \"static_objects\": static_objects,\n }\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.get_max_ids_in_video","title":"get_max_ids_in_video(labels, key='Mouse')
","text":"Determine the maximum number of identities that exist at the same time.
Parameters:
Name Type Description Defaultlabels
List[Labels]
SLEAP Labels
to count
key
str
Name of the skeleton to select for identities
'Mouse'
Returns:
Type Descriptionint
Count of the maximum concurrent identities in a single frame
Source code insleap_io/io/jabs.py
def get_max_ids_in_video(labels: List[Labels], key: str = \"Mouse\") -> int:\n \"\"\"Determine the maximum number of identities that exist at the same time.\n\n Args:\n labels: SLEAP `Labels` to count\n key: Name of the skeleton to select for identities\n\n Returns:\n Count of the maximum concurrent identities in a single frame\n \"\"\"\n max_labels = 0\n for label in labels:\n n_labels = sum([x.skeleton.name == key for x in label.instances])\n max_labels = max(max_labels, n_labels)\n\n return max_labels\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.make_simple_skeleton","title":"make_simple_skeleton(name, num_points)
","text":"Create a Skeleton
with a requested number of nodes attached in a line.
Parameters:
Name Type Description Defaultname
str
name of the skeleton and prefix to nodes
requirednum_points
int
number of points to use in the skeleton
requiredReturns:
Type DescriptionSkeleton
Generated Skeleton
.
sleap_io/io/jabs.py
def make_simple_skeleton(name: str, num_points: int) -> Skeleton:\n \"\"\"Create a `Skeleton` with a requested number of nodes attached in a line.\n\n Args:\n name: name of the skeleton and prefix to nodes\n num_points: number of points to use in the skeleton\n\n Returns:\n Generated `Skeleton`.\n \"\"\"\n nodes = [Node(name + \"_kp\" + str(i)) for i in range(num_points)]\n edges = [Edge(nodes[i], nodes[i + 1]) for i in range(num_points - 1)]\n return Skeleton(nodes, edges, name=name)\n
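Example (a sketch using a hypothetical static object name):
from sleap_io.io.jabs import make_simple_skeleton

# Four nodes named lixit_kp0..lixit_kp3 chained by three edges in a line.
skeleton = make_simple_skeleton("lixit", 4)
print([node.name for node in skeleton.nodes])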
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.prediction_to_instance","title":"prediction_to_instance(data, confidence, skeleton, track=None)
","text":"Create an Instance
from prediction data.
Parameters:
Name Type Description Defaultdata
Union[ndarray[uint16], ndarray[float32]]
keypoint locations
requiredconfidence
ndarray[float32]
confidence for keypoints
requiredskeleton
Skeleton
Skeleton
to use for Instance
track
Track
Track
to assign to Instance
None
Returns:
Type DescriptionInstance
Parsed Instance
.
sleap_io/io/jabs.py
def prediction_to_instance(\n data: Union[np.ndarray[np.uint16], np.ndarray[np.float32]],\n confidence: np.ndarray[np.float32],\n skeleton: Skeleton,\n track: Track = None,\n) -> Instance:\n \"\"\"Create an `Instance` from prediction data.\n\n Args:\n data: keypoint locations\n confidence: confidence for keypoints\n skeleton: `Skeleton` to use for `Instance`\n track: `Track` to assign to `Instance`\n\n Returns:\n Parsed `Instance`.\n \"\"\"\n assert (\n len(skeleton.nodes) == data.shape[0]\n ), f\"Skeleton ({len(skeleton.nodes)}) does not match number of keypoints ({data.shape[0]})\"\n\n points = {}\n for i, cur_node in enumerate(skeleton.nodes):\n # confidence of 0 indicates no keypoint predicted for instance\n if confidence[i] > 0:\n points[cur_node] = Point(\n data[i, 0],\n data[i, 1],\n visible=True,\n )\n\n if not points:\n return None\n else:\n return Instance(points, skeleton=skeleton, track=track)\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.read_labels","title":"read_labels(labels_path, skeleton=JABS_DEFAULT_SKELETON)
","text":"Read JABS style pose from a file and return a Labels
object.
TODO: Attributes are ignored, including px_to_cm field. TODO: Segmentation data ignored in v6, but will read in pose. TODO: Lixit static objects currently stored as n_lixit,2 (eg 1 object). Should be converted to multiple objects
Parameters:
Name Type Description Defaultlabels_path
str
Path to the JABS pose file.
requiredskeleton
Optional[Skeleton]
An optional Skeleton
object. Defaults to JABS pose version 2-6.
JABS_DEFAULT_SKELETON
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/jabs.py
def read_labels(\n labels_path: str, skeleton: Optional[Skeleton] = JABS_DEFAULT_SKELETON\n) -> Labels:\n \"\"\"Read JABS style pose from a file and return a `Labels` object.\n\n TODO: Attributes are ignored, including px_to_cm field.\n TODO: Segmentation data ignored in v6, but will read in pose.\n TODO: Lixit static objects currently stored as n_lixit,2 (eg 1 object). Should be converted to multiple objects\n\n Args:\n labels_path: Path to the JABS pose file.\n skeleton: An optional `Skeleton` object. Defaults to JABS pose version 2-6.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n frames: List[LabeledFrame] = []\n # Video name is the pose file minus the suffix\n video_name = re.sub(r\"(_pose_est_v[2-6])?\\.h5\", \".avi\", labels_path)\n video = Video.from_filename(video_name)\n if not skeleton:\n skeleton = JABS_DEFAULT_SKELETON\n tracks = {}\n\n if not os.access(labels_path, os.F_OK):\n raise FileNotFoundError(f\"{labels_path} doesn't exist.\")\n if not os.access(labels_path, os.R_OK):\n raise PermissionError(f\"{labels_path} cannot be accessed.\")\n\n with h5py.File(labels_path, \"r\") as pose_file:\n num_frames = pose_file[\"poseest/points\"].shape[0]\n try:\n pose_version = pose_file[\"poseest\"].attrs[\"version\"][0]\n except (KeyError, IndexError):\n pose_version = 2\n data_shape = pose_file[\"poseest/points\"].shape\n assert (\n len(data_shape) == 3\n ), f\"Pose version not present and shape does not match single mouse: shape of {data_shape} for {labels_path}\"\n if pose_version == 2:\n tracks[1] = Track(\"1\")\n # Change field name for newer pose formats\n if pose_version == 3:\n id_key = \"instance_track_id\"\n elif pose_version > 3:\n id_key = \"instance_embed_id\"\n max_ids = pose_file[\"poseest/points\"].shape[1]\n\n for frame_idx in range(num_frames):\n instances = []\n pose_data = pose_file[\"poseest/points\"][frame_idx, ...]\n # JABS stores y,x for poses\n pose_data = np.flip(pose_data, axis=-1)\n pose_conf = pose_file[\"poseest/confidence\"][frame_idx, ...]\n # single animal case\n if pose_version == 2:\n new_instance = prediction_to_instance(\n pose_data, pose_conf, skeleton, tracks[1]\n )\n instances.append(new_instance)\n # multi-animal case\n if pose_version > 2:\n pose_ids = pose_file[\"poseest/\" + id_key][frame_idx, ...]\n # pose_v3 uses another field to describe the number of valid poses\n if pose_version == 3:\n max_ids = pose_file[\"poseest/instance_count\"][frame_idx]\n for cur_id in range(max_ids):\n # v4+ uses reserved values for invalid/unused poses\n # Note: ignores 'poseest/id_mask' to keep predictions that were not assigned an id\n if pose_version > 3 and pose_ids[cur_id] <= 0:\n continue\n if pose_ids[cur_id] not in tracks.keys():\n tracks[pose_ids[cur_id]] = Track(str(pose_ids[cur_id]))\n new_instance = prediction_to_instance(\n pose_data[cur_id],\n pose_conf[cur_id],\n skeleton,\n tracks[pose_ids[cur_id]],\n )\n if new_instance:\n instances.append(new_instance)\n # Static objects\n if (\n frame_idx == 0\n and pose_version >= 5\n and \"static_objects\" in pose_file.keys()\n ):\n present_objects = pose_file[\"static_objects\"].keys()\n for cur_object in present_objects:\n object_keypoints = pose_file[\"static_objects/\" + cur_object][:]\n object_skeleton = make_simple_skeleton(\n cur_object, object_keypoints.shape[0]\n )\n new_instance = prediction_to_instance(\n object_keypoints,\n np.ones(object_keypoints.shape[:-1]),\n object_skeleton,\n )\n if new_instance:\n instances.append(new_instance)\n frame_label = LabeledFrame(video, 
frame_idx, instances)\n frames.append(frame_label)\n labels = Labels(frames)\n labels.provenance[\"filename\"] = labels_path\n return labels\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.tracklets_to_v3","title":"tracklets_to_v3(tracklet_matrix)
","text":"Changes identity tracklets to the v3 format specifications.
v3 specifications require: (a) tracklets are 0-indexed, (b) tracklets appear in ascending order, and (c) tracklets exist for continuous blocks of time
Parameters:
Name Type Description Defaulttracklet_matrix
ndarray
Numpy array of shape (frame, n_animals) that contains identity values. Identities are assumed to be 1-indexed.
requiredReturns:
Type Descriptionndarray
A corrected numpy array of the same shape as input
Source code insleap_io/io/jabs.py
def tracklets_to_v3(tracklet_matrix: np.ndarray) -> np.ndarray:\n \"\"\"Changes identity tracklets to the v3 format specifications.\n\n v3 specifications require:\n (a) tracklets are 0-indexed\n (b) tracklets appear in ascending order\n (c) tracklets exist for continuous blocks of time\n\n Args:\n tracklet_matrix: Numpy array of shape (frame, n_animals) that contains identity values. Identities are assumed to be 1-indexed.\n\n Returns:\n A corrected numpy array of the same shape as input\n \"\"\"\n assert tracklet_matrix.ndim == 2\n\n # Fragment the tracklets based on gaps\n valid_ids = np.unique(tracklet_matrix)\n valid_ids = valid_ids[valid_ids != 0]\n track_fragments = {}\n for cur_id in valid_ids:\n frame_idx, column_idx = np.where(tracklet_matrix == cur_id)\n gaps = np.nonzero(np.diff(frame_idx) - 1)[0]\n for sliced_frame, sliced_column in zip(\n np.split(frame_idx, gaps + 1), np.split(column_idx, gaps + 1)\n ):\n # The keys used here are (first frame, first column) such that sorting can be used for ascending order\n track_fragments[sliced_frame[0], sliced_column[0]] = sliced_column\n\n return_mat = np.zeros_like(tracklet_matrix)\n for next_id, key in enumerate(sorted(track_fragments.keys())):\n columns_to_assign = track_fragments[key]\n return_mat[\n range(key[0], key[0] + len(columns_to_assign)), columns_to_assign\n ] = next_id\n\n return return_mat\n
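Example (a sketch with a toy identity matrix; the values are illustrative only):
import numpy as np
from sleap_io.io.jabs import tracklets_to_v3

# (frames, n_animals) identity matrix; identities are 1-indexed, 0 = empty slot.
identities = np.array([
    [1, 0],
    [1, 2],
    [1, 2],
])

# Returns the same shape with 0-indexed, ascending, temporally continuous tracklets.
v3_ids = tracklets_to_v3(identities)
print(v3_ids.shape)  # (3, 2)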
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v2","title":"write_jabs_v2(data, filename)
","text":"Write JABS pose file v2 data to file.
Writes single mouse pose data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code insleap_io/io/jabs.py
def write_jabs_v2(data: dict, filename: str):\n \"\"\"Write JABS pose file v2 data to file.\n\n Writes single mouse pose data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # Check that we're trying to write single mouse data\n assert data[\"keypoints\"].shape[1] == 1\n out_keypoints = np.squeeze(data[\"keypoints\"], axis=1)\n out_confidences = np.squeeze(data[\"confidence\"], axis=1)\n\n with h5py.File(filename, \"w\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [2, 0]})\n pose_grp.require_dataset(\n \"points\", out_keypoints.shape, out_keypoints.dtype, data=out_keypoints\n )\n pose_grp.require_dataset(\n \"confidence\",\n out_confidences.shape,\n out_confidences.dtype,\n data=out_confidences,\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v3","title":"write_jabs_v3(data, filename)
","text":"Write JABS pose file v3 data to file.
Writes multi-mouse pose data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code insleap_io/io/jabs.py
def write_jabs_v3(data: dict, filename: str):\n \"\"\"Write JABS pose file v3 data to file.\n\n Writes multi-mouse pose data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n v3_tracklets = tracklets_to_v3(data[\"identity\"])\n with h5py.File(filename, \"w\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [3, 0]})\n # keypoint field\n pose_grp.require_dataset(\n \"points\",\n data[\"keypoints\"].shape,\n data[\"keypoints\"].dtype,\n data=data[\"keypoints\"],\n )\n # confidence field\n pose_grp.require_dataset(\n \"confidence\",\n data[\"confidence\"].shape,\n data[\"confidence\"].dtype,\n data=data[\"confidence\"],\n )\n # id field\n pose_grp.require_dataset(\n \"instance_track_id\",\n v3_tracklets.shape,\n v3_tracklets.dtype,\n data=v3_tracklets,\n )\n # instance count field\n pose_grp.require_dataset(\n \"instance_count\",\n data[\"num_identities\"].shape,\n data[\"num_identities\"].dtype,\n data=data[\"num_identities\"],\n )\n # extra field where we don't have data, so fill with default data\n pose_grp.require_dataset(\n \"instance_embedding\",\n data[\"confidence\"].shape,\n data[\"confidence\"].dtype,\n data=np.zeros_like(data[\"confidence\"]),\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v4","title":"write_jabs_v4(data, filename)
","text":"Write JABS pose file v4 data to file.
Writes multi-mouse pose and longterm identity object data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code insleap_io/io/jabs.py
def write_jabs_v4(data: dict, filename: str):\n \"\"\"Write JABS pose file v4 data to file.\n\n Writes multi-mouse pose and longterm identity object data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # v4 extends v3\n write_jabs_v3(data, filename)\n with h5py.File(filename, \"a\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [4, 0]})\n # new fields on top of v4\n identity_mask_mat = np.all(data[\"confidence\"] == 0, axis=-1).astype(bool)\n pose_grp.require_dataset(\n \"id_mask\",\n identity_mask_mat.shape,\n identity_mask_mat.dtype,\n data=identity_mask_mat,\n )\n # No identity embedding data\n # Note that since the identity information doesn't exist, this will break any functionality that relies on it\n default_id_embeds = np.zeros(\n list(identity_mask_mat.shape) + [0], dtype=np.float32\n )\n pose_grp.require_dataset(\n \"identity_embeds\",\n default_id_embeds.shape,\n default_id_embeds.dtype,\n data=default_id_embeds,\n )\n default_id_centers = np.zeros(default_id_embeds.shape[1:], dtype=np.float32)\n pose_grp.require_dataset(\n \"instance_id_center\",\n default_id_centers.shape,\n default_id_centers.dtype,\n data=default_id_centers,\n )\n # v4 uses an id field that is 1-indexed\n pose_grp.require_dataset(\n \"instance_embed_id\",\n data[\"identity\"].shape,\n data[\"identity\"].dtype,\n data=data[\"identity\"],\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_jabs_v5","title":"write_jabs_v5(data, filename)
","text":"Write JABS pose file v5 data to file.
Writes multi-mouse pose, longterm identity, and static object data.
Parameters:
Name Type Description Defaultdata
dict
Dictionary of JABS data generated from convert_labels
requiredfilename
str
Filename to write data to
required Source code insleap_io/io/jabs.py
def write_jabs_v5(data: dict, filename: str):\n \"\"\"Write JABS pose file v5 data to file.\n\n Writes multi-mouse pose, longterm identity, and static object data.\n\n Args:\n data: Dictionary of JABS data generated from convert_labels\n filename: Filename to write data to\n \"\"\"\n # v5 extends v4\n write_jabs_v4(data, filename)\n with h5py.File(filename, \"a\") as h5:\n pose_grp = h5.require_group(\"poseest\")\n pose_grp.attrs.update({\"version\": [5, 0]})\n if \"static_objects\" in data.keys():\n object_grp = h5.require_group(\"static_objects\")\n for object_key, object_keypoints in data[\"static_objects\"].items():\n object_grp.require_dataset(\n object_key,\n object_keypoints.shape,\n np.uint16,\n data=object_keypoints.astype(np.uint16),\n )\n
"},{"location":"reference/sleap_io/io/jabs/#sleap_io.io.jabs.write_labels","title":"write_labels(labels, pose_version, root_folder)
","text":"Convert and save a SLEAP Labels
object to a JABS pose file.
Only supports pose version 2 (single mouse) and 3-5 (multi mouse).
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to JABS pose format.
pose_version
int
JABS pose version to use when writing data.
requiredroot_folder
str
Root folder where the jabs files should be written
required Source code insleap_io/io/jabs.py
def write_labels(labels: Labels, pose_version: int, root_folder: str):\n \"\"\"Convert and save a SLEAP `Labels` object to a JABS pose file.\n\n Only supports pose version 2 (single mouse) and 3-5 (multi mouse).\n\n Args:\n labels: SLEAP `Labels` to be converted to JABS pose format.\n pose_version: JABS pose version to use when writing data.\n root_folder: Root folder where the jabs files should be written\n \"\"\"\n for video in labels.videos:\n converted_labels = convert_labels(labels, video)\n out_filename = (\n os.path.splitext(video.filename)[0] + f\"_pose_est_v{pose_version}.h5\"\n )\n if root_folder:\n out_filename = os.path.join(root_folder, out_filename)\n os.makedirs(os.path.dirname(out_filename), exist_ok=True)\n if os.path.exists(out_filename):\n warnings.warn(f\"Skipping {out_filename} because it already exists.\")\n continue\n if pose_version == 2:\n write_jabs_v2(converted_labels, out_filename)\n elif pose_version == 3:\n write_jabs_v3(converted_labels, out_filename)\n elif pose_version == 4:\n write_jabs_v4(converted_labels, out_filename)\n elif pose_version == 5:\n write_jabs_v5(converted_labels, out_filename)\n else:\n raise NotImplementedError(f\"Pose format {pose_version} not supported.\")\n
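Example (a sketch of the end-to-end conversion; assumes the high-level sio.load_slp loader and a hypothetical project file):
import sleap_io as sio
from sleap_io.io.jabs import write_labels

labels = sio.load_slp("predictions.slp")  # hypothetical SLEAP project

# One JABS pose file (v5: pose + identity + static objects) is written per video.
write_labels(labels, pose_version=5, root_folder="jabs_output")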
"},{"location":"reference/sleap_io/io/labelstudio/","title":"labelstudio","text":""},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio","title":"sleap_io.io.labelstudio
","text":"This module handles direct I/O operations for working with Labelstudio files.
Some important nomenclature:
tasks
: typically maps to a single frame of data to be annotated; closest correspondence is to LabeledFrame
annotations
: collection of points, polygons, relations, etc.; corresponds to Instance
s and Point
s, but in a flattened hierarchy
Functions:
Name Descriptionbuild_relation_map
Build a two-way relationship map between annotations.
convert_labels
Convert a Labels
object into Label Studio-formatted annotations.
filter_and_index
Filter annotations based on the type field and index them by ID.
infer_nodes
Parse the loaded JSON tasks to create a minimal skeleton.
parse_tasks
Read Label Studio style annotations from a file and return a Labels
object.
read_labels
Read Label Studio style annotations from a file and return a Labels
object.
task_to_labeled_frame
Parse annotations from an entry.
video_from_task
Given a Label Studio task, retrieve video information.
write_labels
Convert and save a SLEAP Labels
object to a Label Studio .json
file.
build_relation_map(annotations)
","text":"Build a two-way relationship map between annotations.
Parameters:
Name Type Description Defaultannotations
Iterable[dict]
annotations, presumably, containing relation types
requiredReturns:
Type DescriptionDict[str, List[str]]
A two way map of relations indexed by from_id
and to_id
fields.
sleap_io/io/labelstudio.py
def build_relation_map(annotations: Iterable[dict]) -> Dict[str, List[str]]:\n \"\"\"Build a two-way relationship map between annotations.\n\n Args:\n annotations: annotations, presumably, containing relation types\n\n Returns:\n A two way map of relations indexed by `from_id` and `to_id` fields.\n \"\"\"\n relations = list(filter(lambda d: d[\"type\"] == \"relation\", annotations))\n relmap: Dict[str, List[str]] = {}\n for rel in relations:\n if rel[\"from_id\"] not in relmap:\n relmap[rel[\"from_id\"]] = []\n relmap[rel[\"from_id\"]].append(rel[\"to_id\"])\n\n if rel[\"to_id\"] not in relmap:\n relmap[rel[\"to_id\"]] = []\n relmap[rel[\"to_id\"]].append(rel[\"from_id\"])\n return relmap\n
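Example (a sketch with a hand-written pair of annotations linked by a relation):
from sleap_io.io.labelstudio import build_relation_map

annotations = [
    {"id": "kp-1", "type": "keypointlabels"},
    {"id": "box-1", "type": "rectanglelabels"},
    {"type": "relation", "from_id": "kp-1", "to_id": "box-1", "direction": "right"},
]

relmap = build_relation_map(annotations)
print(relmap)  # {'kp-1': ['box-1'], 'box-1': ['kp-1']}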
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.convert_labels","title":"convert_labels(labels)
","text":"Convert a Labels
object into Label Studio-formatted annotations.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to Label Studio task format.
Returns:
Type DescriptionList[dict]
Label Studio dictionaries of the Labels
data.
sleap_io/io/labelstudio.py
def convert_labels(labels: Labels) -> List[dict]:\n \"\"\"Convert a `Labels` object into Label Studio-formatted annotations.\n\n Args:\n labels: SLEAP `Labels` to be converted to Label Studio task format.\n\n Returns:\n Label Studio dictionaries of the `Labels` data.\n \"\"\"\n out = []\n for frame in labels.labeled_frames:\n if frame.video.shape is not None:\n height = frame.video.shape[1]\n width = frame.video.shape[2]\n else:\n height = 100\n width = 100\n\n frame_annots = []\n\n for instance in frame.instances:\n inst_id = str(uuid.uuid4())\n frame_annots.append(\n {\n \"original_width\": width,\n \"original_height\": height,\n \"image_rotation\": 0,\n \"value\": {\n \"x\": 0,\n \"y\": 0,\n \"width\": width,\n \"height\": height,\n \"rotation\": 0,\n \"rectanglelabels\": [\n \"instance_class\"\n ], # TODO: need to handle instance classes / identity\n },\n \"id\": inst_id,\n \"from_name\": \"individuals\",\n \"to_name\": \"image\",\n \"type\": \"rectanglelabels\",\n }\n )\n\n for node, point in instance.points.items():\n point_id = str(uuid.uuid4())\n\n # add this point\n frame_annots.append(\n {\n \"original_width\": width,\n \"original_height\": height,\n \"image_rotation\": 0,\n \"value\": {\n \"x\": point.x / width * 100,\n \"y\": point.y / height * 100,\n \"keypointlabels\": [node.name],\n },\n \"from_name\": \"keypoint-label\",\n \"to_name\": \"image\",\n \"type\": \"keypointlabels\",\n \"id\": point_id,\n }\n )\n\n # add relationship of point to individual\n frame_annots.append(\n {\n \"from_id\": point_id,\n \"to_id\": inst_id,\n \"type\": \"relation\",\n \"direction\": \"right\",\n }\n )\n\n out.append(\n {\n \"data\": {\n # 'image': f\"/data/{up_deets['file']}\"\n },\n \"meta\": {\n \"video\": {\n \"filename\": frame.video.filename,\n \"frame_idx\": frame.frame_idx,\n \"shape\": frame.video.shape,\n }\n },\n \"annotations\": [\n {\n \"result\": frame_annots,\n \"was_cancelled\": False,\n \"ground_truth\": False,\n \"created_at\": datetime.datetime.now(\n datetime.timezone.utc\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"updated_at\": datetime.datetime.now(\n datetime.timezone.utc\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"lead_time\": 0,\n \"result_count\": 1,\n # \"completed_by\": user['id']\n }\n ],\n }\n )\n\n return out\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.filter_and_index","title":"filter_and_index(annotations, annot_type)
","text":"Filter annotations based on the type field and index them by ID.
Parameters:
Name Type Description Defaultannotations
Iterable[dict]
annotations to filter and index
requiredannot_type
str
annotation type to filter, e.g. 'keypointlabels' or 'rectanglelabels'
requiredReturns:
Type DescriptionDict[str, dict]
Dict of indexed and filtered annotations. Only annotations of type annot_type
will survive, and annotations are indexed by ID.
sleap_io/io/labelstudio.py
def filter_and_index(annotations: Iterable[dict], annot_type: str) -> Dict[str, dict]:\n \"\"\"Filter annotations based on the type field and index them by ID.\n\n Args:\n annotations: annotations to filter and index\n annot_type: annotation type to filter e.x. 'keypointlabels' or 'rectanglelabels'\n\n Returns:\n Dict of ndexed and filtered annotations. Only annotations of type `annot_type`\n will survive, and annotations are indexed by ID.\n \"\"\"\n filtered = list(filter(lambda d: d[\"type\"] == annot_type, annotations))\n indexed = {item[\"id\"]: item for item in filtered}\n return indexed\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.infer_nodes","title":"infer_nodes(tasks)
","text":"Parse the loaded JSON tasks to create a minimal skeleton.
Parameters:
Name Type Description Defaulttasks
List[Dict]
Collection of tasks loaded from Label Studio JSON.
requiredReturns:
Type DescriptionSkeleton
The inferred Skeleton
.
sleap_io/io/labelstudio.py
def infer_nodes(tasks: List[Dict]) -> Skeleton:\n \"\"\"Parse the loaded JSON tasks to create a minimal skeleton.\n\n Args:\n tasks: Collection of tasks loaded from Label Studio JSON.\n\n Returns:\n The inferred `Skeleton`.\n \"\"\"\n node_names = set()\n for entry in tasks:\n if \"annotations\" in entry:\n key = \"annotations\"\n elif \"completions\" in entry:\n key = \"completions\"\n else:\n raise ValueError(\"Cannot find annotation data for entry!\")\n\n for annotation in entry[key]:\n for datum in annotation[\"result\"]:\n if datum[\"type\"] == \"keypointlabels\":\n for node_name in datum[\"value\"][\"keypointlabels\"]:\n node_names.add(node_name)\n\n skeleton = Skeleton(nodes=list(node_names))\n return skeleton\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.parse_tasks","title":"parse_tasks(tasks, skeleton)
","text":"Read Label Studio style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaulttasks
List[Dict]
Collection of tasks to be converted to Labels
.
skeleton
Skeleton
Skeleton
with the nodes and edges to be used.
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/labelstudio.py
def parse_tasks(tasks: List[Dict], skeleton: Skeleton) -> Labels:\n \"\"\"Read Label Studio style annotations from a file and return a `Labels` object.\n\n Args:\n tasks: Collection of tasks to be converted to `Labels`.\n skeleton: `Skeleton` with the nodes and edges to be used.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n frames: List[LabeledFrame] = []\n for entry in tasks:\n # depending version, we have seen keys `annotations` and `completions`\n if \"annotations\" in entry:\n key = \"annotations\"\n elif \"completions\" in entry:\n key = \"completions\"\n else:\n raise ValueError(\"Cannot find annotation data for entry!\")\n\n frames.append(task_to_labeled_frame(entry, skeleton, key=key))\n\n return Labels(frames)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.read_labels","title":"read_labels(labels_path, skeleton=None)
","text":"Read Label Studio style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaultlabels_path
str
Path to the Label Studio annotation file, in json format.
requiredskeleton
Optional[Union[Skeleton, List[str]]]
An optional Skeleton
object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/labelstudio.py
def read_labels(\n labels_path: str, skeleton: Optional[Union[Skeleton, List[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio style annotations from a file and return a `Labels` object.\n\n Args:\n labels_path: Path to the Label Studio annotation file, in json format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n with open(labels_path, \"r\") as task_file:\n tasks = json.load(task_file)\n\n if type(skeleton) == list:\n skeleton = Skeleton(nodes=skeleton) # type: ignore[arg-type]\n elif skeleton is None:\n skeleton = infer_nodes(tasks)\n else:\n assert isinstance(skeleton, Skeleton)\n\n labels = parse_tasks(tasks, skeleton)\n labels.provenance[\"filename\"] = labels_path\n return labels\n
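A short usage sketch; the file path and node names below are placeholders:
from sleap_io.io.labelstudio import read_labels

# Infer the skeleton from the keypoint labels found in the tasks.
labels = read_labels("label_studio_export.json")

# Or pass node names so only those keypoint labels are kept.
labels = read_labels("label_studio_export.json", skeleton=["head", "thorax", "tail"])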
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.task_to_labeled_frame","title":"task_to_labeled_frame(task, skeleton, key='annotations')
","text":"Parse annotations from an entry.
Parameters:
Name Type Description Defaulttask
dict
Label Studio task to be parsed.
requiredskeleton
Skeleton
Skeleton to use for parsing.
requiredkey
str
Key to use for parsing annotations. Defaults to \"annotations\".
'annotations'
Returns:
Type DescriptionLabeledFrame
Parsed LabeledFrame
instance.
sleap_io/io/labelstudio.py
def task_to_labeled_frame(\n task: dict, skeleton: Skeleton, key: str = \"annotations\"\n) -> LabeledFrame:\n \"\"\"Parse annotations from an entry.\n\n Args:\n task: Label Studio task to be parsed.\n skeleton: Skeleton to use for parsing.\n key: Key to use for parsing annotations. Defaults to \"annotations\".\n\n Returns:\n Parsed `LabeledFrame` instance.\n \"\"\"\n if len(task[key]) > 1:\n warnings.warn(\n f\"Task {task.get('id', '??')}: Multiple annotations found, \"\n \"only taking the first!\"\n )\n\n # only parse the first entry result\n to_parse = task[key][0][\"result\"]\n\n individuals = filter_and_index(to_parse, \"rectanglelabels\")\n keypoints = filter_and_index(to_parse, \"keypointlabels\")\n relations = build_relation_map(to_parse)\n instances = []\n\n if len(individuals) > 0:\n # multi animal case:\n for indv_id, indv in individuals.items():\n points = {}\n for rel in relations[indv_id]:\n kpt = keypoints.pop(rel)\n node = Node(kpt[\"value\"][\"keypointlabels\"][0])\n x_pos = (kpt[\"value\"][\"x\"] * kpt[\"original_width\"]) / 100\n y_pos = (kpt[\"value\"][\"y\"] * kpt[\"original_height\"]) / 100\n\n # If the value is a NAN, the user did not mark this keypoint\n if math.isnan(x_pos) or math.isnan(y_pos):\n continue\n\n points[node] = Point(x_pos, y_pos)\n\n if len(points) > 0:\n instances.append(Instance(points, skeleton))\n\n # If this is multi-animal, any leftover keypoints should be unique bodyparts, and\n # will be collected here if single-animal, we only have 'unique bodyparts' [in a\n # way] and the process is identical\n points = {}\n for _, kpt in keypoints.items():\n node = Node(kpt[\"value\"][\"keypointlabels\"][0])\n points[node] = Point(\n (kpt[\"value\"][\"x\"] * kpt[\"original_width\"]) / 100,\n (kpt[\"value\"][\"y\"] * kpt[\"original_height\"]) / 100,\n visible=True,\n )\n if len(points) > 0:\n instances.append(Instance(points, skeleton))\n\n video, frame_idx = video_from_task(task)\n\n return LabeledFrame(video, frame_idx, instances)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.video_from_task","title":"video_from_task(task)
","text":"Given a Label Studio task, retrieve video information.
Parameters:
Name Type Description Defaulttask
dict
Label Studio task
requiredReturns:
Type DescriptionTuple[Video, int]
Video and frame index for this task
Source code in sleap_io/io/labelstudio.py
def video_from_task(task: dict) -> Tuple[Video, int]:\n \"\"\"Given a Label Studio task, retrieve video information.\n\n Args:\n task: Label Studio task\n\n Returns:\n Video and frame index for this task\n \"\"\"\n if \"meta\" in task and \"video\" in task[\"meta\"]:\n video = Video(task[\"meta\"][\"video\"][\"filename\"], task[\"meta\"][\"video\"][\"shape\"])\n frame_idx = task[\"meta\"][\"video\"][\"frame_idx\"]\n return video, frame_idx\n\n else:\n raise KeyError(\"Unable to locate video information for task!\", task)\n
"},{"location":"reference/sleap_io/io/labelstudio/#sleap_io.io.labelstudio.write_labels","title":"write_labels(labels, filename)
","text":"Convert and save a SLEAP Labels
object to a Label Studio .json
file.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
to be converted to Label Studio task format.
filename
str
Path to save Label Studio annotations (.json
).
sleap_io/io/labelstudio.py
def write_labels(labels: Labels, filename: str):\n \"\"\"Convert and save a SLEAP `Labels` object to a Label Studio `.json` file.\n\n Args:\n labels: SLEAP `Labels` to be converted to Label Studio task format.\n filename: Path to save Label Studio annotations (`.json`).\n \"\"\"\n\n def _encode(obj):\n if type(obj).__name__ == \"uint64\":\n return int(obj)\n\n ls_dicts = convert_labels(labels)\n with open(filename, \"w\") as f:\n json.dump(ls_dicts, f, indent=4, default=_encode)\n
"},{"location":"reference/sleap_io/io/main/","title":"main","text":""},{"location":"reference/sleap_io/io/main/#sleap_io.io.main","title":"sleap_io.io.main
","text":"This module contains high-level wrappers for utilizing different I/O backends.
Modules:
Name Descriptionjabs
This module handles direct I/O operations for working with JABS files.
labelstudio
This module handles direct I/O operations for working with Labelstudio files.
nwb
Functions to write and read from the neurodata without borders (NWB) format.
slp
This module handles direct I/O operations for working with .slp files.
video_writing
Utilities for writing videos.
Functions:
Name Descriptionload_file
Load a file and return the appropriate object.
load_jabs
Read JABS-style predictions from a file and return a Labels
object.
load_labelstudio
Read Label Studio-style annotations from a file and return a Labels
object.
load_nwb
Load an NWB dataset as a SLEAP Labels
object.
load_slp
Load a SLEAP dataset.
load_video
Load a video file.
save_file
Save a file based on the extension.
save_jabs
Save a SLEAP dataset to JABS pose file format.
save_labelstudio
Save a SLEAP dataset to Label Studio format.
save_nwb
Save a SLEAP dataset to NWB format.
save_slp
Save a SLEAP dataset to a .slp
file.
save_video
Write a list of frames to a video file.
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_file","title":"load_file(filename, format=None, **kwargs)
","text":"Load a file and return the appropriate object.
Parameters:
Name Type Description Defaultfilename
str | Path
Path to a file.
requiredformat
Optional[str]
Optional format to load as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\" and \"video\".
None
Returns:
Type DescriptionUnion[Labels, Video]
A Labels
or Video
object.
sleap_io/io/main.py
def load_file(\n filename: str | Path, format: Optional[str] = None, **kwargs\n) -> Union[Labels, Video]:\n \"\"\"Load a file and return the appropriate object.\n\n Args:\n filename: Path to a file.\n format: Optional format to load as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\", \"jabs\"\n and \"video\".\n\n Returns:\n A `Labels` or `Video` object.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"json\"\n elif filename.endswith(\".h5\"):\n format = \"jabs\"\n else:\n for vid_ext in Video.EXTS:\n if filename.endswith(vid_ext):\n format = \"video\"\n break\n if format is None:\n raise ValueError(f\"Could not infer format from filename: '{filename}'.\")\n\n if filename.endswith(\".slp\"):\n return load_slp(filename, **kwargs)\n elif filename.endswith(\".nwb\"):\n return load_nwb(filename, **kwargs)\n elif filename.endswith(\".json\"):\n return load_labelstudio(filename, **kwargs)\n elif filename.endswith(\".h5\"):\n return load_jabs(filename, **kwargs)\n elif format == \"video\":\n return load_video(filename, **kwargs)\n
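A brief sketch of inferred versus explicit formats; the file paths are hypothetical:
from sleap_io.io.main import load_file

labels = load_file("predictions.slp")             # format inferred from ".slp"
video = load_file("session.mp4", format="video")  # forced to the video loader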
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_jabs","title":"load_jabs(filename, skeleton=None)
","text":"Read JABS-style predictions from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the jabs h5 pose file.
requiredskeleton
Optional[Skeleton]
An optional Skeleton
object.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_jabs(filename: str, skeleton: Optional[Skeleton] = None) -> Labels:\n \"\"\"Read JABS-style predictions from a file and return a `Labels` object.\n\n Args:\n filename: Path to the jabs h5 pose file.\n skeleton: An optional `Skeleton` object.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return jabs.read_labels(filename, skeleton=skeleton)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_labelstudio","title":"load_labelstudio(filename, skeleton=None)
","text":"Read Label Studio-style annotations from a file and return a Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to the label-studio annotation file in JSON format.
requiredskeleton
Optional[Union[Skeleton, list[str]]]
An optional Skeleton
object or list of node names. If not provided (the default), skeleton will be inferred from the data. It may be useful to provide this so the keypoint label types can be filtered to just the ones in the skeleton.
None
Returns:
Type DescriptionLabels
Parsed labels as a Labels
instance.
sleap_io/io/main.py
def load_labelstudio(\n filename: str, skeleton: Optional[Union[Skeleton, list[str]]] = None\n) -> Labels:\n \"\"\"Read Label Studio-style annotations from a file and return a `Labels` object.\n\n Args:\n filename: Path to the label-studio annotation file in JSON format.\n skeleton: An optional `Skeleton` object or list of node names. If not provided\n (the default), skeleton will be inferred from the data. It may be useful to\n provide this so the keypoint label types can be filtered to just the ones in\n the skeleton.\n\n Returns:\n Parsed labels as a `Labels` instance.\n \"\"\"\n return labelstudio.read_labels(filename, skeleton=skeleton)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_nwb","title":"load_nwb(filename)
","text":"Load an NWB dataset as a SLEAP Labels
object.
Parameters:
Name Type Description Defaultfilename
str
Path to a NWB file (.nwb
).
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_nwb(filename: str) -> Labels:\n \"\"\"Load an NWB dataset as a SLEAP `Labels` object.\n\n Args:\n filename: Path to a NWB file (`.nwb`).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return nwb.read_nwb(filename)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_slp","title":"load_slp(filename, open_videos=True)
","text":"Load a SLEAP dataset.
Parameters:
Name Type Description Defaultfilename
str
Path to a SLEAP labels file (.slp
).
open_videos
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type DescriptionLabels
The dataset as a Labels
object.
sleap_io/io/main.py
def load_slp(filename: str, open_videos: bool = True) -> Labels:\n \"\"\"Load a SLEAP dataset.\n\n Args:\n filename: Path to a SLEAP labels file (`.slp`).\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The dataset as a `Labels` object.\n \"\"\"\n return slp.read_labels(filename, open_videos=open_videos)\n
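For example, assuming a hypothetical labels.v001.slp file:
from sleap_io.io.main import load_slp

# Skip opening video backends when the source movies are not available locally.
labels = load_slp("labels.v001.slp", open_videos=False)
print(len(labels.labeled_frames))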
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.load_video","title":"load_video(filename, **kwargs)
","text":"Load a video file.
Parameters:
Name Type Description Defaultfilename
str
The filename(s) of the video. Supported extensions: \"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
requiredReturns:
Type DescriptionVideo
A Video
object.
sleap_io/io/main.py
def load_video(filename: str, **kwargs) -> Video:\n \"\"\"Load a video file.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n\n Returns:\n A `Video` object.\n \"\"\"\n return Video.from_filename(filename, **kwargs)\n
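A small sketch with a hypothetical file:
from sleap_io.io.main import load_video

video = load_video("session.mp4")
print(video.shape)   # (frames, height, width, channels)
frame = video[0]     # frames are returned as numpy arrays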
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_file","title":"save_file(labels, filename, format=None, **kwargs)
","text":"Save a file based on the extension.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str | Path
Path to save labels to.
requiredformat
Optional[str]
Optional format to save as. If not provided, will be inferred from the file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and \"jabs\".
None
Source code in sleap_io/io/main.py
def save_file(\n labels: Labels, filename: str | Path, format: Optional[str] = None, **kwargs\n):\n \"\"\"Save a file based on the extension.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to.\n format: Optional format to save as. If not provided, will be inferred from the\n file extension. Available formats are: \"slp\", \"nwb\", \"labelstudio\" and\n \"jabs\".\n \"\"\"\n if isinstance(filename, Path):\n filename = str(filename)\n\n if format is None:\n if filename.endswith(\".slp\"):\n format = \"slp\"\n elif filename.endswith(\".nwb\"):\n format = \"nwb\"\n elif filename.endswith(\".json\"):\n format = \"labelstudio\"\n elif \"pose_version\" in kwargs:\n format = \"jabs\"\n\n if format == \"slp\":\n save_slp(labels, filename, **kwargs)\n elif format == \"nwb\":\n save_nwb(labels, filename, **kwargs)\n elif format == \"labelstudio\":\n save_labelstudio(labels, filename, **kwargs)\n elif format == \"jabs\":\n pose_version = kwargs.pop(\"pose_version\", 5)\n root_folder = kwargs.pop(\"root_folder\", filename)\n save_jabs(labels, pose_version=pose_version, root_folder=root_folder)\n else:\n raise ValueError(f\"Unknown format '{format}' for filename: '{filename}'.\")\n
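A quick sketch of dispatching on the extension or on keyword arguments; the paths are placeholders:
from sleap_io.io.main import load_slp, save_file

labels = load_slp("labels.v001.slp")
save_file(labels, "labels_copy.slp")                         # inferred from ".slp"
save_file(labels, "annotations.json", format="labelstudio")  # Label Studio JSON
save_file(labels, "jabs_output", pose_version=5)             # routed to the JABS writer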
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_jabs","title":"save_jabs(labels, pose_version, root_folder=None)
","text":"Save a SLEAP dataset to JABS pose file format.
Parameters:
Name Type Description Defaultlabels
Labels
SLEAP Labels
object.
pose_version
int
The JABS pose version to write data out.
requiredroot_folder
Optional[str]
Optional root folder where the files should be saved.
None
Note: Filenames for JABS poses are based on video filenames.
Source code in sleap_io/io/main.py
def save_jabs(labels: Labels, pose_version: int, root_folder: Optional[str] = None):\n \"\"\"Save a SLEAP dataset to JABS pose file format.\n\n Args:\n labels: SLEAP `Labels` object.\n pose_version: The JABS pose version to write data out.\n root_folder: Optional root folder where the files should be saved.\n\n Note:\n Filenames for JABS poses are based on video filenames.\n \"\"\"\n jabs.write_labels(labels, pose_version, root_folder)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_labelstudio","title":"save_labelstudio(labels, filename)
","text":"Save a SLEAP dataset to Label Studio format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .json
.
sleap_io/io/main.py
def save_labelstudio(labels: Labels, filename: str):\n \"\"\"Save a SLEAP dataset to Label Studio format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.json`.\n \"\"\"\n labelstudio.write_labels(labels, filename)\n
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_nwb","title":"save_nwb(labels, filename, append=True)
","text":"Save a SLEAP dataset to NWB format.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to NWB file to save to. Must end in .nwb
.
append
bool
If True
(the default), append to existing NWB file. File will be created if it does not exist.
True
See also: nwb.write_nwb, nwb.append_nwb
Source code in sleap_io/io/main.py
def save_nwb(labels: Labels, filename: str, append: bool = True):\n \"\"\"Save a SLEAP dataset to NWB format.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to NWB file to save to. Must end in `.nwb`.\n append: If `True` (the default), append to existing NWB file. File will be\n created if it does not exist.\n\n See also: nwb.write_nwb, nwb.append_nwb\n \"\"\"\n if append and Path(filename).exists():\n nwb.append_nwb(labels, filename)\n else:\n nwb.write_nwb(labels, filename)\n
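For example (hypothetical paths):
from sleap_io.io.main import load_slp, save_nwb

labels = load_slp("predictions.slp")
save_nwb(labels, "poses.nwb")                      # appends if poses.nwb already exists
save_nwb(labels, "poses_fresh.nwb", append=False)  # always writes a new file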
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_slp","title":"save_slp(labels, filename, embed=None)
","text":"Save a SLEAP dataset to a .slp
file.
Parameters:
Name Type Description Defaultlabels
Labels
A SLEAP Labels
object (see load_slp
).
filename
str
Path to save labels to ending with .slp
.
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/io/main.py
def save_slp(\n labels: Labels,\n filename: str,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Save a SLEAP dataset to a `.slp` file.\n\n Args:\n labels: A SLEAP `Labels` object (see `load_slp`).\n filename: Path to save labels to ending with `.slp`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n return slp.write_labels(filename, labels, embed=embed)\n
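A sketch of the common embed options; the filenames are placeholders:
from sleap_io.io.main import load_slp, save_slp

labels = load_slp("labels.v001.slp")
save_slp(labels, "labels.v002.slp")                    # re-save, keeping any embedded frames
save_slp(labels, "labels.v002.pkg.slp", embed="user")  # embed images for user-labeled frames
save_slp(labels, "subset.pkg.slp", embed=[(labels.videos[0], 0)])  # explicit frames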
"},{"location":"reference/sleap_io/io/main/#sleap_io.io.main.save_video","title":"save_video(frames, filename, fps=30, pixelformat='yuv420p', codec='libx264', crf=25, preset='superfast', output_params=None)
","text":"Write a list of frames to a video file.
Parameters:
Name Type Description Defaultframes
ndarray | Video
Sequence of frames to write to video. Each frame should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels).
requiredfilename
str | Path
Path to output video file.
requiredfps
float
Frames per second. Defaults to 30.
30
pixelformat
str
Pixel format for video. Defaults to \"yuv420p\".
'yuv420p'
codec
str
Codec to use for encoding. Defaults to \"libx264\".
'libx264'
crf
int
Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not \"libx264\".
25
preset
str
H264 encoding preset. Defaults to \"superfast\". No effect if codec is not \"libx264\".
'superfast'
output_params
list | None
Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use ffmpeg -h encoder=libx264
to see all options for libx264 output_params.
None
See also: sio.VideoWriter
sleap_io/io/main.py
def save_video(\n frames: np.ndarray | Video,\n filename: str | Path,\n fps: float = 30,\n pixelformat: str = \"yuv420p\",\n codec: str = \"libx264\",\n crf: int = 25,\n preset: str = \"superfast\",\n output_params: list | None = None,\n):\n \"\"\"Write a list of frames to a video file.\n\n Args:\n frames: Sequence of frames to write to video. Each frame should be a 2D or 3D\n numpy array with dimensions (height, width) or (height, width, channels).\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n See also: `sio.VideoWriter`\n \"\"\"\n if output_params is None:\n output_params = []\n\n with video_writing.VideoWriter(\n filename,\n fps=fps,\n pixelformat=pixelformat,\n codec=codec,\n crf=crf,\n preset=preset,\n output_params=output_params,\n ) as writer:\n for frame in frames:\n writer(frame)\n
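A minimal sketch writing synthetic frames; a real use case would pass frames read from a Video:
import numpy as np
from sleap_io.io.main import save_video

frames = np.full((30, 256, 256), 128, dtype="uint8")  # 30 gray 256x256 frames
save_video(frames, "preview.mp4", fps=15, crf=18)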
"},{"location":"reference/sleap_io/io/nwb/","title":"nwb","text":""},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb","title":"sleap_io.io.nwb
","text":"Functions to write and read from the neurodata without borders (NWB) format.
Functions:
Name Descriptionappend_nwb
Append a SLEAP Labels
object to an existing NWB data file.
append_nwb_data
Append data from a Labels object to an in-memory nwb file.
build_pose_estimation_container_for_track
Create a PoseEstimation container for a track.
build_track_pose_estimation_list
Build a list of PoseEstimationSeries from tracks.
convert_predictions_to_dataframe
Convert predictions data to a Pandas dataframe.
get_processing_module_for_video
Auxiliary function to create a processing module.
get_timestamps
Return a vector of timestamps for a PoseEstimationSeries
.
read_nwb
Read an NWB formatted file to a SLEAP Labels
object.
write_nwb
Write labels to an nwb file and save it to the nwbfile_path given.
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.append_nwb","title":"append_nwb(labels, filename, pose_estimation_metadata=None)
","text":"Append a SLEAP Labels
object to an existing NWB data file.
Parameters:
Name Type Description Defaultlabels
Labels
A general Labels
object.
filename
str
The path to the NWB file.
requiredpose_estimation_metadata
Optional[dict]
Metadata for pose estimation. See append_nwb_data
for details.
None
See also: append_nwb_data
Source code in sleap_io/io/nwb.py
def append_nwb(\n labels: Labels, filename: str, pose_estimation_metadata: Optional[dict] = None\n):\n \"\"\"Append a SLEAP `Labels` object to an existing NWB data file.\n\n Args:\n labels: A general `Labels` object.\n filename: The path to the NWB file.\n pose_estimation_metadata: Metadata for pose estimation. See `append_nwb_data`\n for details.\n\n See also: append_nwb_data\n \"\"\"\n with NWBHDF5IO(filename, mode=\"a\", load_namespaces=True) as io:\n nwb_file = io.read()\n nwb_file = append_nwb_data(\n labels, nwb_file, pose_estimation_metadata=pose_estimation_metadata\n )\n io.write(nwb_file)\n
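A sketch assuming an existing session.nwb file; the paths and sampling rate are placeholder values:
from sleap_io.io.main import load_slp
from sleap_io.io.nwb import append_nwb

labels = load_slp("predictions.slp")
# Pass timestamps via "video_timestamps" or a sampling rate via "video_sample_rate".
metadata = {"video_sample_rate": 30.0}  # Hz
append_nwb(labels, "session.nwb", pose_estimation_metadata=metadata)  # file must already exist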
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.append_nwb_data","title":"append_nwb_data(labels, nwbfile, pose_estimation_metadata=None)
","text":"Append data from a Labels object to an in-memory nwb file.
Parameters:
Name Type Description Defaultlabels
Labels
A general labels object
requirednwbfile
NWBFile
An in-memory nwbfile where the data is to be appended.
requiredpose_estimation_metadata
Optional[dict]
This argument has a dual purpose:
1) It can be used to pass time information about the video, which is necessary for synchronizing frames in pose estimation tracking to other modalities. Either the video timestamps can be passed with the key video_timestamps, or the sampling rate with the key video_sample_rate.
e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps) or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz
2) The other use of this dictionary is to overwrite sleap-io default arguments for the PoseEstimation container. See https://github.com/rly/ndx-pose for a full list of arguments.
None
Returns:
Type DescriptionNWBFile
An in-memory nwbfile with the data from the labels object appended.
Source code in sleap_io/io/nwb.py
def append_nwb_data(\n labels: Labels, nwbfile: NWBFile, pose_estimation_metadata: Optional[dict] = None\n) -> NWBFile:\n \"\"\"Append data from a Labels object to an in-memory nwb file.\n\n Args:\n labels: A general labels object\n nwbfile: And in-memory nwbfile where the data is to be appended.\n pose_estimation_metadata: This argument has a dual purpose:\n\n 1) It can be used to pass time information about the video which is\n necessary for synchronizing frames in pose estimation tracking to other\n modalities. Either the video timestamps can be passed to\n This can be used to pass the timestamps with the key `video_timestamps`\n or the sampling rate with key`video_sample_rate`.\n\n e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps)\n or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz\n\n 2) The other use of this dictionary is to ovewrite sleap-io default\n arguments for the PoseEstimation container.\n see https://github.com/rly/ndx-pose for a full list or arguments.\n\n Returns:\n An in-memory nwbfile with the data from the labels object appended.\n \"\"\"\n pose_estimation_metadata = pose_estimation_metadata or dict()\n\n # Extract default metadata\n provenance = labels.provenance\n default_metadata = dict(scorer=str(provenance))\n sleap_version = provenance.get(\"sleap_version\", None)\n default_metadata[\"source_software_version\"] = sleap_version\n\n labels_data_df = convert_predictions_to_dataframe(labels)\n\n # For every video create a processing module\n for video_index, video in enumerate(labels.videos):\n video_path = Path(video.filename)\n processing_module_name = f\"SLEAP_VIDEO_{video_index:03}_{video_path.stem}\"\n nwb_processing_module = get_processing_module_for_video(\n processing_module_name, nwbfile\n )\n\n # Propagate video metadata\n default_metadata[\"original_videos\"] = [f\"{video.filename}\"] # type: ignore\n default_metadata[\"labeled_videos\"] = [f\"{video.filename}\"] # type: ignore\n\n # Overwrite default with the user provided metadata\n default_metadata.update(pose_estimation_metadata)\n\n # For every track in that video create a PoseEstimation container\n name_of_tracks_in_video = (\n labels_data_df[video.filename]\n .columns.get_level_values(\"track_name\")\n .unique()\n )\n\n for track_index, track_name in enumerate(name_of_tracks_in_video):\n pose_estimation_container = build_pose_estimation_container_for_track(\n labels_data_df,\n labels,\n track_name,\n video,\n default_metadata,\n )\n nwb_processing_module.add(pose_estimation_container)\n\n return nwbfile\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.build_pose_estimation_container_for_track","title":"build_pose_estimation_container_for_track(labels_data_df, labels, track_name, video, pose_estimation_metadata)
","text":"Create a PoseEstimation container for a track.
Parameters:
Name Type Description Defaultlabels_data_df
DataFrame
A pandas object with the data corresponding to the predicted instances associated with this labels object.
requiredlabels
Labels
A general labels object
requiredtrack_name
str
The name of the track in labels.tracks
requiredvideo
Video
The video to which data belongs to
requiredReturns:
Name Type DescriptionPoseEstimation
PoseEstimation
A PoseEstimation multicontainer where the time series of all the node trajectories in the track are stored. One time series per node.
Source code in sleap_io/io/nwb.py
def build_pose_estimation_container_for_track(\n labels_data_df: pd.DataFrame,\n labels: Labels,\n track_name: str,\n video: Video,\n pose_estimation_metadata: dict,\n) -> PoseEstimation:\n \"\"\"Create a PoseEstimation container for a track.\n\n Args:\n labels_data_df (pd.DataFrame): A pandas object with the data corresponding\n to the predicted instances associated to this labels object.\n labels (Labels): A general labels object\n track_name (str): The name of the track in labels.tracks\n video (Video): The video to which data belongs to\n\n Returns:\n PoseEstimation: A PoseEstimation multicontainer where the time series\n of all the node trajectories in the track are stored. One time series per\n node.\n \"\"\"\n # Copy metadata for local use and modification\n pose_estimation_metadata_copy = deepcopy(pose_estimation_metadata)\n video_path = Path(video.filename)\n\n all_track_skeletons = (\n labels_data_df[video.filename]\n .columns.get_level_values(\"skeleton_name\")\n .unique()\n )\n\n # Assuming only one skeleton per track\n skeleton_name = all_track_skeletons[0]\n skeleton = next(\n skeleton for skeleton in labels.skeletons if skeleton.name == skeleton_name\n )\n\n track_data_df = labels_data_df[\n video.filename,\n skeleton.name,\n track_name,\n ]\n\n # Combine each node's PoseEstimationSeries to create a PoseEstimation container\n timestamps = pose_estimation_metadata_copy.pop(\"video_timestamps\", None)\n sample_rate = pose_estimation_metadata_copy.pop(\"video_sample_rate\", 1.0)\n if timestamps is None:\n # Keeps backward compatbility.\n timestamps = np.arange(track_data_df.shape[0]) * sample_rate\n else:\n timestamps = np.asarray(timestamps)\n\n pose_estimation_series_list = build_track_pose_estimation_list(\n track_data_df, timestamps\n )\n\n # Arrange and mix metadata\n pose_estimation_container_kwargs = dict(\n name=f\"track={track_name}\",\n description=f\"Estimated positions of {skeleton.name} in video {video_path.name}\",\n pose_estimation_series=pose_estimation_series_list,\n nodes=skeleton.node_names,\n edges=np.array(skeleton.edge_inds).astype(\"uint64\"),\n source_software=\"SLEAP\",\n # dimensions=np.array([[video.backend.height, video.backend.width]]),\n )\n\n pose_estimation_container_kwargs.update(**pose_estimation_metadata_copy)\n pose_estimation_container = PoseEstimation(**pose_estimation_container_kwargs)\n\n return pose_estimation_container\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.build_track_pose_estimation_list","title":"build_track_pose_estimation_list(track_data_df, timestamps)
","text":"Build a list of PoseEstimationSeries from tracks.
Parameters:
Name Type Description Defaulttrack_data_df
DataFrame
A pandas DataFrame object containing the trajectories for all the nodes associated with a specific track.
requiredReturns:
Type DescriptionList[PoseEstimationSeries]
List[PoseEstimationSeries]: The list of all the PoseEstimationSeries. One for each node.
Source code in sleap_io/io/nwb.py
def build_track_pose_estimation_list(\n track_data_df: pd.DataFrame, timestamps: ArrayLike\n) -> List[PoseEstimationSeries]:\n \"\"\"Build a list of PoseEstimationSeries from tracks.\n\n Args:\n track_data_df (pd.DataFrame): A pandas DataFrame object containing the\n trajectories for all the nodes associated with a specific track.\n\n Returns:\n List[PoseEstimationSeries]: The list of all the PoseEstimationSeries.\n One for each node.\n \"\"\"\n name_of_nodes_in_track = track_data_df.columns.get_level_values(\n \"node_name\"\n ).unique()\n\n pose_estimation_series_list: List[PoseEstimationSeries] = []\n for node_name in name_of_nodes_in_track:\n # Drop data with missing values\n data_for_node = track_data_df[node_name].dropna(axis=\"index\", how=\"any\")\n\n node_trajectory = data_for_node[[\"x\", \"y\"]].to_numpy()\n confidence = data_for_node[\"score\"].to_numpy()\n\n reference_frame = (\n \"The coordinates are in (x, y) relative to the top-left of the image. \"\n \"Coordinates refer to the midpoint of the pixel. \"\n \"That is, t the midpoint of the top-left pixel is at (0, 0), whereas \"\n \"the top-left corner of that same pixel is at (-0.5, -0.5).\"\n )\n\n pose_estimation_kwargs = dict(\n name=f\"{node_name}\",\n description=f\"Sequential trajectory of {node_name}.\",\n data=node_trajectory,\n unit=\"pixels\",\n reference_frame=reference_frame,\n confidence=confidence,\n confidence_definition=\"Point-wise confidence scores.\",\n )\n\n # Add timestamps or only rate if the timestamps are uniform\n frames = data_for_node.index.values\n timestamps_for_data = timestamps[frames] # type: ignore[index]\n sample_periods = np.diff(timestamps_for_data)\n if sample_periods.size == 0:\n rate = None # This is the case with only one data point\n else:\n # Difference below 0.1 ms do not matter for behavior in videos\n uniform_samples = np.unique(sample_periods.round(5)).size == 1\n rate = 1 / sample_periods[0] if uniform_samples else None\n\n if rate:\n # Video sample rates are ints but nwb expect floats\n rate = float(int(rate))\n pose_estimation_kwargs.update(\n rate=rate, starting_time=timestamps_for_data[0]\n )\n else:\n pose_estimation_kwargs.update(timestamps=timestamps_for_data)\n\n # Build the pose estimation object and attach it to the list\n pose_estimation_series = PoseEstimationSeries(**pose_estimation_kwargs)\n pose_estimation_series_list.append(pose_estimation_series)\n\n return pose_estimation_series_list\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.convert_predictions_to_dataframe","title":"convert_predictions_to_dataframe(labels)
","text":"Convert predictions data to a Pandas dataframe.
Parameters:
Name Type Description Defaultlabels
Labels
A general label object.
requiredReturns:
Type DescriptionDataFrame
pd.DataFrame: A pandas DataFrame with structured data and hierarchical columns. The column hierarchy is: \"video_path\", \"skeleton_name\", \"track_name\", \"node_name\", and it is indexed by the frames.
Raises:
Type DescriptionValueError
If no frames in the label objects contain predicted instances.
Source code in sleap_io/io/nwb.py
def convert_predictions_to_dataframe(labels: Labels) -> pd.DataFrame:\n \"\"\"Convert predictions data to a Pandas dataframe.\n\n Args:\n labels: A general label object.\n\n Returns:\n pd.DataFrame: A pandas data frame with the structured data with\n hierarchical columns. The column hierarchy is:\n \"video_path\",\n \"skeleton_name\",\n \"track_name\",\n \"node_name\",\n And it is indexed by the frames.\n\n Raises:\n ValueError: If no frames in the label objects contain predicted instances.\n \"\"\"\n # Form pairs of labeled_frames and predicted instances\n labeled_frames = labels.labeled_frames\n all_frame_instance_tuples = (\n (label_frame, instance) # type: ignore\n for label_frame in labeled_frames\n for instance in label_frame.predicted_instances\n )\n\n # Extract the data\n data_list = list()\n for labeled_frame, instance in all_frame_instance_tuples:\n # Traverse the nodes of the instances's skeleton\n skeleton = instance.skeleton\n for node in skeleton.nodes:\n row_dict = dict(\n frame_idx=labeled_frame.frame_idx,\n x=instance.points[node].x,\n y=instance.points[node].y,\n score=instance.points[node].score, # type: ignore[attr-defined]\n node_name=node.name,\n skeleton_name=skeleton.name,\n track_name=instance.track.name if instance.track else \"untracked\",\n video_path=labeled_frame.video.filename,\n )\n data_list.append(row_dict)\n\n if not data_list:\n raise ValueError(\"No predicted instances found in labels object\")\n\n labels_df = pd.DataFrame(data_list)\n\n # Reformat the data with columns for dict-like hierarchical data access.\n index = [\n \"skeleton_name\",\n \"track_name\",\n \"node_name\",\n \"video_path\",\n \"frame_idx\",\n ]\n\n labels_tidy_df = (\n labels_df.set_index(index)\n .unstack(level=[0, 1, 2, 3])\n .swaplevel(0, -1, axis=1) # video_path on top while x, y score on bottom\n .sort_index(axis=1) # Better format for columns\n .sort_index(axis=0) # Sorts by frames\n )\n\n return labels_tidy_df\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.get_processing_module_for_video","title":"get_processing_module_for_video(processing_module_name, nwbfile)
","text":"Auxiliary function to create a processing module.
Checks whether the processing module exists and creates it if not available.
Parameters:
Name Type Description Defaultprocessing_module_name
str
The name of the processing module.
requirednwbfile
NWBFile
The nwbfile to attach the processing module to.
requiredReturns:
Name Type DescriptionProcessingModule
ProcessingModule
An nwb processing module with the desired name.
Source code in sleap_io/io/nwb.py
def get_processing_module_for_video(\n processing_module_name: str, nwbfile: NWBFile\n) -> ProcessingModule:\n \"\"\"Auxiliary function to create a processing module.\n\n Checks for the processing module existence and creates if not available.\n\n Args:\n processing_module_name (str): The name of the processing module.\n nwbfile (NWBFile): The nwbfile to attach the processing module to.\n\n Returns:\n ProcessingModule: An nwb processing module with the desired name.\n \"\"\"\n description = \"Processed SLEAP data\"\n processing_module = (\n nwbfile.processing[processing_module_name]\n if processing_module_name in nwbfile.processing\n else nwbfile.create_processing_module(\n name=processing_module_name, description=description\n )\n )\n return processing_module\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.get_timestamps","title":"get_timestamps(series)
","text":"Return a vector of timestamps for a PoseEstimationSeries
.
sleap_io/io/nwb.py
def get_timestamps(series: PoseEstimationSeries) -> np.ndarray:\n \"\"\"Return a vector of timestamps for a `PoseEstimationSeries`.\"\"\"\n if series.timestamps is not None:\n return np.asarray(series.timestamps)\n else:\n return np.arange(series.data.shape[0]) * series.rate + series.starting_time\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.read_nwb","title":"read_nwb(path)
","text":"Read an NWB formatted file to a SLEAP Labels
object.
Parameters:
Name Type Description Defaultpath
str
Path to an NWB file (.nwb
).
Returns:
Type DescriptionLabels
A Labels
object.
sleap_io/io/nwb.py
def read_nwb(path: str) -> Labels:\n \"\"\"Read an NWB formatted file to a SLEAP `Labels` object.\n\n Args:\n path: Path to an NWB file (`.nwb`).\n\n Returns:\n A `Labels` object.\n \"\"\"\n with NWBHDF5IO(path, mode=\"r\", load_namespaces=True) as io:\n read_nwbfile = io.read()\n nwb_file = read_nwbfile.processing\n\n # Get list of videos\n video_keys: List[str] = [key for key in nwb_file.keys() if \"SLEAP_VIDEO\" in key]\n video_tracks = dict()\n\n # Get track keys\n test_processing_module: ProcessingModule = nwb_file[video_keys[0]]\n track_keys: List[str] = list(test_processing_module.fields[\"data_interfaces\"])\n\n # Get track\n test_pose_estimation: PoseEstimation = test_processing_module[track_keys[0]]\n node_names = test_pose_estimation.nodes[:]\n edge_inds = test_pose_estimation.edges[:]\n\n for processing_module in nwb_file.values():\n # Get track keys\n _track_keys: List[str] = list(processing_module.fields[\"data_interfaces\"])\n is_tracked: bool = re.sub(\"[0-9]+\", \"\", _track_keys[0]) == \"track\"\n\n # Figure out the max number of frames and the canonical timestamps\n timestamps = np.empty(())\n for track_key in _track_keys:\n for node_name in node_names:\n pose_estimation_series = processing_module[track_key][node_name]\n timestamps = np.union1d(\n timestamps, get_timestamps(pose_estimation_series)\n )\n timestamps = np.sort(timestamps)\n\n # Recreate Labels numpy (same as output of Labels.numpy())\n n_tracks = len(_track_keys)\n n_frames = len(timestamps)\n n_nodes = len(node_names)\n tracks_numpy = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, np.float32)\n confidence = np.full((n_frames, n_tracks, n_nodes), np.nan, np.float32)\n for track_idx, track_key in enumerate(_track_keys):\n pose_estimation = processing_module[track_key]\n\n for node_idx, node_name in enumerate(node_names):\n pose_estimation_series = pose_estimation[node_name]\n frame_inds = np.searchsorted(\n timestamps, get_timestamps(pose_estimation_series)\n )\n tracks_numpy[frame_inds, track_idx, node_idx, :] = (\n pose_estimation_series.data[:]\n )\n confidence[frame_inds, track_idx, node_idx] = (\n pose_estimation_series.confidence[:]\n )\n\n video_tracks[Path(pose_estimation.original_videos[0]).as_posix()] = (\n tracks_numpy,\n confidence,\n is_tracked,\n )\n\n # Create skeleton\n skeleton = Skeleton(\n nodes=node_names,\n edges=edge_inds,\n )\n\n # Add instances to labeled frames\n lfs = []\n for video_fn, (tracks_numpy, confidence, is_tracked) in video_tracks.items():\n video = Video(filename=video_fn)\n n_frames, n_tracks, n_nodes, _ = tracks_numpy.shape\n tracks = [Track(name=f\"track{track_idx}\") for track_idx in range(n_tracks)]\n for frame_idx, (frame_pts, frame_confs) in enumerate(\n zip(tracks_numpy, confidence)\n ):\n insts: List[Union[Instance, PredictedInstance]] = []\n for track, (inst_pts, inst_confs) in zip(\n tracks, zip(frame_pts, frame_confs)\n ):\n if np.isnan(inst_pts).all():\n continue\n insts.append(\n PredictedInstance.from_numpy(\n points=inst_pts, # (n_nodes, 2)\n point_scores=inst_confs, # (n_nodes,)\n instance_score=inst_confs.mean(), # ()\n skeleton=skeleton,\n track=track if is_tracked else None,\n )\n )\n if len(insts) > 0:\n lfs.append(\n LabeledFrame(video=video, frame_idx=frame_idx, instances=insts)\n )\n labels = Labels(lfs)\n labels.provenance[\"filename\"] = path\n return labels\n
"},{"location":"reference/sleap_io/io/nwb/#sleap_io.io.nwb.write_nwb","title":"write_nwb(labels, nwbfile_path, nwb_file_kwargs=None, pose_estimation_metadata=None)
","text":"Write labels to an nwb file and save it to the nwbfile_path given.
Parameters:
Name Type Description Defaultlabels
Labels
A general Labels
object.
nwbfile_path
str
The path where the nwb file is to be written.
requirednwb_file_kwargs
Optional[dict]
A dict containing metadata for the nwbfile. Example: nwb_file_kwargs = { 'session_description': 'your_session_description', 'identifier': 'your_session_identifier', } For a full list of possible values see: https://pynwb.readthedocs.io/en/stable/pynwb.file.html#pynwb.file.NWBFile
Defaults to None and default values are used to generate the nwb file.
None
pose_estimation_metadata
Optional[dict]
This argument has a dual purpose:
1) It can be used to pass time information about the video, which is necessary for synchronizing frames in pose estimation tracking to other modalities. Either the video timestamps can be passed with the key video_timestamps, or the sampling rate with the key video_sample_rate.
e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps) or pose_estimation_metadata[\"video_sample_rate\"] = 15 # In Hz
2) The other use of this dictionary is to overwrite sleap-io default arguments for the PoseEstimation container. See https://github.com/rly/ndx-pose for a full list of arguments.
None
Source code in sleap_io/io/nwb.py
def write_nwb(\n labels: Labels,\n nwbfile_path: str,\n nwb_file_kwargs: Optional[dict] = None,\n pose_estimation_metadata: Optional[dict] = None,\n):\n \"\"\"Write labels to an nwb file and save it to the nwbfile_path given.\n\n Args:\n labels: A general `Labels` object.\n nwbfile_path: The path where the nwb file is to be written.\n nwb_file_kwargs: A dict containing metadata to the nwbfile. Example:\n nwb_file_kwargs = {\n 'session_description: 'your_session_description',\n 'identifier': 'your session_identifier',\n }\n For a full list of possible values see:\n https://pynwb.readthedocs.io/en/stable/pynwb.file.html#pynwb.file.NWBFile\n\n Defaults to None and default values are used to generate the nwb file.\n\n pose_estimation_metadata: This argument has a dual purpose:\n\n 1) It can be used to pass time information about the video which is\n necessary for synchronizing frames in pose estimation tracking to other\n modalities. Either the video timestamps can be passed to\n This can be used to pass the timestamps with the key `video_timestamps`\n or the sampling rate with key`video_sample_rate`.\n\n e.g. pose_estimation_metadata[\"video_timestamps\"] = np.array(timestamps)\n or pose_estimation_metadata[\"video_sample_rate] = 15 # In Hz\n\n 2) The other use of this dictionary is to ovewrite sleap-io default\n arguments for the PoseEstimation container.\n see https://github.com/rly/ndx-pose for a full list or arguments.\n \"\"\"\n nwb_file_kwargs = nwb_file_kwargs or dict()\n\n # Add required values for nwbfile if not present\n session_description = nwb_file_kwargs.get(\n \"session_description\", \"Processed SLEAP pose data\"\n )\n session_start_time = nwb_file_kwargs.get(\n \"session_start_time\", datetime.datetime.now(datetime.timezone.utc)\n )\n identifier = nwb_file_kwargs.get(\"identifier\", str(uuid.uuid1()))\n\n nwb_file_kwargs.update(\n session_description=session_description,\n session_start_time=session_start_time,\n identifier=identifier,\n )\n\n nwbfile = NWBFile(**nwb_file_kwargs)\n nwbfile = append_nwb_data(labels, nwbfile, pose_estimation_metadata)\n\n with NWBHDF5IO(str(nwbfile_path), \"w\") as io:\n io.write(nwbfile)\n
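For example (the paths, identifier, and description are placeholders):
from sleap_io.io.main import load_slp
from sleap_io.io.nwb import write_nwb

labels = load_slp("predictions.slp")
write_nwb(
    labels,
    "session.nwb",
    nwb_file_kwargs={
        "session_description": "SLEAP pose tracking",
        "identifier": "subject-01_session-01",
    },
    pose_estimation_metadata={"video_sample_rate": 30.0},  # Hz
)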
"},{"location":"reference/sleap_io/io/slp/","title":"slp","text":""},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp","title":"sleap_io.io.slp
","text":"This module handles direct I/O operations for working with .slp files.
Classes:
Name DescriptionInstanceType
Enumeration of instance types to integers.
Functions:
Name Descriptionembed_frames
Embed frames in a SLEAP labels file.
embed_video
Embed frames of a video in a SLEAP labels file.
embed_videos
Embed videos in a SLEAP labels file.
make_video
Create a Video
object from a JSON dictionary.
read_instances
Read Instance
dataset in a SLEAP labels file.
read_labels
Read a SLEAP labels file.
read_metadata
Read metadata from a SLEAP labels file.
read_points
Read Point
dataset from a SLEAP labels file.
read_pred_points
Read PredictedPoint
dataset from a SLEAP labels file.
read_skeletons
Read Skeleton
dataset from a SLEAP labels file.
read_suggestions
Read SuggestionFrame
dataset in a SLEAP labels file.
read_tracks
Read Track
dataset in a SLEAP labels file.
read_videos
Read Video
dataset in a SLEAP labels file.
sanitize_filename
Sanitize a filename to a canonical posix-compatible format.
serialize_skeletons
Serialize a list of Skeleton
objects to JSON-compatible dicts.
video_to_dict
Convert a Video
object to a JSON-compatible dictionary.
write_labels
Write a SLEAP labels file.
write_lfs
Write labeled frames, instances and points to a SLEAP labels file.
write_metadata
Write metadata to a SLEAP labels file.
write_suggestions
Write track metadata to a SLEAP labels file.
write_tracks
Write track metadata to a SLEAP labels file.
write_videos
Write video metadata to a SLEAP labels file.
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.InstanceType","title":"InstanceType
","text":" Bases: IntEnum
Enumeration of instance types to integers.
Source code in sleap_io/io/slp.py
class InstanceType(IntEnum):\n \"\"\"Enumeration of instance types to integers.\"\"\"\n\n USER = 0\n PREDICTED = 1\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_frames","title":"embed_frames(labels_path, labels, embed, image_format='png')
","text":"Embed frames in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredlabels
Labels
A Labels
object to embed in the labels file.
embed
list[tuple[Video, int]]
A list of tuples of (video, frame_idx)
specifying the frames to embed.
image_format
str
The image format to use for embedding. Valid formats are \"png\" (the default), \"jpg\" or \"hdf5\".
'png'
Notes: This function will embed the frames in the labels file and update the Videos
and Labels
objects in place.
sleap_io/io/slp.py
def embed_frames(\n labels_path: str,\n labels: Labels,\n embed: list[tuple[Video, int]],\n image_format: str = \"png\",\n):\n \"\"\"Embed frames in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to embed in the labels file.\n embed: A list of tuples of `(video, frame_idx)` specifying the frames to embed.\n image_format: The image format to use for embedding. Valid formats are \"png\"\n (the default), \"jpg\" or \"hdf5\".\n\n Notes:\n This function will embed the frames in the labels file and update the `Videos`\n and `Labels` objects in place.\n \"\"\"\n to_embed_by_video = {}\n for video, frame_idx in embed:\n if video not in to_embed_by_video:\n to_embed_by_video[video] = []\n to_embed_by_video[video].append(frame_idx)\n\n for video in to_embed_by_video:\n to_embed_by_video[video] = np.unique(to_embed_by_video[video]).tolist()\n\n replaced_videos = {}\n for video, frame_inds in to_embed_by_video.items():\n video_ind = labels.videos.index(video)\n embedded_video = embed_video(\n labels_path,\n video,\n group=f\"video{video_ind}\",\n frame_inds=frame_inds,\n image_format=image_format,\n )\n\n labels.videos[video_ind] = embedded_video\n replaced_videos[video] = embedded_video\n\n if len(replaced_videos) > 0:\n labels.replace_videos(video_map=replaced_videos)\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_video","title":"embed_video(labels_path, video, group, frame_inds, image_format='png', fixed_length=True)
","text":"Embed frames of a video in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideo
Video
A Video
object to embed in the labels file.
group
str
The name of the group to store the embedded video in. Image data will be stored in a dataset named {group}/video
. Frame indices will be stored in a data set named {group}/frame_numbers
.
frame_inds
list[int]
A list of frame indices to embed.
requiredimage_format
str
The image format to use for embedding. Valid formats are \"png\" (the default), \"jpg\" or \"hdf5\".
'png'
fixed_length
bool
If True
(the default), the embedded images will be padded to the length of the largest image. If False
, the images will be stored as variable length, which is smaller but may not be supported by all readers.
True
Returns:
Type DescriptionVideo
An embedded Video
object.
If the video is already embedded, the original video will be returned. If not, a new Video
object will be created with the embedded data.
sleap_io/io/slp.py
def embed_video(\n labels_path: str,\n video: Video,\n group: str,\n frame_inds: list[int],\n image_format: str = \"png\",\n fixed_length: bool = True,\n) -> Video:\n \"\"\"Embed frames of a video in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n video: A `Video` object to embed in the labels file.\n group: The name of the group to store the embedded video in. Image data will be\n stored in a dataset named `{group}/video`. Frame indices will be stored\n in a data set named `{group}/frame_numbers`.\n frame_inds: A list of frame indices to embed.\n image_format: The image format to use for embedding. Valid formats are \"png\"\n (the default), \"jpg\" or \"hdf5\".\n fixed_length: If `True` (the default), the embedded images will be padded to the\n length of the largest image. If `False`, the images will be stored as\n variable length, which is smaller but may not be supported by all readers.\n\n Returns:\n An embedded `Video` object.\n\n If the video is already embedded, the original video will be returned. If not,\n a new `Video` object will be created with the embedded data.\n \"\"\"\n # Load the image data and optionally encode it.\n imgs_data = []\n for frame_idx in frame_inds:\n frame = video[frame_idx]\n\n if image_format == \"hdf5\":\n img_data = frame\n else:\n if \"cv2\" in sys.modules:\n img_data = np.squeeze(\n cv2.imencode(\".\" + image_format, frame)[1]\n ).astype(\"int8\")\n else:\n if frame.shape[-1] == 1:\n frame = frame.squeeze(axis=-1)\n img_data = np.frombuffer(\n iio.imwrite(\"<bytes>\", frame, extension=\".\" + image_format),\n dtype=\"int8\",\n )\n\n imgs_data.append(img_data)\n\n # Write the image data to the labels file.\n with h5py.File(labels_path, \"a\") as f:\n if image_format == \"hdf5\":\n f.create_dataset(\n f\"{group}/video\", data=imgs_data, compression=\"gzip\", chunks=True\n )\n else:\n if fixed_length:\n ds = f.create_dataset(\n f\"{group}/video\",\n shape=(len(imgs_data), max(len(img) for img in imgs_data)),\n dtype=\"int8\",\n compression=\"gzip\",\n )\n for i, img in enumerate(imgs_data):\n ds[i, : len(img)] = img\n else:\n ds = f.create_dataset(\n f\"{group}/video\",\n shape=(len(imgs_data),),\n dtype=h5py.special_dtype(vlen=np.dtype(\"int8\")),\n )\n for i, img in enumerate(imgs_data):\n ds[i] = img\n\n # Store metadata.\n ds.attrs[\"format\"] = image_format\n video_shape = video.shape\n (\n ds.attrs[\"frames\"],\n ds.attrs[\"height\"],\n ds.attrs[\"width\"],\n ds.attrs[\"channels\"],\n ) = video_shape\n\n # Store frame indices.\n f.create_dataset(f\"{group}/frame_numbers\", data=frame_inds)\n\n # Store source video.\n if video.source_video is not None:\n # If this is already an embedded dataset, retain the previous source video.\n source_video = video.source_video\n else:\n source_video = video\n\n # Create a new video object with the embedded data.\n embedded_video = Video(\n filename=labels_path,\n backend=VideoBackend.from_filename(\n labels_path,\n dataset=f\"{group}/video\",\n grayscale=video.grayscale,\n keep_open=False,\n ),\n source_video=source_video,\n )\n\n grp = f.require_group(f\"{group}/source_video\")\n grp.attrs[\"json\"] = json.dumps(\n video_to_dict(source_video), separators=(\",\", \":\")\n )\n\n return embedded_video\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.embed_videos","title":"embed_videos(labels_path, labels, embed)
","text":"Embed videos in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file to save.
requiredlabels
Labels
A Labels
object to save.
embed
bool | str | list[tuple[Video, int]]
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
required Source code insleap_io/io/slp.py
def embed_videos(\n labels_path: str, labels: Labels, embed: bool | str | list[tuple[Video, int]]\n):\n \"\"\"Embed videos in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file to save.\n labels: A `Labels` object to save.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n if embed is True:\n embed = \"all\"\n if embed == \"user\":\n embed = [(lf.video, lf.frame_idx) for lf in labels.user_labeled_frames]\n elif embed == \"suggestions\":\n embed = [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"user+suggestions\":\n embed = [(lf.video, lf.frame_idx) for lf in labels.user_labeled_frames]\n embed += [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"all\":\n embed = [(lf.video, lf.frame_idx) for lf in labels]\n embed += [(sf.video, sf.frame_idx) for sf in labels.suggestions]\n elif embed == \"source\":\n embed = []\n elif isinstance(embed, list):\n embed = embed\n else:\n raise ValueError(f\"Invalid value for embed: {embed}\")\n\n embed_frames(labels_path, labels, embed)\n
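As a sketch of how the string shorthands map onto explicit frame lists (the file names and the sleap_io.load_slp loader are assumptions for illustration):

import sleap_io as sio
from sleap_io.io.slp import embed_videos

labels = sio.load_slp("labels.slp")  # hypothetical input file

# "user" is shorthand for the list of (video, frame_idx) pairs of user-labeled frames:
user_frames = [(lf.video, lf.frame_idx) for lf in labels.user_labeled_frames]

# Either form embeds the same images into the output file.
embed_videos("labels.pkg.slp", labels, embed="user")
# embed_videos("labels.pkg.slp", labels, embed=user_frames)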
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.make_video","title":"make_video(labels_path, video_json, open_backend=True)
","text":"Create a Video
object from a JSON dictionary.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideo_json
dict
A dictionary containing the video metadata.
requiredopen_backend
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Source code in sleap_io/io/slp.py
def make_video(\n labels_path: str,\n video_json: dict,\n open_backend: bool = True,\n) -> Video:\n \"\"\"Create a `Video` object from a JSON dictionary.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n video_json: A dictionary containing the video metadata.\n open_backend: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n \"\"\"\n backend_metadata = video_json[\"backend\"]\n video_path = backend_metadata[\"filename\"]\n\n # Marker for embedded videos.\n source_video = None\n is_embedded = False\n if video_path == \".\":\n video_path = labels_path\n is_embedded = True\n\n # Basic path resolution.\n video_path = Path(sanitize_filename(video_path))\n\n if is_embedded:\n # Try to recover the source video.\n with h5py.File(labels_path, \"r\") as f:\n dataset = backend_metadata[\"dataset\"]\n if dataset.endswith(\"/video\"):\n dataset = dataset[:-6]\n if dataset in f:\n source_video_json = json.loads(\n f[f\"{dataset}/source_video\"].attrs[\"json\"]\n )\n source_video = make_video(\n labels_path,\n source_video_json,\n open_backend=open_backend,\n )\n\n backend = None\n if open_backend:\n try:\n if not is_file_accessible(video_path):\n # Check for the same filename in the same directory as the labels file.\n candidate_video_path = Path(labels_path).parent / video_path.name\n if is_file_accessible(candidate_video_path):\n video_path = candidate_video_path\n else:\n # TODO (TP): Expand capabilities of path resolution to support more\n # complex path finding strategies.\n pass\n except (OSError, PermissionError, FileNotFoundError):\n pass\n\n # Convert video path to string.\n video_path = video_path.as_posix()\n\n if \"filenames\" in backend_metadata:\n # This is an ImageVideo.\n # TODO: Path resolution.\n video_path = backend_metadata[\"filenames\"]\n video_path = [Path(sanitize_filename(p)) for p in video_path]\n\n try:\n grayscale = None\n if \"grayscale\" in backend_metadata:\n grayscale = backend_metadata[\"grayscale\"]\n elif \"shape\" in backend_metadata:\n grayscale = backend_metadata[\"shape\"][-1] == 1\n backend = VideoBackend.from_filename(\n video_path,\n dataset=backend_metadata.get(\"dataset\", None),\n grayscale=grayscale,\n input_format=backend_metadata.get(\"input_format\", None),\n )\n except Exception:\n backend = None\n\n return Video(\n filename=video_path,\n backend=backend,\n backend_metadata=backend_metadata,\n source_video=source_video,\n open_backend=open_backend,\n )\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_instances","title":"read_instances(labels_path, skeletons, tracks, points, pred_points, format_id)
","text":"Read Instance
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredskeletons
list[Skeleton]
A list of Skeleton
objects (see read_skeletons
).
tracks
list[Track]
A list of Track
objects (see read_tracks
).
points
list[Point]
A list of Point
objects (see read_points
).
pred_points
list[PredictedPoint]
A list of PredictedPoint
objects (see read_pred_points
).
format_id
float
The format version identifier used to specify the format of the input file.
requiredReturns:
Type Descriptionlist[Union[Instance, PredictedInstance]]
A list of Instance
and/or PredictedInstance
objects.
sleap_io/io/slp.py
def read_instances(\n labels_path: str,\n skeletons: list[Skeleton],\n tracks: list[Track],\n points: list[Point],\n pred_points: list[PredictedPoint],\n format_id: float,\n) -> list[Union[Instance, PredictedInstance]]:\n \"\"\"Read `Instance` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n skeletons: A list of `Skeleton` objects (see `read_skeletons`).\n tracks: A list of `Track` objects (see `read_tracks`).\n points: A list of `Point` objects (see `read_points`).\n pred_points: A list of `PredictedPoint` objects (see `read_pred_points`).\n format_id: The format version identifier used to specify the format of the input\n file.\n\n Returns:\n A list of `Instance` and/or `PredictedInstance` objects.\n \"\"\"\n instances_data = read_hdf5_dataset(labels_path, \"instances\")\n\n instances = {}\n from_predicted_pairs = []\n for instance_data in instances_data:\n if format_id < 1.2:\n (\n instance_id,\n instance_type,\n frame_id,\n skeleton_id,\n track_id,\n from_predicted,\n instance_score,\n point_id_start,\n point_id_end,\n ) = instance_data\n tracking_score = np.zeros_like(instance_score)\n else:\n (\n instance_id,\n instance_type,\n frame_id,\n skeleton_id,\n track_id,\n from_predicted,\n instance_score,\n point_id_start,\n point_id_end,\n tracking_score,\n ) = instance_data\n\n if instance_type == InstanceType.USER:\n instances[instance_id] = Instance(\n points=points[point_id_start:point_id_end], # type: ignore[arg-type]\n skeleton=skeletons[skeleton_id],\n track=tracks[track_id] if track_id >= 0 else None,\n )\n if from_predicted >= 0:\n from_predicted_pairs.append((instance_id, from_predicted))\n elif instance_type == InstanceType.PREDICTED:\n instances[instance_id] = PredictedInstance(\n points=pred_points[point_id_start:point_id_end], # type: ignore[arg-type]\n skeleton=skeletons[skeleton_id],\n track=tracks[track_id] if track_id >= 0 else None,\n score=instance_score,\n tracking_score=tracking_score,\n )\n\n # Link instances based on from_predicted field.\n for instance_id, from_predicted in from_predicted_pairs:\n instances[instance_id].from_predicted = instances[from_predicted]\n\n # Convert instances back to list.\n instances = list(instances.values())\n\n return instances\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_labels","title":"read_labels(labels_path, open_videos=True)
","text":"Read a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredopen_videos
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type DescriptionLabels
The processed Labels
object.
sleap_io/io/slp.py
def read_labels(labels_path: str, open_videos: bool = True) -> Labels:\n \"\"\"Read a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n open_videos: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n The processed `Labels` object.\n \"\"\"\n tracks = read_tracks(labels_path)\n videos = read_videos(labels_path, open_backend=open_videos)\n skeletons = read_skeletons(labels_path)\n points = read_points(labels_path)\n pred_points = read_pred_points(labels_path)\n format_id = read_hdf5_attrs(labels_path, \"metadata\", \"format_id\")\n instances = read_instances(\n labels_path, skeletons, tracks, points, pred_points, format_id\n )\n suggestions = read_suggestions(labels_path, videos)\n metadata = read_metadata(labels_path)\n provenance = metadata.get(\"provenance\", dict())\n\n frames = read_hdf5_dataset(labels_path, \"frames\")\n labeled_frames = []\n for _, video_id, frame_idx, instance_id_start, instance_id_end in frames:\n labeled_frames.append(\n LabeledFrame(\n video=videos[video_id],\n frame_idx=int(frame_idx),\n instances=instances[instance_id_start:instance_id_end],\n )\n )\n\n labels = Labels(\n labeled_frames=labeled_frames,\n videos=videos,\n skeletons=skeletons,\n tracks=tracks,\n suggestions=suggestions,\n provenance=provenance,\n )\n labels.provenance[\"filename\"] = labels_path\n\n return labels\n
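A brief usage sketch (the path is hypothetical); open_videos=False is handy when the source movies are not present on the current machine:

from sleap_io.io.slp import read_labels

labels = read_labels("predictions.slp", open_videos=False)

print(len(labels.labeled_frames), "labeled frames across", len(labels.videos), "videos")
print(labels.provenance["filename"])  # set by read_labels to the input path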
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_metadata","title":"read_metadata(labels_path)
","text":"Read metadata from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptiondict
A dict containing the metadata from a SLEAP labels file.
Source code insleap_io/io/slp.py
def read_metadata(labels_path: str) -> dict:\n \"\"\"Read metadata from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A dict containing the metadata from a SLEAP labels file.\n \"\"\"\n md = read_hdf5_attrs(labels_path, \"metadata\", \"json\")\n return json.loads(md.decode())\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_points","title":"read_points(labels_path)
","text":"Read Point
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptionlist[Point]
A list of Point
objects.
sleap_io/io/slp.py
def read_points(labels_path: str) -> list[Point]:\n \"\"\"Read `Point` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `Point` objects.\n \"\"\"\n pts = read_hdf5_dataset(labels_path, \"points\")\n return [\n Point(x=x, y=y, visible=visible, complete=complete)\n for x, y, visible, complete in pts\n ]\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_pred_points","title":"read_pred_points(labels_path)
","text":"Read PredictedPoint
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptionlist[PredictedPoint]
A list of PredictedPoint
objects.
sleap_io/io/slp.py
def read_pred_points(labels_path: str) -> list[PredictedPoint]:\n \"\"\"Read `PredictedPoint` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `PredictedPoint` objects.\n \"\"\"\n pred_pts = read_hdf5_dataset(labels_path, \"pred_points\")\n return [\n PredictedPoint(x=x, y=y, visible=visible, complete=complete, score=score)\n for x, y, visible, complete, score in pred_pts\n ]\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_skeletons","title":"read_skeletons(labels_path)
","text":"Read Skeleton
dataset from a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string that contains the path to the labels file.
requiredReturns:
Type Descriptionlist[Skeleton]
A list of Skeleton
objects.
sleap_io/io/slp.py
def read_skeletons(labels_path: str) -> list[Skeleton]:\n \"\"\"Read `Skeleton` dataset from a SLEAP labels file.\n\n Args:\n labels_path: A string that contains the path to the labels file.\n\n Returns:\n A list of `Skeleton` objects.\n \"\"\"\n metadata = read_metadata(labels_path)\n\n # Get node names. This is a superset of all nodes across all skeletons. Note that\n # node ordering is specific to each skeleton, so we'll need to fix this afterwards.\n node_names = [x[\"name\"] for x in metadata[\"nodes\"]]\n\n skeleton_objects = []\n for skel in metadata[\"skeletons\"]:\n # Parse out the cattr-based serialization stuff from the skeleton links.\n edge_inds, symmetry_inds = [], []\n for link in skel[\"links\"]:\n if \"py/reduce\" in link[\"type\"]:\n edge_type = link[\"type\"][\"py/reduce\"][1][\"py/tuple\"][0]\n else:\n edge_type = link[\"type\"][\"py/id\"]\n\n if edge_type == 1: # 1 -> real edge, 2 -> symmetry edge\n edge_inds.append((link[\"source\"], link[\"target\"]))\n\n elif edge_type == 2:\n symmetry_inds.append((link[\"source\"], link[\"target\"]))\n\n # Re-index correctly.\n skeleton_node_inds = [node[\"id\"] for node in skel[\"nodes\"]]\n sorted_node_names = [node_names[i] for i in skeleton_node_inds]\n\n # Create nodes.\n nodes = []\n for name in sorted_node_names:\n nodes.append(Node(name=name))\n\n # Create edges.\n edge_inds = [\n (skeleton_node_inds.index(s), skeleton_node_inds.index(d))\n for s, d in edge_inds\n ]\n edges = []\n for edge in edge_inds:\n edges.append(Edge(source=nodes[edge[0]], destination=nodes[edge[1]]))\n\n # Create symmetries.\n symmetry_inds = [\n (skeleton_node_inds.index(s), skeleton_node_inds.index(d))\n for s, d in symmetry_inds\n ]\n symmetries = []\n for symmetry in symmetry_inds:\n symmetries.append(Symmetry([nodes[symmetry[0]], nodes[symmetry[1]]]))\n\n # Create the full skeleton.\n skel = Skeleton(\n nodes=nodes, edges=edges, symmetries=symmetries, name=skel[\"graph\"][\"name\"]\n )\n skeleton_objects.append(skel)\n return skeleton_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_suggestions","title":"read_suggestions(labels_path, videos)
","text":"Read SuggestionFrame
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideos
list[Video]
A list of Video
objects.
Returns:
Type Descriptionlist[SuggestionFrame]
A list of SuggestionFrame
objects.
sleap_io/io/slp.py
def read_suggestions(labels_path: str, videos: list[Video]) -> list[SuggestionFrame]:\n \"\"\"Read `SuggestionFrame` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n videos: A list of `Video` objects.\n\n Returns:\n A list of `SuggestionFrame` objects.\n \"\"\"\n try:\n suggestions = read_hdf5_dataset(labels_path, \"suggestions_json\")\n except KeyError:\n return []\n suggestions = [json.loads(x) for x in suggestions]\n suggestions_objects = []\n for suggestion in suggestions:\n suggestions_objects.append(\n SuggestionFrame(\n video=videos[int(suggestion[\"video\"])],\n frame_idx=suggestion[\"frame_idx\"],\n )\n )\n return suggestions_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_tracks","title":"read_tracks(labels_path)
","text":"Read Track
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredReturns:
Type Descriptionlist[Track]
A list of Track
objects.
sleap_io/io/slp.py
def read_tracks(labels_path: str) -> list[Track]:\n \"\"\"Read `Track` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n\n Returns:\n A list of `Track` objects.\n \"\"\"\n tracks = [json.loads(x) for x in read_hdf5_dataset(labels_path, \"tracks_json\")]\n track_objects = []\n for track in tracks:\n track_objects.append(Track(name=track[1]))\n return track_objects\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.read_videos","title":"read_videos(labels_path, open_backend=True)
","text":"Read Video
dataset in a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredopen_backend
bool
If True
(the default), attempt to open the video backend for I/O. If False
, the backend will not be opened (useful for reading metadata when the video files are not available).
True
Returns:
Type Descriptionlist[Video]
A list of Video
objects.
sleap_io/io/slp.py
def read_videos(labels_path: str, open_backend: bool = True) -> list[Video]:\n \"\"\"Read `Video` dataset in a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n open_backend: If `True` (the default), attempt to open the video backend for\n I/O. If `False`, the backend will not be opened (useful for reading metadata\n when the video files are not available).\n\n Returns:\n A list of `Video` objects.\n \"\"\"\n videos = []\n videos_metadata = read_hdf5_dataset(labels_path, \"videos_json\")\n for video_data in videos_metadata:\n video_json = json.loads(video_data)\n video = make_video(labels_path, video_json, open_backend=open_backend)\n videos.append(video)\n return videos\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.sanitize_filename","title":"sanitize_filename(filename)
","text":"Sanitize a filename to a canonical posix-compatible format.
Parameters:
Name Type Description Defaultfilename
str | Path | list[str] | list[Path]
A string or Path
object or list of either to sanitize.
Returns:
Type Descriptionstr | list[str]
A sanitized filename as a string (or a list of strings if a list was provided), POSIX-formatted with forward slashes.
Source code insleap_io/io/slp.py
def sanitize_filename(\n filename: str | Path | list[str] | list[Path],\n) -> str | list[str]:\n \"\"\"Sanitize a filename to a canonical posix-compatible format.\n\n Args:\n filename: A string or `Path` object or list of either to sanitize.\n\n Returns:\n A sanitized filename as a string (or list of strings if a list was provided)\n with forward slashes and posix-formatted.\n \"\"\"\n if isinstance(filename, list):\n return [sanitize_filename(f) for f in filename]\n return Path(filename).as_posix().replace(\"\\\\\", \"/\")\n
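A quick illustration of the normalization (inputs are hypothetical):

from sleap_io.io.slp import sanitize_filename

# Backslash-separated Windows paths are converted to forward slashes.
print(sanitize_filename("C:\\data\\session1\\video.mp4"))
# -> "C:/data/session1/video.mp4"

# Lists are sanitized element-wise.
print(sanitize_filename(["clips\\a.mp4", "clips\\b.mp4"]))
# -> ["clips/a.mp4", "clips/b.mp4"]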
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.serialize_skeletons","title":"serialize_skeletons(skeletons)
","text":"Serialize a list of Skeleton
objects to JSON-compatible dicts.
Parameters:
Name Type Description Defaultskeletons
list[Skeleton]
A list of Skeleton
objects.
Returns:
Type Descriptiontuple[list[dict], list[dict]]
A tuple of skeletons_dicts, nodes_dicts
.
nodes_dicts
is a list of dicts containing the nodes in all the skeletons.
skeletons_dicts
is a list of dicts containing the skeletons.
This function attempts to replicate the serialization of skeletons in legacy SLEAP, which relies on a combination of networkx's graph serialization and our own metadata used to store nodes and edges independently of the graph structure.
However, because sleap-io does not currently load the legacy metadata, this function will not produce byte-level compatible serialization with legacy formats, even though the ordering and all attributes of nodes and edges should match up.
Source code insleap_io/io/slp.py
def serialize_skeletons(skeletons: list[Skeleton]) -> tuple[list[dict], list[dict]]:\n \"\"\"Serialize a list of `Skeleton` objects to JSON-compatible dicts.\n\n Args:\n skeletons: A list of `Skeleton` objects.\n\n Returns:\n A tuple of `nodes_dicts, skeletons_dicts`.\n\n `nodes_dicts` is a list of dicts containing the nodes in all the skeletons.\n\n `skeletons_dicts` is a list of dicts containing the skeletons.\n\n Notes:\n This function attempts to replicate the serialization of skeletons in legacy\n SLEAP which relies on a combination of networkx's graph serialization and our\n own metadata used to store nodes and edges independent of the graph structure.\n\n However, because sleap-io does not currently load in the legacy metadata, this\n function will not produce byte-level compatible serialization with legacy\n formats, even though the ordering and all attributes of nodes and edges should\n match up.\n \"\"\"\n # Create global list of nodes with all nodes from all skeletons.\n nodes_dicts = []\n node_to_id = {}\n for skeleton in skeletons:\n for node in skeleton.nodes:\n if node not in node_to_id:\n # Note: This ID is not the same as the node index in the skeleton in\n # legacy SLEAP, but we do not retain this information in the labels, so\n # IDs will be different.\n #\n # The weight is also kept fixed here, but technically this is not\n # modified or used in legacy SLEAP either.\n #\n # TODO: Store legacy metadata in labels to get byte-level compatibility?\n node_to_id[node] = len(node_to_id)\n nodes_dicts.append({\"name\": node.name, \"weight\": 1.0})\n\n skeletons_dicts = []\n for skeleton in skeletons:\n # Build links dicts for normal edges.\n edges_dicts = []\n for edge_ind, edge in enumerate(skeleton.edges):\n if edge_ind == 0:\n edge_type = {\n \"py/reduce\": [\n {\"py/type\": \"sleap.skeleton.EdgeType\"},\n {\"py/tuple\": [1]}, # 1 = real edge, 2 = symmetry edge\n ]\n }\n else:\n edge_type = {\"py/id\": 1}\n\n edges_dicts.append(\n {\n # Note: Insert idx is not the same as the edge index in the skeleton\n # in legacy SLEAP.\n \"edge_insert_idx\": edge_ind,\n \"key\": 0, # Always 0.\n \"source\": node_to_id[edge.source],\n \"target\": node_to_id[edge.destination],\n \"type\": edge_type,\n }\n )\n\n # Build links dicts for symmetry edges.\n for symmetry_ind, symmetry in enumerate(skeleton.symmetries):\n if symmetry_ind == 0:\n edge_type = {\n \"py/reduce\": [\n {\"py/type\": \"sleap.skeleton.EdgeType\"},\n {\"py/tuple\": [2]}, # 1 = real edge, 2 = symmetry edge\n ]\n }\n else:\n edge_type = {\"py/id\": 2}\n\n src, dst = tuple(symmetry.nodes)\n edges_dicts.append(\n {\n \"key\": 0,\n \"source\": node_to_id[src],\n \"target\": node_to_id[dst],\n \"type\": edge_type,\n }\n )\n\n # Create skeleton dict.\n skeletons_dicts.append(\n {\n \"directed\": True,\n \"graph\": {\n \"name\": skeleton.name,\n \"num_edges_inserted\": len(skeleton.edges),\n },\n \"links\": edges_dicts,\n \"multigraph\": True,\n # In the order in Skeleton.nodes and must match up with nodes_dicts.\n \"nodes\": [{\"id\": node_to_id[node]} for node in skeleton.nodes],\n }\n )\n\n return skeletons_dicts, nodes_dicts\n
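A minimal sketch of serializing a hand-built skeleton; the node and skeleton names are illustrative, and the constructors mirror those used in read_skeletons above:

from sleap_io.io.slp import serialize_skeletons
from sleap_io.model.skeleton import Node, Edge, Skeleton

head, thorax = Node(name="head"), Node(name="thorax")
skel = Skeleton(
    nodes=[head, thorax],
    edges=[Edge(source=head, destination=thorax)],
    name="fly",
)

# Note the return order: skeleton dicts first, then the global node list.
skeletons_dicts, nodes_dicts = serialize_skeletons([skel])
print(nodes_dicts)
# -> [{"name": "head", "weight": 1.0}, {"name": "thorax", "weight": 1.0}]
print(skeletons_dicts[0]["links"][0]["type"])  # first edge carries the "py/reduce" EdgeType marker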
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.video_to_dict","title":"video_to_dict(video)
","text":"Convert a Video
object to a JSON-compatible dictionary.
Parameters:
Name Type Description Defaultvideo
Video
A Video
object to convert.
Returns:
Type Descriptiondict
A dictionary containing the video metadata.
Source code insleap_io/io/slp.py
def video_to_dict(video: Video) -> dict:\n \"\"\"Convert a `Video` object to a JSON-compatible dictionary.\n\n Args:\n video: A `Video` object to convert.\n\n Returns:\n A dictionary containing the video metadata.\n \"\"\"\n video_filename = sanitize_filename(video.filename)\n if video.backend is None:\n return {\"filename\": video_filename, \"backend\": video.backend_metadata}\n\n if type(video.backend) == MediaVideo:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"MediaVideo\",\n \"shape\": video.shape,\n \"filename\": video_filename,\n \"grayscale\": video.grayscale,\n \"bgr\": True,\n \"dataset\": \"\",\n \"input_format\": \"\",\n },\n }\n\n elif type(video.backend) == HDF5Video:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"HDF5Video\",\n \"shape\": video.shape,\n \"filename\": (\n \".\" if video.backend.has_embedded_images else video_filename\n ),\n \"dataset\": video.backend.dataset,\n \"input_format\": video.backend.input_format,\n \"convert_range\": False,\n \"has_embedded_images\": video.backend.has_embedded_images,\n \"grayscale\": video.grayscale,\n },\n }\n\n elif type(video.backend) == ImageVideo:\n return {\n \"filename\": video_filename,\n \"backend\": {\n \"type\": \"ImageVideo\",\n \"shape\": video.shape,\n \"filename\": sanitize_filename(video.backend.filename[0]),\n \"filenames\": sanitize_filename(video.backend.filename),\n \"dataset\": video.backend_metadata.get(\"dataset\", None),\n \"grayscale\": video.grayscale,\n \"input_format\": video.backend_metadata.get(\"input_format\", None),\n },\n }\n
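A short sketch of inspecting the serialized form of a video (the path and the sleap_io.load_video loader are assumptions for illustration):

import json
import sleap_io as sio
from sleap_io.io.slp import video_to_dict

video = sio.load_video("session.mp4")  # hypothetical path
print(json.dumps(video_to_dict(video), indent=2))  # backend type, shape, grayscale, etc.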
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_labels","title":"write_labels(labels_path, labels, embed=None)
","text":"Write a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file to save.
requiredlabels
Labels
A Labels
object to save.
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/io/slp.py
def write_labels(\n labels_path: str,\n labels: Labels,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n):\n \"\"\"Write a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file to save.\n labels: A `Labels` object to save.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or list\n of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source video\n will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n if Path(labels_path).exists():\n Path(labels_path).unlink()\n\n if embed:\n embed_videos(labels_path, labels, embed)\n write_videos(labels_path, labels.videos, restore_source=(embed == \"source\"))\n write_tracks(labels_path, labels.tracks)\n write_suggestions(labels_path, labels.suggestions, labels.videos)\n write_metadata(labels_path, labels)\n write_lfs(labels_path, labels)\n
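A round-trip sketch using the embedding options described above (file names are hypothetical):

from sleap_io.io.slp import read_labels, write_labels

labels = read_labels("labels.slp")

# Save a package file with all labeled and suggested frames embedded.
write_labels("labels.pkg.slp", labels, embed="all")

# Save a copy that drops embedded images and points back at the source videos.
write_labels("labels_source.slp", labels, embed="source")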
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_lfs","title":"write_lfs(labels_path, labels)
","text":"Write labeled frames, instances and points to a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredlabels
Labels
A Labels
object to store the metadata for.
sleap_io/io/slp.py
def write_lfs(labels_path: str, labels: Labels):\n \"\"\"Write labeled frames, instances and points to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to store the metadata for.\n \"\"\"\n # We store the data in structured arrays for performance, so we first define the\n # dtype fields.\n instance_dtype = np.dtype(\n [\n (\"instance_id\", \"i8\"),\n (\"instance_type\", \"u1\"),\n (\"frame_id\", \"u8\"),\n (\"skeleton\", \"u4\"),\n (\"track\", \"i4\"),\n (\"from_predicted\", \"i8\"),\n (\"score\", \"f4\"),\n (\"point_id_start\", \"u8\"),\n (\"point_id_end\", \"u8\"),\n (\"tracking_score\", \"f4\"), # FORMAT_ID >= 1.2\n ]\n )\n frame_dtype = np.dtype(\n [\n (\"frame_id\", \"u8\"),\n (\"video\", \"u4\"),\n (\"frame_idx\", \"u8\"),\n (\"instance_id_start\", \"u8\"),\n (\"instance_id_end\", \"u8\"),\n ]\n )\n point_dtype = np.dtype(\n [(\"x\", \"f8\"), (\"y\", \"f8\"), (\"visible\", \"?\"), (\"complete\", \"?\")]\n )\n predicted_point_dtype = np.dtype(\n [(\"x\", \"f8\"), (\"y\", \"f8\"), (\"visible\", \"?\"), (\"complete\", \"?\"), (\"score\", \"f8\")]\n )\n\n # Next, we extract the data from the labels object into lists with the same fields.\n frames, instances, points, predicted_points, to_link = [], [], [], [], []\n inst_to_id = {}\n for lf in labels:\n frame_id = len(frames)\n instance_id_start = len(instances)\n for inst in lf:\n instance_id = len(instances)\n inst_to_id[id(inst)] = instance_id\n skeleton_id = labels.skeletons.index(inst.skeleton)\n track = labels.tracks.index(inst.track) if inst.track else -1\n from_predicted = -1\n if inst.from_predicted:\n to_link.append((instance_id, inst.from_predicted))\n\n if type(inst) == Instance:\n instance_type = InstanceType.USER\n score = np.nan\n tracking_score = np.nan\n point_id_start = len(points)\n\n for node in inst.skeleton.nodes:\n pt = inst.points[node]\n points.append([pt.x, pt.y, pt.visible, pt.complete])\n\n point_id_end = len(points)\n\n elif type(inst) == PredictedInstance:\n instance_type = InstanceType.PREDICTED\n score = inst.score\n tracking_score = inst.tracking_score\n point_id_start = len(predicted_points)\n\n for node in inst.skeleton.nodes:\n pt = inst.points[node]\n predicted_points.append(\n [pt.x, pt.y, pt.visible, pt.complete, pt.score]\n )\n\n point_id_end = len(predicted_points)\n\n else:\n raise ValueError(f\"Unknown instance type: {type(inst)}\")\n\n instances.append(\n [\n instance_id,\n int(instance_type),\n frame_id,\n skeleton_id,\n track,\n from_predicted,\n score,\n point_id_start,\n point_id_end,\n tracking_score,\n ]\n )\n\n instance_id_end = len(instances)\n\n frames.append(\n [\n frame_id,\n labels.videos.index(lf.video),\n lf.frame_idx,\n instance_id_start,\n instance_id_end,\n ]\n )\n\n # Link instances based on from_predicted field.\n for instance_id, from_predicted in to_link:\n # Source instance may be missing if predictions were removed from the labels, in\n # which case, remove the link.\n instances[instance_id][5] = inst_to_id.get(id(from_predicted), -1)\n\n # Create structured arrays.\n points = np.array([tuple(x) for x in points], dtype=point_dtype)\n predicted_points = np.array(\n [tuple(x) for x in predicted_points], dtype=predicted_point_dtype\n )\n instances = np.array([tuple(x) for x in instances], dtype=instance_dtype)\n frames = np.array([tuple(x) for x in frames], dtype=frame_dtype)\n\n # Write to file.\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"points\", data=points, dtype=points.dtype)\n 
f.create_dataset(\n \"pred_points\",\n data=predicted_points,\n dtype=predicted_points.dtype,\n )\n f.create_dataset(\n \"instances\",\n data=instances,\n dtype=instances.dtype,\n )\n f.create_dataset(\n \"frames\",\n data=frames,\n dtype=frames.dtype,\n )\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_metadata","title":"write_metadata(labels_path, labels)
","text":"Write metadata to a SLEAP labels file.
This function will write the skeletons and provenance for the labels.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredlabels
Labels
A Labels
object to store the metadata for.
See also: serialize_skeletons
Source code insleap_io/io/slp.py
def write_metadata(labels_path: str, labels: Labels):\n \"\"\"Write metadata to a SLEAP labels file.\n\n This function will write the skeletons and provenance for the labels.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n labels: A `Labels` object to store the metadata for.\n\n See also: serialize_skeletons\n \"\"\"\n skeletons_dicts, nodes_dicts = serialize_skeletons(labels.skeletons)\n\n md = {\n \"version\": \"2.0.0\",\n \"skeletons\": skeletons_dicts,\n \"nodes\": nodes_dicts,\n \"videos\": [],\n \"tracks\": [],\n \"suggestions\": [], # TODO: Handle suggestions metadata.\n \"negative_anchors\": {},\n \"provenance\": labels.provenance,\n }\n\n # Custom encoding.\n for k in md[\"provenance\"]:\n if isinstance(md[\"provenance\"][k], Path):\n # Path -> str\n md[\"provenance\"][k] = md[\"provenance\"][k].as_posix()\n\n with h5py.File(labels_path, \"a\") as f:\n grp = f.require_group(\"metadata\")\n grp.attrs[\"format_id\"] = 1.2\n grp.attrs[\"json\"] = np.bytes_(json.dumps(md, separators=(\",\", \":\")))\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_suggestions","title":"write_suggestions(labels_path, suggestions, videos)
","text":"Write track metadata to a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredsuggestions
list[SuggestionFrame]
A list of SuggestionFrame
objects to store the metadata for.
videos
list[Video]
A list of Video
objects.
sleap_io/io/slp.py
def write_suggestions(\n labels_path: str, suggestions: list[SuggestionFrame], videos: list[Video]\n):\n \"\"\"Write track metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n suggestions: A list of `SuggestionFrame` objects to store the metadata for.\n videos: A list of `Video` objects.\n \"\"\"\n GROUP = 0 # TODO: Handle storing extraneous metadata.\n suggestions_json = []\n for suggestion in suggestions:\n suggestion_dict = {\n \"video\": str(videos.index(suggestion.video)),\n \"frame_idx\": suggestion.frame_idx,\n \"group\": GROUP,\n }\n suggestion_json = np.bytes_(json.dumps(suggestion_dict, separators=(\",\", \":\")))\n suggestions_json.append(suggestion_json)\n\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"suggestions_json\", data=suggestions_json, maxshape=(None,))\n
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_tracks","title":"write_tracks(labels_path, tracks)
","text":"Write track metadata to a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredtracks
list[Track]
A list of Track
objects to store the metadata for.
sleap_io/io/slp.py
def write_tracks(labels_path: str, tracks: list[Track]):\n \"\"\"Write track metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n tracks: A list of `Track` objects to store the metadata for.\n \"\"\"\n # TODO: Add support for track metadata like spawned on frame.\n SPAWNED_ON = 0\n tracks_json = [\n np.bytes_(json.dumps([SPAWNED_ON, track.name], separators=(\",\", \":\")))\n for track in tracks\n ]\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"tracks_json\", data=tracks_json, maxshape=(None,))\n
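For reference, the records written here can be inspected directly with h5py; each entry is a JSON-encoded [spawned_on, name] pair. The path below is hypothetical:

import json
import h5py

with h5py.File("labels.slp", "r") as f:
    for raw in f["tracks_json"][:]:
        spawned_on, name = json.loads(raw)
        print(name, "spawned on frame", spawned_on)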
"},{"location":"reference/sleap_io/io/slp/#sleap_io.io.slp.write_videos","title":"write_videos(labels_path, videos, restore_source=False)
","text":"Write video metadata to a SLEAP labels file.
Parameters:
Name Type Description Defaultlabels_path
str
A string path to the SLEAP labels file.
requiredvideos
list[Video]
A list of Video
objects to store the metadata for.
restore_source
bool
If True
, restore source videos if available and will not re-embed the embedded images. If False
(the default), will re-embed images that were previously embedded.
False
Source code in sleap_io/io/slp.py
def write_videos(labels_path: str, videos: list[Video], restore_source: bool = False):\n \"\"\"Write video metadata to a SLEAP labels file.\n\n Args:\n labels_path: A string path to the SLEAP labels file.\n videos: A list of `Video` objects to store the metadata for.\n restore_source: If `True`, restore source videos if available and will not\n re-embed the embedded images. If `False` (the default), will re-embed images\n that were previously embedded.\n \"\"\"\n video_jsons = []\n for video_ind, video in enumerate(videos):\n if type(video.backend) == HDF5Video and video.backend.has_embedded_images:\n if restore_source:\n video = video.source_video\n else:\n # If the video has embedded images, embed them images again if we haven't\n # already.\n already_embedded = False\n if Path(labels_path).exists():\n with h5py.File(labels_path, \"r\") as f:\n already_embedded = f\"video{video_ind}/video\" in f\n\n if not already_embedded:\n video = embed_video(\n labels_path,\n video,\n group=f\"video{video_ind}\",\n frame_inds=video.backend.source_inds,\n image_format=video.backend.image_format,\n )\n\n video_json = video_to_dict(video)\n\n video_jsons.append(np.bytes_(json.dumps(video_json, separators=(\",\", \":\"))))\n\n with h5py.File(labels_path, \"a\") as f:\n f.create_dataset(\"videos_json\", data=video_jsons, maxshape=(None,))\n
"},{"location":"reference/sleap_io/io/utils/","title":"utils","text":""},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils","title":"sleap_io.io.utils
","text":"Miscellaneous utilities for working with different I/O formats.
Functions:
Name Descriptionis_file_accessible
Check if a file is accessible.
read_hdf5_attrs
Read attributes from an HDF5 dataset.
read_hdf5_dataset
Read data from an HDF5 file.
read_hdf5_group
Read an entire group from an HDF5 file.
write_hdf5_attrs
Write attributes to an HDF5 dataset.
write_hdf5_dataset
Write data to an HDF5 file.
write_hdf5_group
Write an entire group to an HDF5 file.
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.is_file_accessible","title":"is_file_accessible(filename)
","text":"Check if a file is accessible.
Parameters:
Name Type Description Defaultfilename
str | Path
Path to a file.
requiredReturns:
Type Descriptionbool
True
if the file is accessible, False
otherwise.
This checks whether the file is readable by the current user by reading one byte from it.
Source code insleap_io/io/utils.py
def is_file_accessible(filename: str | Path) -> bool:\n \"\"\"Check if a file is accessible.\n\n Args:\n filename: Path to a file.\n\n Returns:\n `True` if the file is accessible, `False` otherwise.\n\n Notes:\n This checks if the file readable by the current user by reading one byte from\n the file.\n \"\"\"\n filename = Path(filename)\n try:\n with open(filename, \"rb\") as f:\n f.read(1)\n return True\n except (FileNotFoundError, PermissionError, OSError, ValueError):\n return False\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_attrs","title":"read_hdf5_attrs(filename, dataset='/', attribute=None)
","text":"Read attributes from an HDF5 dataset.
Parameters:
Name Type Description Defaultfilename
str
Path to an HDF5 file.
requireddataset
str
Path to a dataset or group from which attributes will be read.
'/'
attribute
Optional[str]
If specified, the attribute name to read. If None
(the default), all attributes for the dataset will be returned.
None
Returns:
Type DescriptionUnion[Any, dict[str, Any]]
The attributes in a dictionary, or the attribute field if attribute
was provided.
sleap_io/io/utils.py
def read_hdf5_attrs(\n filename: str, dataset: str = \"/\", attribute: Optional[str] = None\n) -> Union[Any, dict[str, Any]]:\n \"\"\"Read attributes from an HDF5 dataset.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset or group from which attributes will be read.\n attribute: If specified, the attribute name to read. If `None` (the default),\n all attributes for the dataset will be returned.\n\n Returns:\n The attributes in a dictionary, or the attribute field if `attribute` was\n provided.\n \"\"\"\n with h5py.File(filename, \"r\") as f:\n ds = f[dataset]\n if attribute is None:\n data = dict(ds.attrs)\n else:\n data = ds.attrs[attribute]\n return data\n
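A short sketch of both call styles (the labels path is hypothetical); this mirrors how read_labels fetches the format version:

from sleap_io.io.utils import read_hdf5_attrs

# Read a single attribute from the "metadata" group.
format_id = read_hdf5_attrs("labels.slp", "metadata", "format_id")

# Omit `attribute` to get all attributes of the group as a dict.
all_attrs = read_hdf5_attrs("labels.slp", "metadata")
print(format_id, sorted(all_attrs))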
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_dataset","title":"read_hdf5_dataset(filename, dataset)
","text":"Read data from an HDF5 file.
Parameters:
Name Type Description Defaultfilename
str
Path to an HDF5 file.
requireddataset
str
Path to a dataset.
requiredReturns:
Type Descriptionndarray
The data as an array.
Source code insleap_io/io/utils.py
def read_hdf5_dataset(filename: str, dataset: str) -> np.ndarray:\n \"\"\"Read data from an HDF5 file.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset.\n\n Returns:\n The data as an array.\n \"\"\"\n with h5py.File(filename, \"r\") as f:\n data = f[dataset][()]\n return data\n
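A one-line sketch (the path is hypothetical) for pulling a raw dataset out of a labels file, e.g. the structured frames table written by write_lfs:

from sleap_io.io.utils import read_hdf5_dataset

frames = read_hdf5_dataset("labels.slp", "frames")
print(frames.shape, frames.dtype)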
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.read_hdf5_group","title":"read_hdf5_group(filename, group='/')
","text":"Read an entire group from an HDF5 file.
Parameters:
Name Type Description Defaultfilename
str
Path an HDF5 file.
requiredgroup
str
Path to a group within the HDF5 file. Defaults to \"/\" (read the entire file).
'/'
Returns:
Type Descriptiondict[str, ndarray]
A flat dictionary with keys corresponding to dataset paths and values corresponding to the datasets as arrays.
Source code insleap_io/io/utils.py
def read_hdf5_group(filename: str, group: str = \"/\") -> dict[str, np.ndarray]:\n \"\"\"Read an entire group from an HDF5 file.\n\n Args:\n filename: Path an HDF5 file.\n group: Path to a group within the HDF5 file. Defaults to \"/\" (read the entire\n file).\n\n Returns:\n A flat dictionary with keys corresponding to dataset paths and values\n corresponding to the datasets as arrays.\n \"\"\"\n data = {}\n\n def read_datasets(k, v):\n if type(v) == h5py.Dataset:\n data[v.name] = v[()]\n\n with h5py.File(filename, \"r\") as f:\n f[group].visititems(read_datasets)\n\n return data\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_attrs","title":"write_hdf5_attrs(filename, dataset, attributes)
","text":"Write attributes to an HDF5 dataset.
Parameters:
Name Type Description Defaultfilename
str
Path to an HDF5 file.
requireddataset
str
Path to a dataset or group to which attributes will be written.
requiredattributes
dict[str, Any]
The attributes in a dictionary with the keys as the attribute names.
required Source code insleap_io/io/utils.py
def write_hdf5_attrs(filename: str, dataset: str, attributes: dict[str, Any]):\n \"\"\"Write attributes to an HDF5 dataset.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset or group to which attributes will be written.\n attributes: The attributes in a dictionary with the keys as the attribute names.\n \"\"\"\n\n def _overwrite_hdf5_attr(\n group_or_dataset: Union[h5py.Group, h5py.Dataset], attr_name: str, data: Any\n ):\n \"\"\"Overwrite attribute for group or dataset in HDF5 file.\n\n Args:\n group_or_dataset: Path to group or dataset in HDF5 file.\n attr_name: Name of attribute.\n data: Data to write to attribute.\n \"\"\"\n try:\n del group_or_dataset.attrs[attr_name]\n except KeyError:\n pass\n group_or_dataset.attrs.create(attr_name, data)\n\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n ds = f[dataset]\n for attr_name, attr_value in attributes.items():\n _overwrite_hdf5_attr(ds, attr_name, attr_value)\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_dataset","title":"write_hdf5_dataset(filename, dataset, data)
","text":"Write data to an HDF5 file.
Parameters:
Name Type Description Defaultfilename
str
Path to an HDF5 file.
requireddataset
str
Path to a dataset.
requireddata
ndarray
Data to write to dataset.
required Source code insleap_io/io/utils.py
def write_hdf5_dataset(filename: str, dataset: str, data: np.ndarray):\n \"\"\"Write data to an HDF5 file.\n\n Args:\n filename: Path to an HDF5 file.\n dataset: Path to a dataset.\n data: Data to write to dataset.\n \"\"\"\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n _overwrite_hdf5_dataset(f, dataset, data)\n
"},{"location":"reference/sleap_io/io/utils/#sleap_io.io.utils.write_hdf5_group","title":"write_hdf5_group(filename, data)
","text":"Write an entire group to an HDF5 file.
Parameters:
Name Type Description Defaultfilename
str
Path to an HDF5 file.
requireddata
dict[str, ndarray]
A dictionary with keys corresponding to dataset/group names and values corresponding to either nested sub-group dictionaries or the datasets as arrays.
required Source code insleap_io/io/utils.py
def write_hdf5_group(filename: str, data: dict[str, np.ndarray]):\n \"\"\"Write an entire group to an HDF5 file.\n\n Args:\n filename: Path an HDF5 file.\n data: A dictionary with keys corresponding to dataset/group paths and values\n corresponding to either sub group paths or the datasets as arrays.\n \"\"\"\n\n def overwrite_hdf5_group(\n file_or_group: Union[h5py.File, h5py.Group], group_name: str\n ) -> h5py.Group:\n \"\"\"Overwrite group in HDF5 file.\n\n Args:\n file_or_group: Path to an HDF5 file or parent group.\n group_name: Path to a group.\n\n Return:\n group: (Sub-)group under specified file or parent group.\n \"\"\"\n try:\n del file_or_group[group_name]\n except KeyError:\n pass\n group = file_or_group.create_group(group_name)\n return group\n\n def write_group(parent_group, data_to_write):\n for name, dataset_or_group in data_to_write.items():\n if isinstance(dataset_or_group, dict):\n # Create (sub-)group under parent group (top level being the file)\n group = overwrite_hdf5_group(parent_group, name)\n write_group(group, dataset_or_group) # Recall with new parent\n else:\n # Create dataset if dataset_or_group is a dataset\n _overwrite_hdf5_dataset(\n f=parent_group, dataset=name, data=dataset_or_group\n )\n\n with h5py.File(filename, \"a\") as f: # \"a\": read/write if exists, create otherwise\n write_group(f, data)\n
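A small round-trip sketch (the file name is hypothetical): nested dictionaries become nested groups, and arrays become datasets that read_hdf5_group returns keyed by their full paths.

import numpy as np
from sleap_io.io.utils import read_hdf5_group, write_hdf5_group

data = {
    "poses": {"xy": np.zeros((10, 2)), "scores": np.ones(10)},
    "frame_inds": np.arange(10),
}
write_hdf5_group("scratch.h5", data)

loaded = read_hdf5_group("scratch.h5")
print(sorted(loaded))  # ['/frame_inds', '/poses/scores', '/poses/xy']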
"},{"location":"reference/sleap_io/io/video_reading/","title":"video_reading","text":""},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading","title":"sleap_io.io.video_reading
","text":"Backends for reading videos.
Classes:
Name DescriptionHDF5Video
Video backend for reading videos stored in HDF5 files.
ImageVideo
Video backend for reading videos stored as image files.
MediaVideo
Video backend for reading videos stored as common media files.
VideoBackend
Base class for video backends.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video","title":"HDF5Video
","text":" Bases: VideoBackend
Video backend for reading videos stored in HDF5 files.
This backend supports reading videos stored in HDF5 files, both as rank-4 datasets and as datasets containing lists of binary-encoded images.
Embedded image datasets are used in SLEAP when exporting package files (.pkg.slp
) with videos embedded in them. This is useful for bundling training or inference data without having to worry about the videos (or frame images) being moved or deleted. It is expected that these types of datasets will be in a Group
 with an int8
variable length dataset called \"video\"
. This dataset must also contain an attribute called \"format\" with a string describing the image format (e.g., \"png\" or \"jpg\") which will be used to decode it appropriately.
If a frame_numbers
dataset is present in the group, it will be used to map from source video frames to the frames in the dataset. This is useful to preserve frame indexing when exporting a subset of frames in the video. It will also be used to populate frame_map
and source_inds
attributes.
Attributes:
Name Type Descriptionfilename
Path to HDF5 file (.h5, .hdf5 or .slp).
grayscale
Whether to force grayscale. If None, autodetect on first frame load.
keep_open
Whether to keep the video reader open between calls to read frames. If False, the reader is closed after each call. If True (the default), the reader is kept open and cached for subsequent calls, which may improve performance when reading multiple frames.
dataset
Optional[str]
Name of dataset to read from. If None
, will try to find a rank-4 dataset by iterating through datasets in the file. If specifying an embedded dataset, this can be the group containing a \"video\" dataset or the dataset itself (e.g., \"video0\" or \"video0/video\").
input_format
str
Format of the data in the dataset. One of \"channels_last\" (the default) in (frames, height, width, channels)
order or \"channels_first\" in (frames, channels, width, height)
order. Embedded datasets should use the \"channels_last\" format.
frame_map
dict[int, int]
Mapping from frame indices to indices in the dataset. This is used to translate between the frame indices of the images within their source video and the indices of the images in the dataset. This is only used when reading embedded image datasets.
source_filename
Optional[str]
Path to the source video file. This is metadata and only used when reading embedded image datasets.
source_inds
Optional[ndarray]
Indices of the frames in the source video file. This is metadata and only used when reading embedded image datasets.
image_format
str
Format of the images in the embedded dataset. This is metadata and only used when reading embedded image datasets.
Methods:
Name Description__attrs_post_init__
Auto-detect dataset and frame map heuristically.
decode_embedded
Decode an embedded image string into a numpy array.
has_frame
Check if a frame index is contained in the video.
read_test_frame
Read a single frame from the video to test for grayscale.
Attributes:
Name Type Descriptionembedded_frame_inds
list[int]
Return the frame indices of the embedded images.
has_embedded_images
bool
Return True if the dataset contains embedded images.
img_shape
Tuple[int, int, int]
Shape of a single frame in the video as (height, width, channels)
.
num_frames
int
Number of frames in the video.
Source code insleap_io/io/video_reading.py
@attrs.define\nclass HDF5Video(VideoBackend):\n \"\"\"Video backend for reading videos stored in HDF5 files.\n\n This backend supports reading videos stored in HDF5 files, both in rank-4 datasets\n as well as in datasets with lists of binary-encoded images.\n\n Embedded image datasets are used in SLEAP when exporting package files (`.pkg.slp`)\n with videos embedded in them. This is useful for bundling training or inference data\n without having to worry about the videos (or frame images) being moved or deleted.\n It is expected that these types of datasets will be in a `Group` with a `int8`\n variable length dataset called `\"video\"`. This dataset must also contain an\n attribute called \"format\" with a string describing the image format (e.g., \"png\" or\n \"jpg\") which will be used to decode it appropriately.\n\n If a `frame_numbers` dataset is present in the group, it will be used to map from\n source video frames to the frames in the dataset. This is useful to preserve frame\n indexing when exporting a subset of frames in the video. It will also be used to\n populate `frame_map` and `source_inds` attributes.\n\n Attributes:\n filename: Path to HDF5 file (.h5, .hdf5 or .slp).\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n dataset: Name of dataset to read from. If `None`, will try to find a rank-4\n dataset by iterating through datasets in the file. If specifying an embedded\n dataset, this can be the group containing a \"video\" dataset or the dataset\n itself (e.g., \"video0\" or \"video0/video\").\n input_format: Format of the data in the dataset. One of \"channels_last\" (the\n default) in `(frames, height, width, channels)` order or \"channels_first\" in\n `(frames, channels, width, height)` order. Embedded datasets should use the\n \"channels_last\" format.\n frame_map: Mapping from frame indices to indices in the dataset. This is used to\n translate between the frame indices of the images within their source video\n and the indices of the images in the dataset. This is only used when reading\n embedded image datasets.\n source_filename: Path to the source video file. This is metadata and only used\n when reading embedded image datasets.\n source_inds: Indices of the frames in the source video file. This is metadata\n and only used when reading embedded image datasets.\n image_format: Format of the images in the embedded dataset. 
This is metadata and\n only used when reading embedded image datasets.\n \"\"\"\n\n dataset: Optional[str] = None\n input_format: str = attrs.field(\n default=\"channels_last\",\n validator=attrs.validators.in_([\"channels_last\", \"channels_first\"]),\n )\n frame_map: dict[int, int] = attrs.field(init=False, default=attrs.Factory(dict))\n source_filename: Optional[str] = None\n source_inds: Optional[np.ndarray] = None\n image_format: str = \"hdf5\"\n\n EXTS = (\"h5\", \"hdf5\", \"slp\")\n\n def __attrs_post_init__(self):\n \"\"\"Auto-detect dataset and frame map heuristically.\"\"\"\n # Check if the file accessible before applying heuristics.\n try:\n f = h5py.File(self.filename, \"r\")\n except OSError:\n return\n\n if self.dataset is None:\n # Iterate through datasets to find a rank 4 array.\n def find_movies(name, obj):\n if isinstance(obj, h5py.Dataset) and obj.ndim == 4:\n self.dataset = name\n return True\n\n f.visititems(find_movies)\n\n if self.dataset is None:\n # Iterate through datasets to find an embedded video dataset.\n def find_embedded(name, obj):\n if isinstance(obj, h5py.Dataset) and name.endswith(\"/video\"):\n self.dataset = name\n return True\n\n f.visititems(find_embedded)\n\n if self.dataset is None:\n # Couldn't find video datasets.\n return\n\n if isinstance(f[self.dataset], h5py.Group):\n # If this is a group, assume it's an embedded video dataset.\n if \"video\" in f[self.dataset]:\n self.dataset = f\"{self.dataset}/video\"\n\n if self.dataset.split(\"/\")[-1] == \"video\":\n # This may be an embedded video dataset. Check for frame map.\n ds = f[self.dataset]\n\n if \"format\" in ds.attrs:\n self.image_format = ds.attrs[\"format\"]\n\n if \"frame_numbers\" in ds.parent:\n frame_numbers = ds.parent[\"frame_numbers\"][:].astype(int)\n self.frame_map = {frame: idx for idx, frame in enumerate(frame_numbers)}\n self.source_inds = frame_numbers\n\n if \"source_video\" in ds.parent:\n self.source_filename = json.loads(\n ds.parent[\"source_video\"].attrs[\"json\"]\n )[\"backend\"][\"filename\"]\n\n f.close()\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n with h5py.File(self.filename, \"r\") as f:\n return f[self.dataset].shape[0]\n\n @property\n def img_shape(self) -> Tuple[int, int, int]:\n \"\"\"Shape of a single frame in the video as `(height, width, channels)`.\"\"\"\n with h5py.File(self.filename, \"r\") as f:\n ds = f[self.dataset]\n\n img_shape = None\n if \"height\" in ds.attrs:\n # Try to get shape from the attributes.\n img_shape = (\n ds.attrs[\"height\"],\n ds.attrs[\"width\"],\n ds.attrs[\"channels\"],\n )\n\n if img_shape[0] == 0 or img_shape[1] == 0:\n # Invalidate the shape if the attributes are zero.\n img_shape = None\n\n if img_shape is None and self.image_format == \"hdf5\" and ds.ndim == 4:\n # Use the dataset shape if just stored as a rank-4 array.\n img_shape = ds.shape[1:]\n\n if self.input_format == \"channels_first\":\n img_shape = img_shape[::-1]\n\n if img_shape is None:\n # Fall back to reading a test frame.\n return super().img_shape\n\n return int(img_shape[0]), int(img_shape[1]), int(img_shape[2])\n\n def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\"\"\"\n if self.frame_map:\n frame_idx = list(self.frame_map.keys())[0]\n else:\n frame_idx = 0\n return self._read_frame(frame_idx)\n\n @property\n def has_embedded_images(self) -> bool:\n \"\"\"Return True if the dataset contains embedded images.\"\"\"\n return self.image_format is not 
None and self.image_format != \"hdf5\"\n\n @property\n def embedded_frame_inds(self) -> list[int]:\n \"\"\"Return the frame indices of the embedded images.\"\"\"\n return list(self.frame_map.keys())\n\n def decode_embedded(self, img_string: np.ndarray) -> np.ndarray:\n \"\"\"Decode an embedded image string into a numpy array.\n\n Args:\n img_string: Binary string of the image as a `int8` numpy vector with the\n bytes as values corresponding to the format-encoded image.\n\n Returns:\n The decoded image as a numpy array of shape `(height, width, channels)`. If\n a rank-2 image is decoded, it will be expanded such that channels will be 1.\n\n This method does not apply grayscale conversion as per the `grayscale`\n attribute. Use the `get_frame` or `get_frames` methods of the `VideoBackend`\n to apply grayscale conversion rather than calling this function directly.\n \"\"\"\n if \"cv2\" in sys.modules:\n img = cv2.imdecode(img_string, cv2.IMREAD_UNCHANGED)\n else:\n img = iio.imread(BytesIO(img_string), extension=f\".{self.image_format}\")\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n\n def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n if self.frame_map:\n return frame_idx in self.frame_map\n else:\n return frame_idx < len(self)\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = h5py.File(self.filename, \"r\")\n f = self._open_reader\n else:\n f = h5py.File(self.filename, \"r\")\n\n ds = f[self.dataset]\n\n if self.frame_map:\n frame_idx = self.frame_map[frame_idx]\n\n img = ds[frame_idx]\n\n if self.has_embedded_images:\n img = self.decode_embedded(img)\n\n if self.input_format == \"channels_first\":\n img = np.transpose(img, (2, 1, 0))\n\n if not self.keep_open:\n f.close()\n return img\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Args:\n frame_inds: List of indices of frames to read.\n\n Returns:\n The frame as a numpy array of shape `(frames, height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frames` method of the `VideoBackend` class instead.\n \"\"\"\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = h5py.File(self.filename, \"r\")\n f = self._open_reader\n else:\n f = h5py.File(self.filename, \"r\")\n\n if self.frame_map:\n frame_inds = [self.frame_map[idx] for idx in frame_inds]\n\n ds = f[self.dataset]\n imgs = ds[frame_inds]\n\n if \"format\" in ds.attrs:\n imgs = np.stack(\n [self.decode_embedded(img) for img in imgs],\n axis=0,\n )\n\n if self.input_format == \"channels_first\":\n imgs = np.transpose(imgs, (0, 3, 2, 1))\n\n if not self.keep_open:\n f.close()\n\n return imgs\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.embedded_frame_inds","title":"embedded_frame_inds: list[int]
property
","text":"Return the frame indices of the embedded images.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.has_embedded_images","title":"has_embedded_images: bool
property
","text":"Return True if the dataset contains embedded images.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.img_shape","title":"img_shape: Tuple[int, int, int]
property
","text":"Shape of a single frame in the video as (height, width, channels)
.
num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Auto-detect dataset and frame map heuristically.
Source code in sleap_io/io/video_reading.py
def __attrs_post_init__(self):\n \"\"\"Auto-detect dataset and frame map heuristically.\"\"\"\n # Check if the file accessible before applying heuristics.\n try:\n f = h5py.File(self.filename, \"r\")\n except OSError:\n return\n\n if self.dataset is None:\n # Iterate through datasets to find a rank 4 array.\n def find_movies(name, obj):\n if isinstance(obj, h5py.Dataset) and obj.ndim == 4:\n self.dataset = name\n return True\n\n f.visititems(find_movies)\n\n if self.dataset is None:\n # Iterate through datasets to find an embedded video dataset.\n def find_embedded(name, obj):\n if isinstance(obj, h5py.Dataset) and name.endswith(\"/video\"):\n self.dataset = name\n return True\n\n f.visititems(find_embedded)\n\n if self.dataset is None:\n # Couldn't find video datasets.\n return\n\n if isinstance(f[self.dataset], h5py.Group):\n # If this is a group, assume it's an embedded video dataset.\n if \"video\" in f[self.dataset]:\n self.dataset = f\"{self.dataset}/video\"\n\n if self.dataset.split(\"/\")[-1] == \"video\":\n # This may be an embedded video dataset. Check for frame map.\n ds = f[self.dataset]\n\n if \"format\" in ds.attrs:\n self.image_format = ds.attrs[\"format\"]\n\n if \"frame_numbers\" in ds.parent:\n frame_numbers = ds.parent[\"frame_numbers\"][:].astype(int)\n self.frame_map = {frame: idx for idx, frame in enumerate(frame_numbers)}\n self.source_inds = frame_numbers\n\n if \"source_video\" in ds.parent:\n self.source_filename = json.loads(\n ds.parent[\"source_video\"].attrs[\"json\"]\n )[\"backend\"][\"filename\"]\n\n f.close()\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.decode_embedded","title":"decode_embedded(img_string)
","text":"Decode an embedded image string into a numpy array.
Parameters:
img_string (ndarray): Binary string of the image as an int8 numpy vector with the bytes as values corresponding to the format-encoded image.
Returns:
ndarray: The decoded image as a numpy array of shape (height, width, channels). If a rank-2 image is decoded, it will be expanded such that channels will be 1.
This method does not apply grayscale conversion as per the grayscale attribute. Use the get_frame or get_frames methods of the VideoBackend to apply grayscale conversion rather than calling this function directly.
sleap_io/io/video_reading.py
def decode_embedded(self, img_string: np.ndarray) -> np.ndarray:\n \"\"\"Decode an embedded image string into a numpy array.\n\n Args:\n img_string: Binary string of the image as a `int8` numpy vector with the\n bytes as values corresponding to the format-encoded image.\n\n Returns:\n The decoded image as a numpy array of shape `(height, width, channels)`. If\n a rank-2 image is decoded, it will be expanded such that channels will be 1.\n\n This method does not apply grayscale conversion as per the `grayscale`\n attribute. Use the `get_frame` or `get_frames` methods of the `VideoBackend`\n to apply grayscale conversion rather than calling this function directly.\n \"\"\"\n if \"cv2\" in sys.modules:\n img = cv2.imdecode(img_string, cv2.IMREAD_UNCHANGED)\n else:\n img = iio.imread(BytesIO(img_string), extension=f\".{self.image_format}\")\n\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n
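To illustrate the decoding step in isolation, the following standalone numpy/OpenCV sketch mimics what decode_embedded does when cv2 is importable; the encoded buffer is synthesized in memory here rather than read from an HDF5 dataset:
import cv2
import numpy as np

# Stand-in for one element of the variable-length "video" dataset:
# the raw bytes of a format-encoded (here PNG) image.
img = np.zeros((8, 8, 3), dtype="uint8")
ok, buf = cv2.imencode(".png", img)

# Decoding mirrors decode_embedded: imdecode on the byte vector, then make
# sure a channels axis is present for rank-2 (grayscale) images.
decoded = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)
if decoded.ndim == 2:
    decoded = np.expand_dims(decoded, axis=-1)
print(decoded.shape)  # (8, 8, 3)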
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.has_frame","title":"has_frame(frame_idx)
","text":"Check if a frame index is contained in the video.
Parameters:
frame_idx (int): Index of frame to check. (required)
Returns:
bool: True if the index is contained in the video, otherwise False.
sleap_io/io/video_reading.py
def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n if self.frame_map:\n return frame_idx in self.frame_map\n else:\n return frame_idx < len(self)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.HDF5Video.read_test_frame","title":"read_test_frame()
","text":"Read a single frame from the video to test for grayscale.
Source code in sleap_io/io/video_reading.py
def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\"\"\"\n if self.frame_map:\n frame_idx = list(self.frame_map.keys())[0]\n else:\n frame_idx = 0\n return self._read_frame(frame_idx)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo","title":"ImageVideo
","text":" Bases: VideoBackend
Video backend for reading videos stored as image files.
This backend supports reading videos stored as a list of images.
Attributes:
filename: Path to image files.
grayscale: Whether to force grayscale. If None, autodetect on first frame load.
Methods:
find_images: Find images in a folder and return a list of filenames.
Attributes:
num_frames (int): Number of frames in the video.
Source code in sleap_io/io/video_reading.py
@attrs.define\nclass ImageVideo(VideoBackend):\n \"\"\"Video backend for reading videos stored as image files.\n\n This backend supports reading videos stored as a list of images.\n\n Attributes:\n filename: Path to image files.\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n \"\"\"\n\n EXTS = (\"png\", \"jpg\", \"jpeg\", \"tif\", \"tiff\", \"bmp\")\n\n @staticmethod\n def find_images(folder: str) -> list[str]:\n \"\"\"Find images in a folder and return a list of filenames.\"\"\"\n folder = Path(folder)\n return sorted(\n [f.as_posix() for f in folder.glob(\"*\") if f.suffix[1:] in ImageVideo.EXTS]\n )\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n return len(self.filename)\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n img = iio.imread(self.filename[frame_idx])\n if img.ndim == 2:\n img = np.expand_dims(img, axis=-1)\n return img\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.ImageVideo.find_images","title":"find_images(folder)
staticmethod
","text":"Find images in a folder and return a list of filenames.
Source code in sleap_io/io/video_reading.py
@staticmethod\ndef find_images(folder: str) -> list[str]:\n \"\"\"Find images in a folder and return a list of filenames.\"\"\"\n folder = Path(folder)\n return sorted(\n [f.as_posix() for f in folder.glob(\"*\") if f.suffix[1:] in ImageVideo.EXTS]\n )\n
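A short usage sketch (with a hypothetical frames/ folder) showing how find_images and the backend fit together:
from sleap_io.io.video_reading import ImageVideo

# Hypothetical folder of frame images (e.g. frame_0000.png, frame_0001.png, ...).
filenames = ImageVideo.find_images("frames/")  # sorted list of image paths
backend = ImageVideo(filenames)

print(len(backend))         # one frame per image file
img = backend.get_frame(0)  # (height, width, channels)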
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo","title":"MediaVideo
","text":" Bases: VideoBackend
Video backend for reading videos stored as common media files.
This backend supports reading through FFMPEG (the default), pyav, or OpenCV. Here are their trade-offs:
- \"opencv\": Fastest video reader, but only supports a limited number of codecs\n and may not be able to read some videos. It requires `opencv-python` to be\n installed. It is the fastest because it uses the OpenCV C++ library to read\n videos, but is limited by the version of FFMPEG that was linked into it at\n build time as well as the OpenCV version used.\n- \"FFMPEG\": Slowest, but most reliable. This is the default backend. It requires\n `imageio-ffmpeg` and a `ffmpeg` executable on the system path (which can be\n installed via conda). The `imageio` plugin for FFMPEG reads frames into raw\n bytes which are communicated to Python through STDOUT on a subprocess pipe,\n which can be slow. However, it is the most reliable and feature-complete. If\n you install the conda-forge version of ffmpeg, it will be compiled with\n support for many codecs, including GPU-accelerated codecs like NVDEC for\n H264 and others.\n- \"pyav\": Supports most codecs that FFMPEG does, but not as complete or reliable\n of an implementation in `imageio` as FFMPEG for some video types. It is\n faster than FFMPEG because it uses the `av` package to read frames directly\n into numpy arrays in memory without the need for a subprocess pipe. These\n are Python bindings for the C library libav, which is the same library that\n FFMPEG uses under the hood.\n
Attributes:
filename: Path to video file.
grayscale: Whether to force grayscale. If None, autodetect on first frame load.
keep_open: Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
plugin (str): Video plugin to use. One of \"opencv\", \"FFMPEG\", or \"pyav\". If None, will use the first available plugin in the order listed above.
Attributes:
num_frames (int): Number of frames in the video.
reader (object): Return the reader object for the video, caching if necessary.
Source code in sleap_io/io/video_reading.py
@attrs.define\nclass MediaVideo(VideoBackend):\n \"\"\"Video backend for reading videos stored as common media files.\n\n This backend supports reading through FFMPEG (the default), pyav, or OpenCV. Here\n are their trade-offs:\n\n - \"opencv\": Fastest video reader, but only supports a limited number of codecs\n and may not be able to read some videos. It requires `opencv-python` to be\n installed. It is the fastest because it uses the OpenCV C++ library to read\n videos, but is limited by the version of FFMPEG that was linked into it at\n build time as well as the OpenCV version used.\n - \"FFMPEG\": Slowest, but most reliable. This is the default backend. It requires\n `imageio-ffmpeg` and a `ffmpeg` executable on the system path (which can be\n installed via conda). The `imageio` plugin for FFMPEG reads frames into raw\n bytes which are communicated to Python through STDOUT on a subprocess pipe,\n which can be slow. However, it is the most reliable and feature-complete. If\n you install the conda-forge version of ffmpeg, it will be compiled with\n support for many codecs, including GPU-accelerated codecs like NVDEC for\n H264 and others.\n - \"pyav\": Supports most codecs that FFMPEG does, but not as complete or reliable\n of an implementation in `imageio` as FFMPEG for some video types. It is\n faster than FFMPEG because it uses the `av` package to read frames directly\n into numpy arrays in memory without the need for a subprocess pipe. These\n are Python bindings for the C library libav, which is the same library that\n FFMPEG uses under the hood.\n\n Attributes:\n filename: Path to video file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n plugin: Video plugin to use. One of \"opencv\", \"FFMPEG\", or \"pyav\". If `None`,\n will use the first available plugin in the order listed above.\n \"\"\"\n\n plugin: str = attrs.field(\n validator=attrs.validators.in_([\"opencv\", \"FFMPEG\", \"pyav\"])\n )\n\n EXTS = (\"mp4\", \"avi\", \"mov\", \"mj2\", \"mkv\")\n\n @plugin.default\n def _default_plugin(self) -> str:\n if \"cv2\" in sys.modules:\n return \"opencv\"\n elif \"imageio_ffmpeg\" in sys.modules:\n return \"FFMPEG\"\n elif \"av\" in sys.modules:\n return \"pyav\"\n else:\n raise ImportError(\n \"No video plugins found. 
Install opencv-python, imageio-ffmpeg, or av.\"\n )\n\n @property\n def reader(self) -> object:\n \"\"\"Return the reader object for the video, caching if necessary.\"\"\"\n if self.keep_open:\n if self._open_reader is None:\n if self.plugin == \"opencv\":\n self._open_reader = cv2.VideoCapture(self.filename)\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n self._open_reader = iio.imopen(\n self.filename, \"r\", plugin=self.plugin\n )\n return self._open_reader\n else:\n if self.plugin == \"opencv\":\n return cv2.VideoCapture(self.filename)\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n return iio.imopen(self.filename, \"r\", plugin=self.plugin)\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n if self.plugin == \"opencv\":\n return int(self.reader.get(cv2.CAP_PROP_FRAME_COUNT))\n else:\n props = iio.improps(self.filename, plugin=self.plugin)\n n_frames = props.n_images\n if np.isinf(n_frames):\n legacy_reader = self.reader.legacy_get_reader()\n # Note: This might be super slow for some videos, so maybe we should\n # defer evaluation of this or give the user control over it.\n n_frames = legacy_reader.count_frames()\n return n_frames\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n The frame as a numpy array of shape `(height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. It is recommended to use the\n `get_frame` method of the `VideoBackend` class instead.\n \"\"\"\n failed = False\n if self.plugin == \"opencv\":\n if self.reader.get(cv2.CAP_PROP_POS_FRAMES) != frame_idx:\n self.reader.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)\n success, img = self.reader.read()\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n if self.keep_open:\n img = self.reader.read(index=frame_idx)\n else:\n with iio.imopen(self.filename, \"r\", plugin=self.plugin) as reader:\n img = reader.read(index=frame_idx)\n\n success = (not failed) and (img is not None)\n if not success:\n raise IndexError(f\"Failed to read frame index {frame_idx}.\")\n return img\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Args:\n frame_inds: List of indices of frames to read.\n\n Returns:\n The frame as a numpy array of shape `(frames, height, width, channels)`.\n\n Notes:\n This does not apply grayscale conversion. 
It is recommended to use the\n `get_frames` method of the `VideoBackend` class instead.\n \"\"\"\n if self.plugin == \"opencv\":\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = cv2.VideoCapture(self.filename)\n reader = self._open_reader\n else:\n reader = cv2.VideoCapture(self.filename)\n\n reader.set(cv2.CAP_PROP_POS_FRAMES, frame_inds[0])\n imgs = []\n for idx in frame_inds:\n if reader.get(cv2.CAP_PROP_POS_FRAMES) != idx:\n reader.set(cv2.CAP_PROP_POS_FRAMES, idx)\n _, img = reader.read()\n img = img[..., ::-1] # BGR -> RGB\n imgs.append(img)\n imgs = np.stack(imgs, axis=0)\n\n elif self.plugin == \"pyav\" or self.plugin == \"FFMPEG\":\n if self.keep_open:\n if self._open_reader is None:\n self._open_reader = iio.imopen(\n self.filename, \"r\", plugin=self.plugin\n )\n reader = self._open_reader\n imgs = np.stack([reader.read(index=idx) for idx in frame_inds], axis=0)\n else:\n with iio.imopen(self.filename, \"r\", plugin=self.plugin) as reader:\n imgs = np.stack(\n [reader.read(index=idx) for idx in frame_inds], axis=0\n )\n return imgs\n
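As a minimal sketch (the path is hypothetical), the plugin can be pinned explicitly instead of relying on which packages happen to be importable:
from sleap_io.io.video_reading import MediaVideo

backend = MediaVideo("video.mp4", plugin="FFMPEG", keep_open=True)

print(backend.num_frames)
imgs = backend.get_frames([0, 1, 2])  # (3, height, width, channels)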
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.MediaVideo.reader","title":"reader: object
property
","text":"Return the reader object for the video, caching if necessary.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend","title":"VideoBackend
","text":"Base class for video backends.
This class is not meant to be used directly. Instead, use the from_filename
constructor to create a backend instance.
Attributes:
filename (str | Path | list[str] | list[Path]): Path to video file(s).
grayscale (Optional[bool]): Whether to force grayscale. If None, autodetect on first frame load.
keep_open (bool): Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames.
Methods:
__getitem__: Return a single frame or a list of frames from the video.
__len__: Return number of frames in the video.
detect_grayscale: Detect whether the video is grayscale.
from_filename: Create a VideoBackend from a filename.
get_frame: Read a single frame from the video.
get_frames: Read a list of frames from the video.
has_frame: Check if a frame index is contained in the video.
read_test_frame: Read a single frame from the video to test for grayscale.
Attributes:
frames (int): Number of frames in the video.
img_shape (Tuple[int, int, int]): Shape of a single frame in the video.
num_frames (int): Number of frames in the video. Must be implemented in subclasses.
shape (Tuple[int, int, int, int]): Shape of the video as a tuple of (frames, height, width, channels).
sleap_io/io/video_reading.py
@attrs.define\nclass VideoBackend:\n \"\"\"Base class for video backends.\n\n This class is not meant to be used directly. Instead, use the `from_filename`\n constructor to create a backend instance.\n\n Attributes:\n filename: Path to video file(s).\n grayscale: Whether to force grayscale. If None, autodetect on first frame load.\n keep_open: Whether to keep the video reader open between calls to read frames.\n If False, will close the reader after each call. If True (the default), it\n will keep the reader open and cache it for subsequent calls which may\n enhance the performance of reading multiple frames.\n \"\"\"\n\n filename: str | Path | list[str] | list[Path]\n grayscale: Optional[bool] = None\n keep_open: bool = True\n _cached_shape: Optional[Tuple[int, int, int, int]] = None\n _open_reader: Optional[object] = None\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a VideoBackend from a filename.\n\n Args:\n filename: Path to video file(s).\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Returns:\n VideoBackend subclass instance.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if type(filename) == str and Path(filename).is_dir():\n filename = ImageVideo.find_images(filename)\n\n if type(filename) == list:\n filename = [Path(f).as_posix() for f in filename]\n return ImageVideo(\n filename, grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(ImageVideo.EXTS):\n return ImageVideo(\n [filename], grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(MediaVideo.EXTS):\n return MediaVideo(\n filename,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(MediaVideo, kwargs),\n )\n elif filename.endswith(HDF5Video.EXTS):\n return HDF5Video(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(HDF5Video, kwargs),\n )\n else:\n raise ValueError(f\"Unknown video file type: {filename}\")\n\n def _read_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video. Must be implemented in subclasses.\"\"\"\n raise NotImplementedError\n\n def _read_frames(self, frame_inds: list) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\"\"\"\n return np.stack([self.get_frame(i) for i in frame_inds], axis=0)\n\n def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\n\n Note:\n This reads the frame at index 0. This may not be appropriate if the first\n frame is not available in a given backend.\n \"\"\"\n return self._read_frame(0)\n\n def detect_grayscale(self, test_img: np.ndarray | None = None) -> bool:\n \"\"\"Detect whether the video is grayscale.\n\n This works by reading in a test frame and comparing the first and last channel\n for equality. It may fail in cases where, due to compression, the first and\n last channels are not exactly the same.\n\n Args:\n test_img: Optional test image to use. 
If not provided, a test image will be\n loaded via the `read_test_frame` method.\n\n Returns:\n Whether the video is grayscale. This value is also cached in the `grayscale`\n attribute of the class.\n \"\"\"\n if test_img is None:\n test_img = self.read_test_frame()\n is_grayscale = np.array_equal(test_img[..., 0], test_img[..., -1])\n self.grayscale = is_grayscale\n return is_grayscale\n\n @property\n def num_frames(self) -> int:\n \"\"\"Number of frames in the video. Must be implemented in subclasses.\"\"\"\n raise NotImplementedError\n\n @property\n def img_shape(self) -> Tuple[int, int, int]:\n \"\"\"Shape of a single frame in the video.\"\"\"\n height, width, channels = self.read_test_frame().shape\n if self.grayscale is None:\n self.detect_grayscale()\n if self.grayscale is False:\n channels = 3\n elif self.grayscale is True:\n channels = 1\n return int(height), int(width), int(channels)\n\n @property\n def shape(self) -> Tuple[int, int, int, int]:\n \"\"\"Shape of the video as a tuple of `(frames, height, width, channels)`.\n\n On first call, this will defer to `num_frames` and `img_shape` to determine the\n full shape. This call may be expensive for some subclasses, so the result is\n cached and returned on subsequent calls.\n \"\"\"\n if self._cached_shape is not None:\n return self._cached_shape\n else:\n shape = (self.num_frames,) + self.img_shape\n self._cached_shape = shape\n return shape\n\n @property\n def frames(self) -> int:\n \"\"\"Number of frames in the video.\"\"\"\n return self.shape[0]\n\n def __len__(self) -> int:\n \"\"\"Return number of frames in the video.\"\"\"\n return self.shape[0]\n\n def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n return frame_idx < len(self)\n\n def get_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n Frame as a numpy array of shape `(height, width, channels)` where the\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frames`\n \"\"\"\n if not self.has_frame(frame_idx):\n raise IndexError(f\"Frame index {frame_idx} out of range.\")\n\n img = self._read_frame(frame_idx)\n\n if self.grayscale is None:\n self.detect_grayscale(img)\n\n if self.grayscale:\n img = img[..., [0]]\n\n return img\n\n def get_frames(self, frame_inds: list[int]) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Depending on the backend implementation, this may be faster than reading frames\n individually using `get_frame`.\n\n Args:\n frame_inds: List of frame indices to read.\n\n Returns:\n Frames as a numpy array of shape `(frames, height, width, channels)` where\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: 
`get_frame`\n \"\"\"\n imgs = self._read_frames(frame_inds)\n\n if self.grayscale is None:\n self.detect_grayscale(imgs[0])\n\n if self.grayscale:\n imgs = imgs[..., [0]]\n\n return imgs\n\n def __getitem__(self, ind: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return a single frame or a list of frames from the video.\n\n Args:\n ind: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: get_frame, get_frames\n \"\"\"\n if np.isscalar(ind):\n return self.get_frame(ind)\n else:\n if type(ind) is slice:\n start = (ind.start or 0) % len(self)\n stop = ind.stop or len(self)\n if stop < 0:\n stop = len(self) + stop\n step = ind.step or 1\n ind = range(start, stop, step)\n return self.get_frames(ind)\n
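A brief sketch of the intended entry point, assuming a hypothetical video.mp4; from_filename dispatches on the extension (media files, HDF5/.slp files, image files, or a folder of images) and returns the matching subclass:
from sleap_io.io.video_reading import VideoBackend

backend = VideoBackend.from_filename("video.mp4", grayscale=None)

print(type(backend).__name__)  # e.g. MediaVideo
print(backend.shape)           # (frames, height, width, channels)
frame = backend[0]             # single frame via __getitem__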
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.frames","title":"frames: int
property
","text":"Number of frames in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.img_shape","title":"img_shape: Tuple[int, int, int]
property
","text":"Shape of a single frame in the video.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.num_frames","title":"num_frames: int
property
","text":"Number of frames in the video. Must be implemented in subclasses.
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.shape","title":"shape: Tuple[int, int, int, int]
property
","text":"Shape of the video as a tuple of (frames, height, width, channels)
.
On first call, this will defer to num_frames
and img_shape
to determine the full shape. This call may be expensive for some subclasses, so the result is cached and returned on subsequent calls.
__getitem__(ind)
","text":"Return a single frame or a list of frames from the video.
Parameters:
ind (int | list[int] | slice): Index or list of indices of frames to read. (required)
Returns:
ndarray: Frame or frames as a numpy array of shape (height, width, channels) if a scalar index is provided, or (frames, height, width, channels) if a list of indices is provided.
See also: get_frame, get_frames
Source code in sleap_io/io/video_reading.py
def __getitem__(self, ind: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return a single frame or a list of frames from the video.\n\n Args:\n ind: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: get_frame, get_frames\n \"\"\"\n if np.isscalar(ind):\n return self.get_frame(ind)\n else:\n if type(ind) is slice:\n start = (ind.start or 0) % len(self)\n stop = ind.stop or len(self)\n if stop < 0:\n stop = len(self) + stop\n step = ind.step or 1\n ind = range(start, stop, step)\n return self.get_frames(ind)\n
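The indexing forms supported by __getitem__, sketched against a backend built from a hypothetical file:
from sleap_io.io.video_reading import VideoBackend

backend = VideoBackend.from_filename("video.mp4")  # hypothetical path

single = backend[5]           # scalar index -> (height, width, channels)
strided = backend[0:10:2]     # slice -> (5, height, width, channels)
picked = backend[[3, 7, 11]]  # list of indices -> (3, height, width, channels)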
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.__len__","title":"__len__()
","text":"Return number of frames in the video.
Source code in sleap_io/io/video_reading.py
def __len__(self) -> int:\n \"\"\"Return number of frames in the video.\"\"\"\n return self.shape[0]\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.detect_grayscale","title":"detect_grayscale(test_img=None)
","text":"Detect whether the video is grayscale.
This works by reading in a test frame and comparing the first and last channel for equality. It may fail in cases where, due to compression, the first and last channels are not exactly the same.
Parameters:
test_img (ndarray | None): Optional test image to use. If not provided, a test image will be loaded via the read_test_frame method. Defaults to None.
Returns:
bool: Whether the video is grayscale. This value is also cached in the grayscale attribute of the class.
sleap_io/io/video_reading.py
def detect_grayscale(self, test_img: np.ndarray | None = None) -> bool:\n \"\"\"Detect whether the video is grayscale.\n\n This works by reading in a test frame and comparing the first and last channel\n for equality. It may fail in cases where, due to compression, the first and\n last channels are not exactly the same.\n\n Args:\n test_img: Optional test image to use. If not provided, a test image will be\n loaded via the `read_test_frame` method.\n\n Returns:\n Whether the video is grayscale. This value is also cached in the `grayscale`\n attribute of the class.\n \"\"\"\n if test_img is None:\n test_img = self.read_test_frame()\n is_grayscale = np.array_equal(test_img[..., 0], test_img[..., -1])\n self.grayscale = is_grayscale\n return is_grayscale\n
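The channel-comparison heuristic can be illustrated with plain numpy (this is a standalone sketch of the same check, not a call into the library):
import numpy as np

# A grayscale frame stored as RGB has identical first and last channels.
gray_as_rgb = np.tile(np.arange(16, dtype="uint8").reshape(4, 4, 1), (1, 1, 3))
color = np.random.randint(0, 255, size=(4, 4, 3), dtype="uint8")

print(np.array_equal(gray_as_rgb[..., 0], gray_as_rgb[..., -1]))  # True
print(np.array_equal(color[..., 0], color[..., -1]))              # almost surely False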
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, **kwargs)
classmethod
","text":"Create a VideoBackend from a filename.
Parameters:
filename (str | list[str]): Path to video file(s). (required)
dataset (Optional[str]): Name of dataset in HDF5 file. Defaults to None.
grayscale (Optional[bool]): Whether to force grayscale. If None, autodetect on first frame load. Defaults to None.
keep_open (bool): Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames. Defaults to True.
Returns:
VideoBackend: VideoBackend subclass instance.
Source code in sleap_io/io/video_reading.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a VideoBackend from a filename.\n\n Args:\n filename: Path to video file(s).\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Returns:\n VideoBackend subclass instance.\n \"\"\"\n if isinstance(filename, Path):\n filename = filename.as_posix()\n\n if type(filename) == str and Path(filename).is_dir():\n filename = ImageVideo.find_images(filename)\n\n if type(filename) == list:\n filename = [Path(f).as_posix() for f in filename]\n return ImageVideo(\n filename, grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(ImageVideo.EXTS):\n return ImageVideo(\n [filename], grayscale=grayscale, **_get_valid_kwargs(ImageVideo, kwargs)\n )\n elif filename.endswith(MediaVideo.EXTS):\n return MediaVideo(\n filename,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(MediaVideo, kwargs),\n )\n elif filename.endswith(HDF5Video.EXTS):\n return HDF5Video(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **_get_valid_kwargs(HDF5Video, kwargs),\n )\n else:\n raise ValueError(f\"Unknown video file type: {filename}\")\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.get_frame","title":"get_frame(frame_idx)
","text":"Read a single frame from the video.
Parameters:
frame_idx (int): Index of frame to read. (required)
Returns:
ndarray: Frame as a numpy array of shape (height, width, channels) where the channels dimension is 1 for grayscale videos and 3 for color videos.
If the grayscale attribute is set to True, the channels dimension will be reduced to 1 if an RGB frame is loaded from the backend.
If the grayscale attribute is set to None, the grayscale attribute will be automatically set based on the first frame read.
See also: get_frames
sleap_io/io/video_reading.py
def get_frame(self, frame_idx: int) -> np.ndarray:\n \"\"\"Read a single frame from the video.\n\n Args:\n frame_idx: Index of frame to read.\n\n Returns:\n Frame as a numpy array of shape `(height, width, channels)` where the\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frames`\n \"\"\"\n if not self.has_frame(frame_idx):\n raise IndexError(f\"Frame index {frame_idx} out of range.\")\n\n img = self._read_frame(frame_idx)\n\n if self.grayscale is None:\n self.detect_grayscale(img)\n\n if self.grayscale:\n img = img[..., [0]]\n\n return img\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.get_frames","title":"get_frames(frame_inds)
","text":"Read a list of frames from the video.
Depending on the backend implementation, this may be faster than reading frames individually using get_frame
.
Parameters:
frame_inds (list[int]): List of frame indices to read. (required)
Returns:
ndarray: Frames as a numpy array of shape (frames, height, width, channels) where the channels dimension is 1 for grayscale videos and 3 for color videos.
If the grayscale attribute is set to True, the channels dimension will be reduced to 1 if an RGB frame is loaded from the backend.
If the grayscale attribute is set to None, the grayscale attribute will be automatically set based on the first frame read.
See also: get_frame
sleap_io/io/video_reading.py
def get_frames(self, frame_inds: list[int]) -> np.ndarray:\n \"\"\"Read a list of frames from the video.\n\n Depending on the backend implementation, this may be faster than reading frames\n individually using `get_frame`.\n\n Args:\n frame_inds: List of frame indices to read.\n\n Returns:\n Frames as a numpy array of shape `(frames, height, width, channels)` where\n `channels` dimension is 1 for grayscale videos and 3 for color videos.\n\n Notes:\n If the `grayscale` attribute is set to `True`, the `channels` dimension will\n be reduced to 1 if an RGB frame is loaded from the backend.\n\n If the `grayscale` attribute is set to `None`, the `grayscale` attribute\n will be automatically set based on the first frame read.\n\n See also: `get_frame`\n \"\"\"\n imgs = self._read_frames(frame_inds)\n\n if self.grayscale is None:\n self.detect_grayscale(imgs[0])\n\n if self.grayscale:\n imgs = imgs[..., [0]]\n\n return imgs\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.has_frame","title":"has_frame(frame_idx)
","text":"Check if a frame index is contained in the video.
Parameters:
frame_idx (int): Index of frame to check. (required)
Returns:
bool: True if the index is contained in the video, otherwise False.
sleap_io/io/video_reading.py
def has_frame(self, frame_idx: int) -> bool:\n \"\"\"Check if a frame index is contained in the video.\n\n Args:\n frame_idx: Index of frame to check.\n\n Returns:\n `True` if the index is contained in the video, otherwise `False`.\n \"\"\"\n return frame_idx < len(self)\n
"},{"location":"reference/sleap_io/io/video_reading/#sleap_io.io.video_reading.VideoBackend.read_test_frame","title":"read_test_frame()
","text":"Read a single frame from the video to test for grayscale.
Note: This reads the frame at index 0. This may not be appropriate if the first frame is not available in a given backend.
Source code in sleap_io/io/video_reading.py
def read_test_frame(self) -> np.ndarray:\n \"\"\"Read a single frame from the video to test for grayscale.\n\n Note:\n This reads the frame at index 0. This may not be appropriate if the first\n frame is not available in a given backend.\n \"\"\"\n return self._read_frame(0)\n
"},{"location":"reference/sleap_io/io/video_writing/","title":"video_writing","text":""},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing","title":"sleap_io.io.video_writing
","text":"Utilities for writing videos.
Classes:
VideoWriter: Simple video writer using imageio and FFMPEG.
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter","title":"VideoWriter
","text":"Simple video writer using imageio and FFMPEG.
Attributes:
filename (Path): Path to output video file.
fps (float): Frames per second. Defaults to 30.
pixelformat (str): Pixel format for video. Defaults to \"yuv420p\".
codec (str): Codec to use for encoding. Defaults to \"libx264\".
crf (int): Constant rate factor to control lossiness of video. Values go from 2 to 32, with numbers in the 18 to 30 range being most common. Lower values mean less compressed/higher quality. Defaults to 25. No effect if codec is not \"libx264\".
preset (str): H264 encoding preset. Defaults to \"superfast\". No effect if codec is not \"libx264\".
output_params (list[str]): Additional output parameters for FFMPEG. This should be a list of strings corresponding to command line arguments for FFMPEG and libx264. Use ffmpeg -h encoder=libx264 to see all options for libx264 output_params.
This class can be used as a context manager to ensure the video is properly closed after writing. For example:
with VideoWriter(\"output.mp4\") as writer:\n for frame in frames:\n writer(frame)\n
Methods:
__call__: Write a frame to the video.
__enter__: Context manager entry.
__exit__: Context manager exit.
build_output_params: Build the output parameters for FFMPEG.
close: Close the video writer.
open: Open the video writer.
write_frame: Write a frame to the video.
Source code in sleap_io/io/video_writing.py
@attrs.define\nclass VideoWriter:\n \"\"\"Simple video writer using imageio and FFMPEG.\n\n Attributes:\n filename: Path to output video file.\n fps: Frames per second. Defaults to 30.\n pixelformat: Pixel format for video. Defaults to \"yuv420p\".\n codec: Codec to use for encoding. Defaults to \"libx264\".\n crf: Constant rate factor to control lossiness of video. Values go from 2 to 32,\n with numbers in the 18 to 30 range being most common. Lower values mean less\n compressed/higher quality. Defaults to 25. No effect if codec is not\n \"libx264\".\n preset: H264 encoding preset. Defaults to \"superfast\". No effect if codec is not\n \"libx264\".\n output_params: Additional output parameters for FFMPEG. This should be a list of\n strings corresponding to command line arguments for FFMPEG and libx264. Use\n `ffmpeg -h encoder=libx264` to see all options for libx264 output_params.\n\n Notes:\n This class can be used as a context manager to ensure the video is properly\n closed after writing. For example:\n\n ```python\n with VideoWriter(\"output.mp4\") as writer:\n for frame in frames:\n writer(frame)\n ```\n \"\"\"\n\n filename: Path = attrs.field(converter=Path)\n fps: float = 30\n pixelformat: str = \"yuv420p\"\n codec: str = \"libx264\"\n crf: int = 25\n preset: str = \"superfast\"\n output_params: list[str] = attrs.field(factory=list)\n _writer: \"imageio.plugins.ffmpeg.FfmpegFormat.Writer\" | None = None\n\n def build_output_params(self) -> list[str]:\n \"\"\"Build the output parameters for FFMPEG.\"\"\"\n output_params = []\n if self.codec == \"libx264\":\n output_params.extend(\n [\n \"-crf\",\n str(self.crf),\n \"-preset\",\n self.preset,\n ]\n )\n return output_params + self.output_params\n\n def open(self):\n \"\"\"Open the video writer.\"\"\"\n self.close()\n\n self.filename.parent.mkdir(parents=True, exist_ok=True)\n self._writer = iio_v2.get_writer(\n self.filename.as_posix(),\n format=\"FFMPEG\",\n fps=self.fps,\n codec=self.codec,\n pixelformat=self.pixelformat,\n output_params=self.build_output_params(),\n )\n\n def close(self):\n \"\"\"Close the video writer.\"\"\"\n if self._writer is not None:\n self._writer.close()\n self._writer = None\n\n def write_frame(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n if self._writer is None:\n self.open()\n\n self._writer.append_data(frame)\n\n def __enter__(self):\n \"\"\"Context manager entry.\"\"\"\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> Optional[bool]:\n \"\"\"Context manager exit.\"\"\"\n self.close()\n return False\n\n def __call__(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n self.write_frame(frame)\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__call__","title":"__call__(frame)
","text":"Write a frame to the video.
Parameters:
frame (ndarray): Frame to write to video. Should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels). (required)
Source code in sleap_io/io/video_writing.py
def __call__(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n self.write_frame(frame)\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__enter__","title":"__enter__()
","text":"Context manager entry.
Source code in sleap_io/io/video_writing.py
def __enter__(self):\n \"\"\"Context manager entry.\"\"\"\n return self\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.__exit__","title":"__exit__(exc_type, exc_value, traceback)
","text":"Context manager exit.
Source code insleap_io/io/video_writing.py
def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n) -> Optional[bool]:\n \"\"\"Context manager exit.\"\"\"\n self.close()\n return False\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.build_output_params","title":"build_output_params()
","text":"Build the output parameters for FFMPEG.
Source code in sleap_io/io/video_writing.py
def build_output_params(self) -> list[str]:\n \"\"\"Build the output parameters for FFMPEG.\"\"\"\n output_params = []\n if self.codec == \"libx264\":\n output_params.extend(\n [\n \"-crf\",\n str(self.crf),\n \"-preset\",\n self.preset,\n ]\n )\n return output_params + self.output_params\n
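For example (the output filename is hypothetical; nothing is written until open or write_frame is called), the default libx264 codec yields the CRF and preset flags:
from sleap_io.io.video_writing import VideoWriter

writer = VideoWriter("clip.mp4", crf=23, preset="veryfast")
print(writer.build_output_params())  # ['-crf', '23', '-preset', 'veryfast']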
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.close","title":"close()
","text":"Close the video writer.
Source code insleap_io/io/video_writing.py
def close(self):\n \"\"\"Close the video writer.\"\"\"\n if self._writer is not None:\n self._writer.close()\n self._writer = None\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.open","title":"open()
","text":"Open the video writer.
Source code insleap_io/io/video_writing.py
def open(self):\n \"\"\"Open the video writer.\"\"\"\n self.close()\n\n self.filename.parent.mkdir(parents=True, exist_ok=True)\n self._writer = iio_v2.get_writer(\n self.filename.as_posix(),\n format=\"FFMPEG\",\n fps=self.fps,\n codec=self.codec,\n pixelformat=self.pixelformat,\n output_params=self.build_output_params(),\n )\n
"},{"location":"reference/sleap_io/io/video_writing/#sleap_io.io.video_writing.VideoWriter.write_frame","title":"write_frame(frame)
","text":"Write a frame to the video.
Parameters:
frame (ndarray): Frame to write to video. Should be a 2D or 3D numpy array with dimensions (height, width) or (height, width, channels). (required)
Source code in sleap_io/io/video_writing.py
def write_frame(self, frame: np.ndarray):\n \"\"\"Write a frame to the video.\n\n Args:\n frame: Frame to write to video. Should be a 2D or 3D numpy array with\n dimensions (height, width) or (height, width, channels).\n \"\"\"\n if self._writer is None:\n self.open()\n\n self._writer.append_data(frame)\n
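Putting the reading and writing halves together, a minimal end-to-end sketch (both paths are hypothetical) might look like:
from sleap_io.io.video_reading import VideoBackend
from sleap_io.io.video_writing import VideoWriter

backend = VideoBackend.from_filename("input.mp4")

# Re-encode the first 100 frames; __call__ forwards to write_frame.
with VideoWriter("output.mp4", fps=30) as writer:
    for frame_idx in range(min(100, len(backend))):
        writer(backend[frame_idx])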
"},{"location":"reference/sleap_io/model/","title":"model","text":""},{"location":"reference/sleap_io/model/#sleap_io.model","title":"sleap_io.model
","text":"This subpackage contains data model interfaces.
Modules:
camera: Data structure for a single camera view in a multi-camera setup.
instance: Data structures for data associated with a single instance such as an animal.
labeled_frame: Data structures for data contained within a single video frame.
labels: Data structure for the labels, a top-level container for pose data.
skeleton: Data model for skeletons.
suggestions: Data module for suggestions.
video: Data model for videos.
"},{"location":"reference/sleap_io/model/camera/","title":"camera","text":""},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera","title":"sleap_io.model.camera
","text":"Data structure for a single camera view in a multi-camera setup.
Classes:
Camera: A camera used to record in a multi-view RecordingSession.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera","title":"Camera
","text":"A camera used to record in a multi-view RecordingSession.
Attributes:
matrix (ndarray): Intrinsic camera matrix of size (3, 3) and type float64.
dist (ndarray): Radial-tangential distortion coefficients [k_1, k_2, p_1, p_2, k_3] of size (5,) and type float64.
size (tuple[int, int]): Image size of camera in pixels of size (2,) and type int.
rvec (ndarray): Rotation vector in unnormalized axis-angle representation of size (3,) and type float64.
tvec (ndarray): Translation vector of size (3,) and type float64.
extrinsic_matrix (ndarray): Extrinsic matrix of camera of size (4, 4) and type float64.
name (str): Camera name.
Methods:
__attrs_post_init__: Initialize extrinsic matrix from rotation and translation vectors.
__getattr__: Get attribute by name.
project: Project 3D points to 2D using camera matrix and distortion coefficients.
undistort_points: Undistort points using camera matrix and distortion coefficients.
Attributes:
extrinsic_matrix (ndarray): Get extrinsic matrix of camera.
rvec (ndarray): Get rotation vector of camera.
tvec (ndarray): Get translation vector of camera.
Source code in sleap_io/model/camera.py
@define\nclass Camera:\n \"\"\"A camera used to record in a multi-view `RecordingSession`.\n\n Attributes:\n matrix: Intrinsic camera matrix of size (3, 3) and type float64.\n dist: Radial-tangential distortion coefficients [k_1, k_2, p_1, p_2, k_3] of\n size (5,) and type float64.\n size: Image size of camera in pixels of size (2,) and type int.\n rvec: Rotation vector in unnormalized axis-angle representation of size (3,) and\n type float64.\n tvec: Translation vector of size (3,) and type float64.\n extrinsic_matrix: Extrinsic matrix of camera of size (4, 4) and type float64.\n name: Camera name.\n \"\"\"\n\n matrix: np.ndarray = field(\n default=np.eye(3),\n converter=lambda x: np.array(x, dtype=\"float64\"),\n )\n dist: np.ndarray = field(\n default=np.zeros(5), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n size: tuple[int, int] = field(\n default=None, converter=attrs.converters.optional(tuple)\n )\n _rvec: np.ndarray = field(\n default=np.zeros(3), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n _tvec: np.ndarray = field(\n default=np.zeros(3), converter=lambda x: np.array(x, dtype=\"float64\").ravel()\n )\n name: str = field(default=None, converter=attrs.converters.optional(str))\n _extrinsic_matrix: np.ndarray = field(init=False)\n\n @matrix.validator\n @dist.validator\n @size.validator\n @_rvec.validator\n @_tvec.validator\n @_extrinsic_matrix.validator\n def _validate_shape(self, attribute: attrs.Attribute, value):\n \"\"\"Validate shape of attribute based on metadata.\n\n Args:\n attribute: Attribute to validate.\n value: Value of attribute to validate.\n\n Raises:\n ValueError: If attribute shape is not as expected.\n \"\"\"\n # Define metadata for each attribute\n attr_metadata = {\n \"matrix\": {\"shape\": (3, 3), \"type\": np.ndarray},\n \"dist\": {\"shape\": (5,), \"type\": np.ndarray},\n \"size\": {\"shape\": (2,), \"type\": tuple},\n \"_rvec\": {\"shape\": (3,), \"type\": np.ndarray},\n \"_tvec\": {\"shape\": (3,), \"type\": np.ndarray},\n \"_extrinsic_matrix\": {\"shape\": (4, 4), \"type\": np.ndarray},\n }\n optional_attrs = [\"size\"]\n\n # Skip validation if optional attribute is None\n if attribute.name in optional_attrs and value is None:\n return\n\n # Validate shape of attribute\n expected_shape = attr_metadata[attribute.name][\"shape\"]\n expected_type = attr_metadata[attribute.name][\"type\"]\n if np.shape(value) != expected_shape:\n raise ValueError(\n f\"{attribute.name} must be a {expected_type} of size {expected_shape}, \"\n f\"but recieved shape: {np.shape(value)} and type: {type(value)} for \"\n f\"value: {value}\"\n )\n\n def __attrs_post_init__(self):\n \"\"\"Initialize extrinsic matrix from rotation and translation vectors.\"\"\"\n # Initialize extrinsic matrix\n self._extrinsic_matrix = np.eye(4, dtype=\"float64\")\n self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]\n self._extrinsic_matrix[:3, 3] = self._tvec\n\n @property\n def rvec(self) -> np.ndarray:\n \"\"\"Get rotation vector of camera.\n\n Returns:\n Rotation vector of camera of size 3.\n \"\"\"\n return self._rvec\n\n @rvec.setter\n def rvec(self, value: np.ndarray):\n \"\"\"Set rotation vector and update extrinsic matrix.\n\n Args:\n value: Rotation vector of size 3.\n \"\"\"\n self._rvec = value\n\n # Update extrinsic matrix\n rotation_matrix, _ = cv2.Rodrigues(self._rvec)\n self._extrinsic_matrix[:3, :3] = rotation_matrix\n\n @property\n def tvec(self) -> np.ndarray:\n \"\"\"Get translation vector of camera.\n\n Returns:\n Translation vector 
of camera of size 3.\n \"\"\"\n return self._tvec\n\n @tvec.setter\n def tvec(self, value: np.ndarray):\n \"\"\"Set translation vector and update extrinsic matrix.\n\n Args:\n value: Translation vector of size 3.\n \"\"\"\n self._tvec = value\n\n # Update extrinsic matrix\n self._extrinsic_matrix[:3, 3] = self._tvec\n\n @property\n def extrinsic_matrix(self) -> np.ndarray:\n \"\"\"Get extrinsic matrix of camera.\n\n Returns:\n Extrinsic matrix of camera of size 4 x 4.\n \"\"\"\n return self._extrinsic_matrix\n\n @extrinsic_matrix.setter\n def extrinsic_matrix(self, value: np.ndarray):\n \"\"\"Set extrinsic matrix and update rotation and translation vectors.\n\n Args:\n value: Extrinsic matrix of size 4 x 4.\n \"\"\"\n self._extrinsic_matrix = value\n\n # Update rotation and translation vectors\n self._rvec, _ = cv2.Rodrigues(self._extrinsic_matrix[:3, :3])\n self._tvec = self._extrinsic_matrix[:3, 3]\n\n def undistort_points(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Undistort points using camera matrix and distortion coefficients.\n\n Args:\n points: Points to undistort of shape (N, 2).\n\n Returns:\n Undistorted points of shape (N, 2).\n \"\"\"\n shape = points.shape\n points = points.reshape(-1, 1, 2)\n out = cv2.undistortPoints(points, self.matrix, self.dist)\n return out.reshape(shape)\n\n def project(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Project 3D points to 2D using camera matrix and distortion coefficients.\n\n Args:\n points: 3D points to project of shape (N, 3) or (N, 1, 3).\n\n Returns:\n Projected 2D points of shape (N, 1, 2).\n \"\"\"\n points = points.reshape(-1, 1, 3)\n out, _ = cv2.projectPoints(\n points,\n self.rvec,\n self.tvec,\n self.matrix,\n self.dist,\n )\n return out\n\n # TODO: Remove this when we implement triangulation without aniposelib\n def __getattr__(self, name: str):\n \"\"\"Get attribute by name.\n\n Args:\n name: Name of attribute to get.\n\n Returns:\n Value of attribute.\n\n Raises:\n AttributeError: If attribute does not exist.\n \"\"\"\n if name in self.__attrs_attrs__:\n return getattr(self, name)\n\n # The aliases for methods called when triangulate with sleap_anipose\n method_aliases = {\n \"get_name\": self.name,\n \"get_extrinsic_matrix\": self.extrinsic_matrix,\n }\n\n def return_callable_method_alias():\n return method_aliases[name]\n\n if name in method_aliases:\n return return_callable_method_alias\n\n raise AttributeError(f\"'Camera' object has no attribute or method '{name}'\")\n
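A small construction sketch with made-up calibration values, showing how the extrinsic matrix tracks the rotation and translation vectors:
import numpy as np
from sleap_io.model.camera import Camera

camera = Camera(
    matrix=[[600.0, 0, 320.0], [0, 600.0, 240.0], [0, 0, 1.0]],
    dist=[-0.1, 0.05, 0, 0, 0],
    size=(640, 480),
    rvec=[0.0, 0.0, 0.0],
    tvec=[0.0, 0.0, 2.0],
    name="side",
)

print(camera.extrinsic_matrix)  # 4x4, built from rvec/tvec in __attrs_post_init__

# Setting tvec through the property also refreshes the extrinsic matrix.
camera.tvec = np.array([0.0, 0.0, 3.0])
print(camera.extrinsic_matrix[:3, 3])  # [0. 0. 3.]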
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.extrinsic_matrix","title":"extrinsic_matrix: np.ndarray
property
writable
","text":"Get extrinsic matrix of camera.
Returns:
ndarray: Extrinsic matrix of camera of size 4 x 4.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.rvec","title":"rvec: np.ndarray
property
writable
","text":"Get rotation vector of camera.
Returns:
ndarray: Rotation vector of camera of size 3.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.tvec","title":"tvec: np.ndarray
property
writable
","text":"Get translation vector of camera.
Returns:
ndarray: Translation vector of camera of size 3.
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Initialize extrinsic matrix from rotation and translation vectors.
Source code in sleap_io/model/camera.py
def __attrs_post_init__(self):\n \"\"\"Initialize extrinsic matrix from rotation and translation vectors.\"\"\"\n # Initialize extrinsic matrix\n self._extrinsic_matrix = np.eye(4, dtype=\"float64\")\n self._extrinsic_matrix[:3, :3] = cv2.Rodrigues(self._rvec)[0]\n self._extrinsic_matrix[:3, 3] = self._tvec\n
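For example, a minimal sketch (hypothetical values; import path as documented above) of how the rotation and translation vectors supplied at construction populate the 4 x 4 extrinsic matrix:
import numpy as np\nfrom sleap_io.model.camera import Camera\n\n# Hypothetical rotation (axis-angle) and translation values.\ncam = Camera(rvec=[0.0, 0.0, np.pi / 2], tvec=[1.0, 2.0, 3.0], name=\"side\")\nprint(cam.extrinsic_matrix.shape)  # (4, 4)\nprint(cam.extrinsic_matrix[:3, 3])  # [1. 2. 3.]\n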
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.__getattr__","title":"__getattr__(name)
","text":"Get attribute by name.
Parameters:
name (str): Name of attribute to get. Required.
Returns:
Value of attribute.
Raises:
AttributeError: If attribute does not exist.
Source code in sleap_io/model/camera.py
def __getattr__(self, name: str):\n \"\"\"Get attribute by name.\n\n Args:\n name: Name of attribute to get.\n\n Returns:\n Value of attribute.\n\n Raises:\n AttributeError: If attribute does not exist.\n \"\"\"\n if name in self.__attrs_attrs__:\n return getattr(self, name)\n\n # The aliases for methods called when triangulate with sleap_anipose\n method_aliases = {\n \"get_name\": self.name,\n \"get_extrinsic_matrix\": self.extrinsic_matrix,\n }\n\n def return_callable_method_alias():\n return method_aliases[name]\n\n if name in method_aliases:\n return return_callable_method_alias\n\n raise AttributeError(f\"'Camera' object has no attribute or method '{name}'\")\n
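A brief usage sketch of these aliases (assuming only the documented Camera API):
from sleap_io.model.camera import Camera\n\ncam = Camera(name=\"top\")\n# Method-style aliases resolved by __getattr__ for sleap-anipose style callers:\nprint(cam.get_name())                    # \"top\"\nprint(cam.get_extrinsic_matrix().shape)  # (4, 4)\n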
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.project","title":"project(points)
","text":"Project 3D points to 2D using camera matrix and distortion coefficients.
Parameters:
points (ndarray): 3D points to project of shape (N, 3) or (N, 1, 3). Required.
Returns:
ndarray: Projected 2D points of shape (N, 1, 2).
Source code in sleap_io/model/camera.py
def project(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Project 3D points to 2D using camera matrix and distortion coefficients.\n\n Args:\n points: 3D points to project of shape (N, 3) or (N, 1, 3).\n\n Returns:\n Projected 2D points of shape (N, 1, 2).\n \"\"\"\n points = points.reshape(-1, 1, 3)\n out, _ = cv2.projectPoints(\n points,\n self.rvec,\n self.tvec,\n self.matrix,\n self.dist,\n )\n return out\n
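A minimal projection sketch with the default identity intrinsics and zero distortion (the 3D points are hypothetical):
import numpy as np\nfrom sleap_io.model.camera import Camera\n\ncam = Camera()  # identity camera matrix, zero distortion, zero rvec/tvec\npts_3d = np.array([[0.0, 0.0, 2.0], [0.1, -0.2, 3.0]])  # hypothetical 3D points\npts_2d = cam.project(pts_3d)\nprint(pts_2d.shape)  # (2, 1, 2), matching the cv2.projectPoints output shape\n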
"},{"location":"reference/sleap_io/model/camera/#sleap_io.model.camera.Camera.undistort_points","title":"undistort_points(points)
","text":"Undistort points using camera matrix and distortion coefficients.
Parameters:
points (ndarray): Points to undistort of shape (N, 2). Required.
Returns:
ndarray: Undistorted points of shape (N, 2).
Source code in sleap_io/model/camera.py
def undistort_points(self, points: np.ndarray) -> np.ndarray:\n \"\"\"Undistort points using camera matrix and distortion coefficients.\n\n Args:\n points: Points to undistort of shape (N, 2).\n\n Returns:\n Undistorted points of shape (N, 2).\n \"\"\"\n shape = points.shape\n points = points.reshape(-1, 1, 2)\n out = cv2.undistortPoints(points, self.matrix, self.dist)\n return out.reshape(shape)\n
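A corresponding undistortion sketch (hypothetical distortion coefficients and pixel coordinates):
import numpy as np\nfrom sleap_io.model.camera import Camera\n\ncam = Camera(dist=[-0.2, 0.05, 0.0, 0.0, 0.0])  # hypothetical radial distortion\npts = np.array([[320.0, 240.0], [100.0, 50.0]])\nout = cam.undistort_points(pts)\nprint(out.shape)  # (2, 2): same shape as the input points\n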
"},{"location":"reference/sleap_io/model/instance/","title":"instance","text":""},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance","title":"sleap_io.model.instance
","text":"Data structures for data associated with a single instance such as an animal.
The Instance class is a SLEAP data structure that contains a collection of Points that correspond to landmarks within a Skeleton.
PredictedInstance additionally contains metadata associated with how the instance was estimated, such as confidence scores.
Classes:
Instance: This class represents a ground truth instance such as an animal.
Point: A 2D spatial landmark and metadata associated with annotation.
PredictedInstance: A PredictedInstance is an Instance that was predicted using a model.
PredictedPoint: A predicted point with associated score generated by a prediction model.
Track: An object that represents the same animal/object across multiple detections.
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance","title":"Instance
","text":"This class represents a ground truth instance such as an animal.
An Instance has a set of landmarks (Points) that correspond to the nodes defined in its Skeleton.
It may also be associated with a Track which links multiple instances together across frames or videos.
Attributes:
points (Union[dict[Node, Point], dict[Node, PredictedPoint]]): A dictionary with keys as Nodes and values as Points containing all of the landmarks of the instance. This can also be specified as a dictionary with node names, a list of length n_nodes, or a numpy array of shape (n_nodes, 2).
skeleton (Skeleton): The Skeleton that describes the Nodes and Edges associated with this instance.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos.
from_predicted (Optional[PredictedInstance]): The PredictedInstance (if any) that this instance was initialized from. This is used with human-in-the-loop workflows.
Methods:
__attrs_post_init__: Maintain point mappings between node and points after initialization.
__getitem__: Return the point associated with a node or None if not set.
__len__: Return the number of points in the instance.
__repr__: Return a readable representation of the instance.
from_numpy: Create an instance object from a numpy array.
numpy: Return the instance points as a numpy array.
replace_skeleton: Replace the skeleton associated with the instance.
update_skeleton: Update the points dictionary to match the skeleton.
Attributes:
is_empty (bool): Return True if no points are visible on the instance.
n_visible (int): Return the number of visible points in the instance.
Source code in sleap_io/model/instance.py
@define(auto_attribs=True, slots=True, eq=True)\nclass Instance:\n \"\"\"This class represents a ground truth instance such as an animal.\n\n An `Instance` has a set of landmarks (`Point`s) that correspond to the nodes defined\n in its `Skeleton`.\n\n It may also be associated with a `Track` which links multiple instances together\n across frames or videos.\n\n Attributes:\n points: A dictionary with keys as `Node`s and values as `Point`s containing all\n of the landmarks of the instance. This can also be specified as a dictionary\n with node names, a list of length `n_nodes`, or a numpy array of shape\n `(n_nodes, 2)`.\n skeleton: The `Skeleton` that describes the `Node`s and `Edge`s associated with\n this instance.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: The `PredictedInstance` (if any) that this instance was\n initialized from. This is used with human-in-the-loop workflows.\n \"\"\"\n\n _POINT_TYPE = Point\n\n def _make_default_point(self, x, y):\n return self._POINT_TYPE(x, y, visible=not (math.isnan(x) or math.isnan(y)))\n\n def _convert_points(self, attr, points):\n \"\"\"Maintain points mappings between nodes and points.\"\"\"\n if type(points) == np.ndarray:\n points = points.tolist()\n\n if type(points) == list:\n if len(points) != len(self.skeleton):\n raise ValueError(\n \"If specifying points as a list, must provide as many points as \"\n \"nodes in the skeleton.\"\n )\n points = {node: pt for node, pt in zip(self.skeleton.nodes, points)}\n\n if type(points) == dict:\n keys = [\n node if type(node) == Node else self.skeleton[node]\n for node in points.keys()\n ]\n vals = [\n (\n point\n if type(point) == self._POINT_TYPE\n else self._make_default_point(*point)\n )\n for point in points.values()\n ]\n points = {k: v for k, v in zip(keys, vals)}\n\n missing_nodes = list(set(self.skeleton.nodes) - set(points.keys()))\n for node in missing_nodes:\n points[node] = self._make_default_point(x=np.nan, y=np.nan)\n\n return points\n\n points: Union[dict[Node, Point], dict[Node, PredictedPoint]] = field(\n on_setattr=_convert_points, eq=cmp_using(eq=_compare_points) # type: ignore\n )\n skeleton: Skeleton\n track: Optional[Track] = None\n from_predicted: Optional[PredictedInstance] = None\n\n def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n\n def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n\n def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n\n @property\n def n_visible(self) -> int:\n \"\"\"Return the number of visible points in the instance.\"\"\"\n return sum(pt.visible for pt in self.points.values())\n\n @property\n def is_empty(self) -> bool:\n \"\"\"Return `True` if no points are visible on the instance.\"\"\"\n return self.n_visible == 
0\n\n @classmethod\n def from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n ) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n\n def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. 
No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.is_empty","title":"is_empty: bool
property
","text":"Return True
if no points are visible on the instance.
n_visible: int
property
","text":"Return the number of visible points in the instance.
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Maintain point mappings between node and points after initialization.
Source code in sleap_io/model/instance.py
def __attrs_post_init__(self):\n \"\"\"Maintain point mappings between node and points after initialization.\"\"\"\n super().__setattr__(\"points\", self._convert_points(None, self.points))\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__getitem__","title":"__getitem__(node)
","text":"Return the point associated with a node or None
if not set.
sleap_io/model/instance.py
def __getitem__(self, node: Union[int, str, Node]) -> Optional[Point]:\n \"\"\"Return the point associated with a node or `None` if not set.\"\"\"\n if (type(node) == int) or (type(node) == str):\n node = self.skeleton[node]\n if isinstance(node, Node):\n return self.points.get(node, None)\n else:\n raise IndexError(f\"Invalid indexing argument for instance: {node}\")\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__len__","title":"__len__()
","text":"Return the number of points in the instance.
Source code in sleap_io/model/instance.py
def __len__(self) -> int:\n \"\"\"Return the number of points in the instance.\"\"\"\n return len(self.points)\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code in sleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n return f\"Instance(points={pts}, track={track})\"\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.from_numpy","title":"from_numpy(points, skeleton, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
points (ndarray): A numpy array of shape (n_nodes, 2) corresponding to the points of the skeleton. Values of np.nan indicate \"missing\" nodes.
skeleton (Skeleton): The Skeleton that this Instance is associated with. It should have n_nodes nodes.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos. Default: None.
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy(\n cls, points: np.ndarray, skeleton: Skeleton, track: Optional[Track] = None\n) -> \"Instance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n return cls(\n points=points, skeleton=skeleton, track=track # type: ignore[arg-type]\n )\n
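A minimal sketch of creating an instance this way (it assumes the Skeleton constructor accepts a list of node names):
import numpy as np\nfrom sleap_io.model.instance import Instance\nfrom sleap_io.model.skeleton import Skeleton\n\nskel = Skeleton([\"head\", \"thorax\", \"abdomen\"])  # assumed name-based constructor\npts = np.array([[10.0, 20.0], [15.0, 25.0], [np.nan, np.nan]])  # last node missing\ninst = Instance.from_numpy(pts, skeleton=skel)\nprint(inst.n_visible)      # 2\nprint(inst.numpy().shape)  # (3, 2)\n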
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.numpy","title":"numpy()
","text":"Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 2), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.replace_skeleton","title":"replace_skeleton(new_skeleton, node_map=None, rev_node_map=None)
","text":"Replace the skeleton associated with the instance.
The points dictionary will be updated to match the new skeleton.
Parameters:
new_skeleton (Skeleton): The new Skeleton to associate with the instance. Required.
node_map (dict[NodeOrIndex, NodeOrIndex] | None): Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed. Default: None.
rev_node_map (dict[NodeOrIndex, NodeOrIndex] | None): Dictionary mapping nodes in the new skeleton to nodes in the old skeleton. This is used internally when calling from Labels.replace_skeleton() as it is more efficient to compute this mapping once and pass it to all instances. No validation is done on this mapping, so nodes are expected to be Node objects. Default: None.
Source code in sleap_io/model/instance.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n rev_node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton associated with the instance.\n\n The points dictionary will be updated to match the new skeleton.\n\n Args:\n new_skeleton: The new `Skeleton` to associate with the instance.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n rev_node_map: Dictionary mapping nodes in the new skeleton to nodes in the\n old skeleton. This is used internally when calling from\n `Labels.replace_skeleton()` as it is more efficient to compute this\n mapping once and pass it to all instances. No validation is done on this\n mapping, so nodes are expected to be `Node` objects.\n \"\"\"\n if rev_node_map is None:\n if node_map is None:\n node_map = {}\n for old_node in self.skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n self.skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Build new points list with mapped nodes\n new_points = {}\n for new_node in new_skeleton.nodes:\n old_node = rev_node_map.get(new_node, None)\n if old_node is not None and old_node in self.points:\n new_points[new_node] = self.points[old_node]\n\n # Update the skeleton and points\n self.skeleton = new_skeleton\n self.points = new_points\n
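A small sketch of the default name-based mapping (again assuming a name-based Skeleton constructor):
import numpy as np\nfrom sleap_io.model.instance import Instance\nfrom sleap_io.model.skeleton import Skeleton\n\nold_skel = Skeleton([\"head\", \"tail\"])         # assumed name-based constructor\nnew_skel = Skeleton([\"head\", \"tail\", \"mid\"])  # adds a node; shared names match\ninst = Instance.from_numpy(np.array([[1.0, 2.0], [3.0, 4.0]]), skeleton=old_skel)\ninst.replace_skeleton(new_skel)  # nodes are matched by identical names\nprint(inst.numpy().shape)  # (3, 2); the new \"mid\" node has no visible point yet\n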
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Instance.update_skeleton","title":"update_skeleton()
","text":"Update the points dictionary to match the skeleton.
Points associated with nodes that are no longer in the skeleton will be removed.
Additionally, the keys of the points dictionary will be ordered to match the order of the nodes in the skeleton.
Notes: This method is useful when the skeleton has been updated (e.g., nodes removed or reordered).
However, it is recommended to use Labels-level methods (e.g., Labels.remove_nodes()) when manipulating the skeleton as these will automatically call this method on every instance.
sleap_io/model/instance.py
def update_skeleton(self):\n \"\"\"Update the points dictionary to match the skeleton.\n\n Points associated with nodes that are no longer in the skeleton will be removed.\n\n Additionally, the keys of the points dictionary will be ordered to match the\n order of the nodes in the skeleton.\n\n Notes:\n This method is useful when the skeleton has been updated (e.g., nodes\n removed or reordered).\n\n However, it is recommended to use `Labels`-level methods (e.g.,\n `Labels.remove_nodes()`) when manipulating the skeleton as these will\n automatically call this method on every instance.\n \"\"\"\n # Create a new dictionary to hold the updated points\n new_points = {}\n\n # Iterate over the nodes in the skeleton\n for node in self.skeleton.nodes:\n # Get the point associated with the node\n point = self.points.get(node, None)\n\n # If the point is not None, add it to the new dictionary\n if point is not None:\n new_points[node] = point\n\n # Update the points dictionary\n self.points = new_points\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point","title":"Point
","text":"A 2D spatial landmark and metadata associated with annotation.
Attributes:
x (float): The horizontal pixel location of point in image coordinates.
y (float): The vertical pixel location of point in image coordinates.
visible (bool): Whether point is visible in the image or not.
complete (bool): Has the point been verified by the user labeler.
Class variables:
eq_atol: Controls absolute tolerance allowed in x and y when comparing two Points for equality.
eq_rtol: Controls relative tolerance allowed in x and y when comparing two Points for equality.
Methods:
__eq__: Compare self and other for equality.
numpy: Return the coordinates as a numpy array of shape (2,).
sleap_io/model/instance.py
@define\nclass Point:\n \"\"\"A 2D spatial landmark and metadata associated with annotation.\n\n Attributes:\n x: The horizontal pixel location of point in image coordinates.\n y: The vertical pixel location of point in image coordinates.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n\n Class variables:\n eq_atol: Controls absolute tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n eq_rtol: Controls relative tolerence allowed in `x` and `y` when comparing two\n `Point`s for equality.\n\n \"\"\"\n\n eq_atol: ClassVar[float] = 1e-08\n eq_rtol: ClassVar[float] = 0\n\n x: float\n y: float\n visible: bool = True\n complete: bool = False\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
Precision error between the respective x
and y
properties of two instances may be allowed or controlled via the Point.eq_atol
and Point.eq_rtol
class variables. Set to zero to disable their effect. Internally, numpy.isclose()
is used for the comparison: https://numpy.org/doc/stable/reference/generated/numpy.isclose.html
Parameters:
other (object): Instance of Point to compare to.
Returns:
bool: Returns True if all attributes of self and other are identical (possibly allowing precision error for x and y attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n Precision error between the respective `x` and `y` properties of two\n instances may be allowed or controlled via the `Point.eq_atol` and\n `Point.eq_rtol` class variables. Set to zero to disable their effect.\n Internally, `numpy.isclose()` is used for the comparison:\n https://numpy.org/doc/stable/reference/generated/numpy.isclose.html\n\n Args:\n other: Instance of `Point` to compare to.\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n # Check that other is a Point.\n if type(other) is not type(self):\n return False\n\n # We know that we have some kind of point at this point.\n other = cast(Point, other)\n\n return bool(\n np.all(\n np.isclose(\n [self.x, self.y],\n [other.x, other.y],\n rtol=Point.eq_rtol,\n atol=Point.eq_atol,\n equal_nan=True,\n )\n )\n and (self.visible == other.visible)\n and (self.complete == other.complete)\n )\n
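A short sketch of the tolerance-controlled comparison (the import path is assumed):
from sleap_io.model.instance import Point\n\na = Point(x=1.0, y=2.0)\nb = Point(x=1.0 + 1e-10, y=2.0)  # differs by less than the default atol of 1e-08\nprint(a == b)  # True\n\nPoint.eq_atol = 0  # disable the absolute tolerance entirely\nprint(a == b)  # False\nPoint.eq_atol = 1e-08  # restore the default\n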
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Point.numpy","title":"numpy()
","text":"Return the coordinates as a numpy array of shape (2,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates as a numpy array of shape `(2,)`.\"\"\"\n return np.array([self.x, self.y]) if self.visible else np.full((2,), np.nan)\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance","title":"PredictedInstance
","text":" Bases: Instance
A PredictedInstance is an Instance that was predicted using a model.
Attributes:
skeleton: The Skeleton that this Instance is associated with.
points: A dictionary where keys are Skeleton nodes and values are Points.
track: An optional Track associated with a unique animal/object across frames or videos.
from_predicted (Optional[PredictedInstance]): Not applicable in PredictedInstances (must be set to None).
score (float): The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type.
tracking_score (Optional[float]): The score associated with the Track assignment. This is typically the value from the score matrix used in an identity assignment.
Methods:
__repr__: Return a readable representation of the instance.
from_numpy: Create an instance object from a numpy array.
numpy: Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
@define\nclass PredictedInstance(Instance):\n \"\"\"A `PredictedInstance` is an `Instance` that was predicted using a model.\n\n Attributes:\n skeleton: The `Skeleton` that this `Instance` is associated with.\n points: A dictionary where keys are `Skeleton` nodes and values are `Point`s.\n track: An optional `Track` associated with a unique animal/object across frames\n or videos.\n from_predicted: Not applicable in `PredictedInstance`s (must be set to `None`).\n score: The instance detection or part grouping prediction score. This is a\n scalar that represents the confidence with which this entire instance was\n predicted. This may not always be applicable depending on the model type.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity assignment.\n \"\"\"\n\n _POINT_TYPE = PredictedPoint\n\n from_predicted: Optional[PredictedInstance] = field(\n default=None, validator=validators.instance_of(type(None))\n )\n score: float = 0.0\n tracking_score: Optional[float] = 0\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n\n @classmethod\n def from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n ) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n\n def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.__repr__","title":"__repr__()
","text":"Return a readable representation of the instance.
Source code in sleap_io/model/instance.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the instance.\"\"\"\n pts = self.numpy().tolist()\n track = f'\"{self.track.name}\"' if self.track is not None else self.track\n\n score = str(self.score) if self.score is None else f\"{self.score:.2f}\"\n tracking_score = (\n str(self.tracking_score)\n if self.tracking_score is None\n else f\"{self.tracking_score:.2f}\"\n )\n return (\n f\"PredictedInstance(points={pts}, track={track}, \"\n f\"score={score}, tracking_score={tracking_score})\"\n )\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.from_numpy","title":"from_numpy(points, point_scores, instance_score, skeleton, tracking_score=None, track=None)
classmethod
","text":"Create an instance object from a numpy array.
Parameters:
points (ndarray): A numpy array of shape (n_nodes, 2) corresponding to the points of the skeleton. Values of np.nan indicate \"missing\" nodes. Required.
point_scores (ndarray): The points-level prediction score. This is an array that represents the confidence with which each point in the instance was predicted. This may not always be applicable depending on the model type. Required.
instance_score (float): The instance detection or part grouping prediction score. This is a scalar that represents the confidence with which this entire instance was predicted. This may not always be applicable depending on the model type. Required.
skeleton (Skeleton): The Skeleton that this Instance is associated with. It should have n_nodes nodes. Required.
tracking_score (Optional[float]): The score associated with the Track assignment. This is typically the value from the score matrix used in an identity assignment. Default: None.
track (Optional[Track]): An optional Track associated with a unique animal/object across frames or videos. Default: None.
Source code in sleap_io/model/instance.py
@classmethod\ndef from_numpy( # type: ignore[override]\n cls,\n points: np.ndarray,\n point_scores: np.ndarray,\n instance_score: float,\n skeleton: Skeleton,\n tracking_score: Optional[float] = None,\n track: Optional[Track] = None,\n) -> \"PredictedInstance\":\n \"\"\"Create an instance object from a numpy array.\n\n Args:\n points: A numpy array of shape `(n_nodes, 2)` corresponding to the points of\n the skeleton. Values of `np.nan` indicate \"missing\" nodes.\n point_scores: The points-level prediction score. This is an array that\n represents the confidence with which each point in the instance was\n predicted. This may not always be applicable depending on the model\n type.\n instance_score: The instance detection or part grouping prediction score.\n This is a scalar that represents the confidence with which this entire\n instance was predicted. This may not always be applicable depending on\n the model type.\n skeleton: The `Skeleton` that this `Instance` is associated with. It should\n have `n_nodes` nodes.\n tracking_score: The score associated with the `Track` assignment. This is\n typically the value from the score matrix used in an identity\n assignment.\n track: An optional `Track` associated with a unique animal/object across\n frames or videos.\n \"\"\"\n node_points = {\n node: PredictedPoint(pt[0], pt[1], score=score)\n for node, pt, score in zip(skeleton.nodes, points, point_scores)\n }\n return cls(\n points=node_points,\n skeleton=skeleton,\n score=instance_score,\n tracking_score=tracking_score,\n track=track,\n )\n
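A minimal sketch (assuming a name-based Skeleton constructor):
import numpy as np\nfrom sleap_io.model.instance import PredictedInstance\nfrom sleap_io.model.skeleton import Skeleton\n\nskel = Skeleton([\"head\", \"tail\"])  # assumed name-based constructor\npred = PredictedInstance.from_numpy(\n    points=np.array([[10.0, 20.0], [30.0, 40.0]]),\n    point_scores=np.array([0.9, 0.8]),\n    instance_score=0.85,\n    skeleton=skel,\n)\nprint(pred.numpy(scores=True).shape)  # (2, 3): x, y and per-point score\n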
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedInstance.numpy","title":"numpy(scores=False)
","text":"Return the instance points as a numpy array.
Source code in sleap_io/model/instance.py
def numpy(self, scores: bool = False) -> np.ndarray:\n \"\"\"Return the instance points as a numpy array.\"\"\"\n pts = np.full((len(self.skeleton), 3), np.nan)\n for node, point in self.points.items():\n if point.visible:\n pts[self.skeleton.index(node)] = point.numpy()\n if not scores:\n pts = pts[:, :2]\n return pts\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint","title":"PredictedPoint
","text":" Bases: Point
A predicted point with associated score generated by a prediction model.
It has all the properties of a labeled Point, plus a score.
Attributes:
x: The horizontal pixel location of point within image frame.
y: The vertical pixel location of point within image frame.
visible: Whether point is visible in the image or not.
complete: Has the point been verified by the user labeler.
score (float): The point-level prediction score. This is typically the confidence and set to a value between 0 and 1.
Methods:
__eq__: Compare self and other for equality.
numpy: Return the coordinates and score as a numpy array of shape (3,).
sleap_io/model/instance.py
@define\nclass PredictedPoint(Point):\n \"\"\"A predicted point with associated score generated by a prediction model.\n\n It has all the properties of a labeled `Point`, plus a `score`.\n\n Attributes:\n x: The horizontal pixel location of point within image frame.\n y: The vertical pixel location of point within image frame.\n visible: Whether point is visible in the image or not.\n complete: Has the point been verified by the user labeler.\n score: The point-level prediction score. This is typically the confidence and\n set to a value between 0 and 1.\n \"\"\"\n\n score: float = 0.0\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint.__eq__","title":"__eq__(other)
","text":"Compare self
and other
for equality.
See Point.__eq__()
for important notes about point equality semantics!
Parameters:
other (object): Instance of PredictedPoint to compare.
Returns:
bool: Returns True if all attributes of self and other are identical (possibly allowing precision error for x and y attributes).
sleap_io/model/instance.py
def __eq__(self, other: object) -> bool:\n \"\"\"Compare `self` and `other` for equality.\n\n See `Point.__eq__()` for important notes about point equality semantics!\n\n Args:\n other: Instance of `PredictedPoint` to compare\n\n Returns:\n Returns True if all attributes of `self` and `other` are the identical\n (possibly allowing precision error for `x` and `y` attributes).\n \"\"\"\n if not super().__eq__(other):\n return False\n\n # we know that we have a point at this point\n other = cast(PredictedPoint, other)\n\n return self.score == other.score\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.PredictedPoint.numpy","title":"numpy()
","text":"Return the coordinates and score as a numpy array of shape (3,)
.
sleap_io/model/instance.py
def numpy(self) -> np.ndarray:\n \"\"\"Return the coordinates and score as a numpy array of shape `(3,)`.\"\"\"\n return (\n np.array([self.x, self.y, self.score])\n if self.visible\n else np.full((3,), np.nan)\n )\n
"},{"location":"reference/sleap_io/model/instance/#sleap_io.model.instance.Track","title":"Track
","text":"An object that represents the same animal/object across multiple detections.
This allows tracking of unique entities in the video over time and space.
A Track may also be used to refer to unique identity classes that span multiple videos, such as \"female mouse\".
Attributes:
name (str): A name given to this track for identification purposes.
Notes: Tracks are compared by identity. This means that unique track objects with the same name are considered to be different.
sleap_io/model/instance.py
@define(eq=False)\nclass Track:\n \"\"\"An object that represents the same animal/object across multiple detections.\n\n This allows tracking of unique entities in the video over time and space.\n\n A `Track` may also be used to refer to unique identity classes that span multiple\n videos, such as `\"female mouse\"`.\n\n Attributes:\n name: A name given to this track for identification purposes.\n\n Notes:\n `Track`s are compared by identity. This means that unique track objects with the\n same name are considered to be different.\n \"\"\"\n\n name: str = \"\"\n
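A short sketch of the identity-based comparison described in the notes above:
from sleap_io.model.instance import Track\n\na = Track(name=\"mouse1\")\nb = Track(name=\"mouse1\")\nprint(a == b)  # False: distinct Track objects differ even with the same name\nprint(a == a)  # True\n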
"},{"location":"reference/sleap_io/model/labeled_frame/","title":"labeled_frame","text":""},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame","title":"sleap_io.model.labeled_frame
","text":"Data structures for data contained within a single video frame.
The LabeledFrame class is a data structure that contains Instances and PredictedInstances that are associated with a single frame within a video.
Classes:
LabeledFrame: Labeled data for a single frame of a video.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame","title":"LabeledFrame
","text":"Labeled data for a single frame of a video.
Attributes:
video (Video): The Video associated with this LabeledFrame.
frame_idx (int): The index of the LabeledFrame in the Video.
instances (list[Union[Instance, PredictedInstance]]): List of Instance objects associated with this LabeledFrame.
Notes: Instances of this class are hashed by identity, not by value. This means that two LabeledFrame instances with the same attributes will NOT be considered equal in a set or dict.
Methods:
__getitem__: Return the Instance at key index in the instances list.
__iter__: Iterate over Instances in instances list.
__len__: Return the number of instances in the frame.
numpy: Return all instances in the frame as a numpy array.
remove_empty_instances: Remove all instances with no visible points.
remove_predictions: Remove all PredictedInstance objects from the frame.
Attributes:
has_predicted_instances (bool): Return True if the frame has any predicted instances.
has_user_instances (bool): Return True if the frame has any user-labeled instances.
image (ndarray): Return the image of the frame as a numpy array.
predicted_instances (list[Instance]): Frame instances that are predicted by a model (PredictedInstance objects).
unused_predictions (list[Instance]): Return a list of \"unused\" PredictedInstance objects in frame.
user_instances (list[Instance]): Frame instances that are user-labeled (Instance objects).
sleap_io/model/labeled_frame.py
@define(eq=False)\nclass LabeledFrame:\n \"\"\"Labeled data for a single frame of a video.\n\n Attributes:\n video: The `Video` associated with this `LabeledFrame`.\n frame_idx: The index of the `LabeledFrame` in the `Video`.\n instances: List of `Instance` objects associated with this `LabeledFrame`.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `LabeledFrame` instances with the same attributes will NOT be considered\n equal in a set or dict.\n \"\"\"\n\n video: Video\n frame_idx: int = field(converter=int)\n instances: list[Union[Instance, PredictedInstance]] = field(factory=list)\n\n def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n\n def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n\n def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n\n @property\n def user_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are user-labeled (`Instance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == Instance]\n\n @property\n def has_user_instances(self) -> bool:\n \"\"\"Return True if the frame has any user-labeled instances.\"\"\"\n for inst in self.instances:\n if type(inst) == Instance:\n return True\n return False\n\n @property\n def predicted_instances(self) -> list[Instance]:\n \"\"\"Frame instances that are predicted by a model (`PredictedInstance` objects).\"\"\"\n return [inst for inst in self.instances if type(inst) == PredictedInstance]\n\n @property\n def has_predicted_instances(self) -> bool:\n \"\"\"Return True if the frame has any predicted instances.\"\"\"\n for inst in self.instances:\n if type(inst) == PredictedInstance:\n return True\n return False\n\n def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n\n @property\n def image(self) -> np.ndarray:\n \"\"\"Return the image of the frame as a numpy array.\"\"\"\n return self.video[self.frame_idx]\n\n @property\n def unused_predictions(self) -> list[Instance]:\n \"\"\"Return a list of \"unused\" `PredictedInstance` objects in frame.\n\n This is all of the `PredictedInstance` objects which do not have a corresponding\n `Instance` in the same track in the same frame.\n \"\"\"\n unused_predictions = []\n any_tracks = [inst.track for inst in self.instances if inst.track is not None]\n if len(any_tracks):\n # Use tracks to determine which predicted instances have been used\n used_tracks = [\n inst.track\n for inst in self.instances\n if type(inst) == Instance and inst.track is not None\n ]\n unused_predictions = [\n inst\n for inst in self.instances\n if inst.track not in used_tracks and type(inst) == PredictedInstance\n ]\n\n else:\n # Use from_predicted to determine which predicted instances have been used\n # TODO: should we always do this instead of using tracks?\n used_instances = [\n inst.from_predicted\n for inst in self.instances\n if inst.from_predicted is not None\n ]\n 
unused_predictions = [\n inst\n for inst in self.instances\n if type(inst) == PredictedInstance and inst not in used_instances\n ]\n\n return unused_predictions\n\n def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n\n def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
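A minimal sketch of building a frame by hand (it assumes a name-based Skeleton constructor and that a Video can be constructed from a filename without opening the file):
import numpy as np\nfrom sleap_io.model.instance import Instance\nfrom sleap_io.model.labeled_frame import LabeledFrame\nfrom sleap_io.model.skeleton import Skeleton\nfrom sleap_io.model.video import Video\n\nskel = Skeleton([\"head\", \"tail\"])      # assumed name-based constructor\nvideo = Video(filename=\"example.mp4\")  # assumed lazy; the file is not opened here\ninst = Instance.from_numpy(np.array([[1.0, 2.0], [3.0, 4.0]]), skeleton=skel)\nlf = LabeledFrame(video=video, frame_idx=0, instances=[inst])\nprint(len(lf))           # 1 instance in this frame\nprint(lf.numpy().shape)  # (1, 2, 2): (n_instances, n_nodes, 2)\n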
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.has_predicted_instances","title":"has_predicted_instances: bool
property
","text":"Return True if the frame has any predicted instances.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.has_user_instances","title":"has_user_instances: bool
property
","text":"Return True if the frame has any user-labeled instances.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.image","title":"image: np.ndarray
property
","text":"Return the image of the frame as a numpy array.
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.predicted_instances","title":"predicted_instances: list[Instance]
property
","text":"Frame instances that are predicted by a model (PredictedInstance
objects).
unused_predictions: list[Instance]
property
","text":"Return a list of \"unused\" PredictedInstance
objects in frame.
This is all of the PredictedInstance
objects which do not have a corresponding Instance
in the same track in the same frame.
user_instances: list[Instance]
property
","text":"Frame instances that are user-labeled (Instance
objects).
__getitem__(key)
","text":"Return the Instance
at key
index in the instances
list.
sleap_io/model/labeled_frame.py
def __getitem__(self, key: int) -> Union[Instance, PredictedInstance]:\n \"\"\"Return the `Instance` at `key` index in the `instances` list.\"\"\"\n return self.instances[key]\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.__iter__","title":"__iter__()
","text":"Iterate over Instance
s in instances
list.
sleap_io/model/labeled_frame.py
def __iter__(self):\n \"\"\"Iterate over `Instance`s in `instances` list.\"\"\"\n return iter(self.instances)\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.__len__","title":"__len__()
","text":"Return the number of instances in the frame.
Source code in sleap_io/model/labeled_frame.py
def __len__(self) -> int:\n \"\"\"Return the number of instances in the frame.\"\"\"\n return len(self.instances)\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.numpy","title":"numpy()
","text":"Return all instances in the frame as a numpy array.
Returns:
ndarray: Points as a numpy array of shape (n_instances, n_nodes, 2).
Note that the order of the instances is arbitrary.
Source code in sleap_io/model/labeled_frame.py
def numpy(self) -> np.ndarray:\n \"\"\"Return all instances in the frame as a numpy array.\n\n Returns:\n Points as a numpy array of shape `(n_instances, n_nodes, 2)`.\n\n Note that the order of the instances is arbitrary.\n \"\"\"\n n_instances = len(self.instances)\n n_nodes = len(self.instances[0]) if n_instances > 0 else 0\n pts = np.full((n_instances, n_nodes, 2), np.nan)\n for i, inst in enumerate(self.instances):\n pts[i] = inst.numpy()[:, 0:2]\n return pts\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.remove_empty_instances","title":"remove_empty_instances()
","text":"Remove all instances with no visible points.
Source code in sleap_io/model/labeled_frame.py
def remove_empty_instances(self):\n \"\"\"Remove all instances with no visible points.\"\"\"\n self.instances = [inst for inst in self.instances if not inst.is_empty]\n
"},{"location":"reference/sleap_io/model/labeled_frame/#sleap_io.model.labeled_frame.LabeledFrame.remove_predictions","title":"remove_predictions()
","text":"Remove all PredictedInstance
objects from the frame.
sleap_io/model/labeled_frame.py
def remove_predictions(self):\n \"\"\"Remove all `PredictedInstance` objects from the frame.\"\"\"\n self.instances = [inst for inst in self.instances if type(inst) == Instance]\n
"},{"location":"reference/sleap_io/model/labels/","title":"labels","text":""},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels","title":"sleap_io.model.labels
","text":"Data structure for the labels, a top-level container for pose data.
Labels contain LabeledFrames, which in turn contain Instances, which contain Points.
This structure also maintains metadata that is common across all child objects such as Tracks, Videos, Skeletons and others.
It is intended to be the entrypoint for deserialization and the main container that should be used for serialization. It is designed to support both labeled data (used for training models) and predictions (inference results).
Classes:
Labels: Pose data for a set of videos that have user labels and/or predictions.
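For orientation, a hedged end-to-end sketch; the load_slp entry point and the \"predictions.slp\" path are assumptions, while indexing, numpy, and save are documented below:
import sleap_io as sio\n\nlabels = sio.load_slp(\"predictions.slp\")  # hypothetical path; assumed top-level loader\nprint(labels)            # Labels(labeled_frames=..., videos=..., ...)\nprint(len(labels))       # number of labeled frames\nfirst_lf = labels[0]     # integer indexing returns a LabeledFrame\ntracks = labels.numpy()  # (n_frames, n_tracks, n_nodes, 2) array\nlabels.save(\"copy.slp\")  # re-serialize to the SLP format\n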
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels","title":"Labels
","text":"Pose data for a set of videos that have user labels and/or predictions.
Attributes:
labeled_frames (list[LabeledFrame]): A list of LabeledFrames that are associated with this dataset.
videos (list[Video]): A list of Videos that are associated with this dataset. Videos do not need to have corresponding LabeledFrames if they do not have any labels or predictions yet.
skeletons (list[Skeleton]): A list of Skeletons that are associated with this dataset. This should generally only contain a single skeleton.
tracks (list[Track]): A list of Tracks that are associated with this dataset.
suggestions (list[SuggestionFrame]): A list of SuggestionFrames that are associated with this dataset.
provenance (dict[str, Any]): Dictionary of arbitrary metadata providing additional information about where the dataset came from.
Notes: Videos in contained LabeledFrames, and Skeletons and Tracks in contained Instances, are added to the respective lists automatically.
Methods:
__attrs_post_init__: Append videos, skeletons, and tracks seen in labeled_frames to Labels.
__getitem__: Return one or more labeled frames based on indexing criteria.
__iter__: Iterate over labeled_frames list when calling iter method on Labels.
__len__: Return number of labeled frames.
__repr__: Return a readable representation of the labels.
__str__: Return a readable representation of the labels.
append: Append a labeled frame to the labels.
clean: Remove empty frames, unused skeletons, tracks and videos.
extend: Append a list of labeled frames to the labels.
extract: Extract a set of frames into a new Labels object.
find: Search for labeled frames given video and/or frame index.
make_training_splits: Make splits for training with embedded images.
numpy: Construct a numpy array from instance points.
remove_nodes: Remove nodes from the skeleton.
remove_predictions: Remove all predicted instances from the labels.
rename_nodes: Rename nodes in the skeleton.
reorder_nodes: Reorder nodes in the skeleton.
replace_filenames: Replace video filenames.
replace_skeleton: Replace the skeleton in the labels.
replace_videos: Replace videos and update all references.
save: Save labels to file in specified format.
split: Separate the labels into random splits.
trim: Trim the labels to a subset of frames and videos accordingly.
update: Update data structures based on contents.
Attributes:
instances (Iterator[Instance]): Return an iterator over all instances within all labeled frames.
skeleton (Skeleton): Return the skeleton if there is only a single skeleton in the labels.
user_labeled_frames (list[LabeledFrame]): Return all labeled frames with user (non-predicted) instances.
video (Video): Return the video if there is only a single video in the labels.
Source code in sleap_io/model/labels.py
@define\nclass Labels:\n \"\"\"Pose data for a set of videos that have user labels and/or predictions.\n\n Attributes:\n labeled_frames: A list of `LabeledFrame`s that are associated with this dataset.\n videos: A list of `Video`s that are associated with this dataset. Videos do not\n need to have corresponding `LabeledFrame`s if they do not have any\n labels or predictions yet.\n skeletons: A list of `Skeleton`s that are associated with this dataset. This\n should generally only contain a single skeleton.\n tracks: A list of `Track`s that are associated with this dataset.\n suggestions: A list of `SuggestionFrame`s that are associated with this dataset.\n provenance: Dictionary of arbitrary metadata providing additional information\n about where the dataset came from.\n\n Notes:\n `Video`s in contain `LabeledFrame`s, and `Skeleton`s and `Track`s in contained\n `Instance`s are added to the respective lists automatically.\n \"\"\"\n\n labeled_frames: list[LabeledFrame] = field(factory=list)\n videos: list[Video] = field(factory=list)\n skeletons: list[Skeleton] = field(factory=list)\n tracks: list[Track] = field(factory=list)\n suggestions: list[SuggestionFrame] = field(factory=list)\n provenance: dict[str, Any] = field(factory=dict)\n\n def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n\n def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n\n def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n ) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n\n def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n\n def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n 
f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n\n def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n ) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. 
Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n\n @property\n def video(self) -> Video:\n \"\"\"Return the video if there is only a single video in the labels.\"\"\"\n if len(self.videos) == 0:\n raise ValueError(\"There are no videos in the labels.\")\n elif len(self.videos) == 1:\n return self.videos[0]\n else:\n raise ValueError(\n \"Labels.video can only be used when there is only a single video saved \"\n \"in the labels. Use Labels.videos instead.\"\n )\n\n @property\n def skeleton(self) -> Skeleton:\n \"\"\"Return the skeleton if there is only a single skeleton in the labels.\"\"\"\n if len(self.skeletons) == 0:\n raise ValueError(\"There are no skeletons in the labels.\")\n elif len(self.skeletons) == 1:\n return self.skeletons[0]\n else:\n raise ValueError(\n \"Labels.skeleton can only be used when there is only a single skeleton \"\n \"saved in the labels. Use Labels.skeletons instead.\"\n )\n\n def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n ) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. 
If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n\n def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n ):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n\n def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n ):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n 
if frames:\n self.labeled_frames = kept_frames\n\n def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n\n @property\n def user_labeled_frames(self) -> list[LabeledFrame]:\n \"\"\"Return all labeled frames with user (non-predicted) instances.\"\"\"\n return [lf for lf in self.labeled_frames if lf.has_user_instances]\n\n @property\n def instances(self) -> Iterator[Instance]:\n \"\"\"Return an iterator over all instances within all labeled frames.\"\"\"\n return (instance for lf in self.labeled_frames for instance in lf.instances)\n\n def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n ):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n\n def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. 
If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n ):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n\n def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n ):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. 
**All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n\n def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n ):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n\n def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n ):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. 
Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n\n def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n ) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n\n def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. 
If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n\n def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n ) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. 
If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n\n def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. 
Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.instances","title":"instances: Iterator[Instance]
property
","text":"Return an iterator over all instances within all labeled frames.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.skeleton","title":"skeleton: Skeleton
property
","text":"Return the skeleton if there is only a single skeleton in the labels.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.user_labeled_frames","title":"user_labeled_frames: list[LabeledFrame]
property
","text":"Return all labeled frames with user (non-predicted) instances.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.video","title":"video: Video
property
","text":"Return the video if there is only a single video in the labels.
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Append videos, skeletons, and tracks seen in labeled_frames
to Labels
.
sleap_io/model/labels.py
def __attrs_post_init__(self):\n \"\"\"Append videos, skeletons, and tracks seen in `labeled_frames` to `Labels`.\"\"\"\n self.update()\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__getitem__","title":"__getitem__(key)
","text":"Return one or more labeled frames based on indexing criteria.
Source code in sleap_io/model/labels.py
def __getitem__(\n self, key: int | slice | list[int] | np.ndarray | tuple[Video, int]\n) -> list[LabeledFrame] | LabeledFrame:\n \"\"\"Return one or more labeled frames based on indexing criteria.\"\"\"\n if type(key) == int:\n return self.labeled_frames[key]\n elif type(key) == slice:\n return [self.labeled_frames[i] for i in range(*key.indices(len(self)))]\n elif type(key) == list:\n return [self.labeled_frames[i] for i in key]\n elif isinstance(key, np.ndarray):\n return [self.labeled_frames[i] for i in key.tolist()]\n elif type(key) == tuple and len(key) == 2:\n video, frame_idx = key\n res = self.find(video, frame_idx)\n if len(res) == 1:\n return res[0]\n elif len(res) == 0:\n raise IndexError(\n f\"No labeled frames found for video {video} and \"\n f\"frame index {frame_idx}.\"\n )\n elif type(key) == Video:\n res = self.find(key)\n if len(res) == 0:\n raise IndexError(f\"No labeled frames found for video {key}.\")\n return res\n else:\n raise IndexError(f\"Invalid indexing argument for labels: {key}\")\n
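A minimal indexing sketch (assuming labels is an existing Labels object, video is one of its Videos, and the frame index 42 is purely illustrative):
>>> lf = labels[0]            # single LabeledFrame by integer index
>>> lfs = labels[:10]         # list of LabeledFrames from a slice
>>> lf = labels[(video, 42)]  # LabeledFrame for a (Video, frame_idx) pair
>>> lfs = labels[video]       # all LabeledFrames belonging to a Video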
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__iter__","title":"__iter__()
","text":"Iterate over labeled_frames
list when calling iter method on Labels
.
sleap_io/model/labels.py
def __iter__(self):\n \"\"\"Iterate over `labeled_frames` list when calling iter method on `Labels`.\"\"\"\n return iter(self.labeled_frames)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__len__","title":"__len__()
","text":"Return number of labeled frames.
Source code insleap_io/model/labels.py
def __len__(self) -> int:\n \"\"\"Return number of labeled frames.\"\"\"\n return len(self.labeled_frames)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__repr__","title":"__repr__()
","text":"Return a readable representation of the labels.
Source code insleap_io/model/labels.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return (\n \"Labels(\"\n f\"labeled_frames={len(self.labeled_frames)}, \"\n f\"videos={len(self.videos)}, \"\n f\"skeletons={len(self.skeletons)}, \"\n f\"tracks={len(self.tracks)}, \"\n f\"suggestions={len(self.suggestions)}\"\n \")\"\n )\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.__str__","title":"__str__()
","text":"Return a readable representation of the labels.
Source code insleap_io/model/labels.py
def __str__(self) -> str:\n \"\"\"Return a readable representation of the labels.\"\"\"\n return self.__repr__()\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.append","title":"append(lf, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlf
LabeledFrame
A labeled frame to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def append(self, lf: LabeledFrame, update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lf: A labeled frame to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.append(lf)\n\n if update:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
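A short usage sketch (assuming labels and video already exist; the frame index is illustrative):
>>> lf = LabeledFrame(video=video, frame_idx=100)
>>> labels.append(lf)  # lf.video and any new skeletons/tracks in its instances are registered too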
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.clean","title":"clean(frames=True, empty_instances=False, skeletons=True, tracks=True, videos=False)
","text":"Remove empty frames, unused skeletons, tracks and videos.
Parameters:
Name Type Description Defaultframes
bool
If True
(the default), remove empty frames.
True
empty_instances
bool
If True
(NOT default), remove instances that have no visible points.
False
skeletons
bool
If True
(the default), remove unused skeletons.
True
tracks
bool
If True
(the default), remove unused tracks.
True
videos
bool
If True
(NOT default), remove videos that have no labeled frames.
False
Source code in sleap_io/model/labels.py
def clean(\n self,\n frames: bool = True,\n empty_instances: bool = False,\n skeletons: bool = True,\n tracks: bool = True,\n videos: bool = False,\n):\n \"\"\"Remove empty frames, unused skeletons, tracks and videos.\n\n Args:\n frames: If `True` (the default), remove empty frames.\n empty_instances: If `True` (NOT default), remove instances that have no\n visible points.\n skeletons: If `True` (the default), remove unused skeletons.\n tracks: If `True` (the default), remove unused tracks.\n videos: If `True` (NOT default), remove videos that have no labeled frames.\n \"\"\"\n used_skeletons = []\n used_tracks = []\n used_videos = []\n kept_frames = []\n for lf in self.labeled_frames:\n\n if empty_instances:\n lf.remove_empty_instances()\n\n if frames and len(lf) == 0:\n continue\n\n if videos and lf.video not in used_videos:\n used_videos.append(lf.video)\n\n if skeletons or tracks:\n for inst in lf:\n if skeletons and inst.skeleton not in used_skeletons:\n used_skeletons.append(inst.skeleton)\n if (\n tracks\n and inst.track is not None\n and inst.track not in used_tracks\n ):\n used_tracks.append(inst.track)\n\n if frames:\n kept_frames.append(lf)\n\n if videos:\n self.videos = [video for video in self.videos if video in used_videos]\n\n if skeletons:\n self.skeletons = [\n skeleton for skeleton in self.skeletons if skeleton in used_skeletons\n ]\n\n if tracks:\n self.tracks = [track for track in self.tracks if track in used_tracks]\n\n if frames:\n self.labeled_frames = kept_frames\n
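A minimal sketch of typical cleanup calls (assuming labels already exists):
>>> labels.clean()                                   # remove empty frames and unused skeletons/tracks
>>> labels.clean(empty_instances=True, videos=True)  # additionally drop instances with no visible points and unlabeled videos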
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.extend","title":"extend(lfs, update=True)
","text":"Append a labeled frame to the labels.
Parameters:
Name Type Description Defaultlfs
list[LabeledFrame]
A list of labeled frames to add to the labels.
requiredupdate
bool
If True
(the default), update list of videos, tracks and skeletons from the contents.
True
Source code in sleap_io/model/labels.py
def extend(self, lfs: list[LabeledFrame], update: bool = True):\n \"\"\"Append a labeled frame to the labels.\n\n Args:\n lfs: A list of labeled frames to add to the labels.\n update: If `True` (the default), update list of videos, tracks and\n skeletons from the contents.\n \"\"\"\n self.labeled_frames.extend(lfs)\n\n if update:\n for lf in lfs:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n
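A short usage sketch (assuming labels and video already exist; frame indices are illustrative):
>>> new_lfs = [LabeledFrame(video=video, frame_idx=i) for i in range(3)]
>>> labels.extend(new_lfs)  # adds all frames and updates the videos/skeletons/tracks lists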
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.extract","title":"extract(inds, copy=True)
","text":"Extract a set of frames into a new Labels object.
Parameters:
Name Type Description Defaultinds
list[int] | list[tuple[Video, int]] | ndarray
Indices of labeled frames. Can be specified as a list or array of integer indices of labeled frames, or tuples of Video and frame indices.
requiredcopy
bool
If True
(the default), return a copy of the frames and containing objects. Otherwise, return a reference to the data.
True
Returns:
Type DescriptionLabels
A new Labels
object containing the selected labels.
This copies the labeled frames and their associated data, including skeletons and tracks, and tries to maintain the relative ordering.
This also copies the provenance and inserts an extra key: \"source_labels\"
with the path to the current labels, if available.
It does NOT copy suggested frames.
Source code in sleap_io/model/labels.py
def extract(\n self, inds: list[int] | list[tuple[Video, int]] | np.ndarray, copy: bool = True\n) -> Labels:\n \"\"\"Extract a set of frames into a new Labels object.\n\n Args:\n inds: Indices of labeled frames. Can be specified as a list of array of\n integer indices of labeled frames or tuples of Video and frame indices.\n copy: If `True` (the default), return a copy of the frames and containing\n objects. Otherwise, return a reference to the data.\n\n Returns:\n A new `Labels` object containing the selected labels.\n\n Notes:\n This copies the labeled frames and their associated data, including\n skeletons and tracks, and tries to maintain the relative ordering.\n\n This also copies the provenance and inserts an extra key: `\"source_labels\"`\n with the path to the current labels, if available.\n\n It does NOT copy suggested frames.\n \"\"\"\n lfs = self[inds]\n\n if copy:\n lfs = deepcopy(lfs)\n labels = Labels(lfs)\n\n # Try to keep the lists in the same order.\n track_to_ind = {track.name: ind for ind, track in enumerate(self.tracks)}\n labels.tracks = sorted(labels.tracks, key=lambda x: track_to_ind[x.name])\n\n skel_to_ind = {skel.name: ind for ind, skel in enumerate(self.skeletons)}\n labels.skeletons = sorted(labels.skeletons, key=lambda x: skel_to_ind[x.name])\n\n labels.provenance = deepcopy(labels.provenance)\n labels.provenance[\"source_labels\"] = self.provenance.get(\"filename\", None)\n\n return labels\n
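A minimal sketch (assuming labels already exists):
>>> subset = labels.extract([0, 1, 2])              # deep copy of the first three labeled frames
>>> subset = labels.extract([0, 1, 2], copy=False)  # same frames, but referencing the original objects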
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.find","title":"find(video, frame_idx=None, return_new=False)
","text":"Search for labeled frames given video and/or frame index.
Parameters:
Name Type Description Defaultvideo
Video
A Video
that is associated with the project.
frame_idx
int | list[int] | None
The frame index (or indices) which we want to find in the video. If a range is specified, we'll return all frames with indices in that range. If not specified, then we'll return all labeled frames for the video.
None
return_new
bool
Whether to return singleton of new and empty LabeledFrame
if none are found in project.
False
Returns:
Type Descriptionlist[LabeledFrame]
List of LabeledFrame
objects that match the criteria.
The list will be empty if no matches are found, unless return_new is True, in which case it contains new (empty) LabeledFrame
objects with video
and frame_index
set.
sleap_io/model/labels.py
def find(\n self,\n video: Video,\n frame_idx: int | list[int] | None = None,\n return_new: bool = False,\n) -> list[LabeledFrame]:\n \"\"\"Search for labeled frames given video and/or frame index.\n\n Args:\n video: A `Video` that is associated with the project.\n frame_idx: The frame index (or indices) which we want to find in the video.\n If a range is specified, we'll return all frames with indices in that\n range. If not specific, then we'll return all labeled frames for video.\n return_new: Whether to return singleton of new and empty `LabeledFrame` if\n none are found in project.\n\n Returns:\n List of `LabeledFrame` objects that match the criteria.\n\n The list will be empty if no matches found, unless return_new is True, in\n which case it contains new (empty) `LabeledFrame` objects with `video` and\n `frame_index` set.\n \"\"\"\n results = []\n\n if frame_idx is None:\n for lf in self.labeled_frames:\n if lf.video == video:\n results.append(lf)\n return results\n\n if np.isscalar(frame_idx):\n frame_idx = np.array(frame_idx).reshape(-1)\n\n for frame_ind in frame_idx:\n result = None\n for lf in self.labeled_frames:\n if lf.video == video and lf.frame_idx == frame_ind:\n result = lf\n results.append(result)\n break\n if result is None and return_new:\n results.append(LabeledFrame(video=video, frame_idx=frame_ind))\n\n return results\n
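A minimal sketch (assuming labels and video already exist; frame indices are illustrative):
>>> lfs = labels.find(video)                                # all labeled frames in this video
>>> lfs = labels.find(video, frame_idx=[10, 20])            # only the listed frame indices
>>> lfs = labels.find(video, frame_idx=5, return_new=True)  # empty LabeledFrame if frame 5 is unlabeled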
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.make_training_splits","title":"make_training_splits(n_train, n_val=None, n_test=None, save_dir=None, seed=None, embed=True)
","text":"Make splits for training with embedded images.
Parameters:
Name Type Description Defaultn_train
int | float
Size of the training split as integer or fraction.
requiredn_val
int | float | None
Size of the validation split as integer or fraction. If None
, this will be inferred based on the values of n_train
and n_test
. If n_test
is None
, this will be the remainder of the data after the training split.
None
n_test
int | float | None
Size of the testing split as integer or fraction. If None
, the test split will not be saved.
None
save_dir
str | Path | None
If specified, save splits to SLP files with embedded images.
None
seed
int | None
Optional integer seed to use for reproducibility.
None
embed
bool
If True
(the default), embed user labeled frame images in the saved files, which is useful for portability but can be slow for large projects. If False
, labels are saved with references to the source video files.
True
Returns:
Type Descriptiontuple[Labels, Labels] | tuple[Labels, Labels, Labels]
A tuple of labels_train, labels_val
or labels_train, labels_val, labels_test
if n_test
was specified.
Predictions and suggestions will be removed before saving, leaving only frames with user labeled data (the source labels are not affected).
Frames with user labeled data will be embedded in the resulting files.
If save_dir
is specified, this will save the randomly sampled splits to:
{save_dir}/train.pkg.slp
{save_dir}/val.pkg.slp
{save_dir}/test.pkg.slp
(if n_test
is specified)If embed
is False
, the files will be saved without embedded images to:
{save_dir}/train.slp
{save_dir}/val.slp
{save_dir}/test.slp
(if n_test
is specified)See also: Labels.split
sleap_io/model/labels.py
def make_training_splits(\n self,\n n_train: int | float,\n n_val: int | float | None = None,\n n_test: int | float | None = None,\n save_dir: str | Path | None = None,\n seed: int | None = None,\n embed: bool = True,\n) -> tuple[Labels, Labels] | tuple[Labels, Labels, Labels]:\n \"\"\"Make splits for training with embedded images.\n\n Args:\n n_train: Size of the training split as integer or fraction.\n n_val: Size of the validation split as integer or fraction. If `None`,\n this will be inferred based on the values of `n_train` and `n_test`. If\n `n_test` is `None`, this will be the remainder of the data after the\n training split.\n n_test: Size of the testing split as integer or fraction. If `None`, the\n test split will not be saved.\n save_dir: If specified, save splits to SLP files with embedded images.\n seed: Optional integer seed to use for reproducibility.\n embed: If `True` (the default), embed user labeled frame images in the saved\n files, which is useful for portability but can be slow for large\n projects. If `False`, labels are saved with references to the source\n videos files.\n\n Returns:\n A tuple of `labels_train, labels_val` or\n `labels_train, labels_val, labels_test` if `n_test` was specified.\n\n Notes:\n Predictions and suggestions will be removed before saving, leaving only\n frames with user labeled data (the source labels are not affected).\n\n Frames with user labeled data will be embedded in the resulting files.\n\n If `save_dir` is specified, this will save the randomly sampled splits to:\n\n - `{save_dir}/train.pkg.slp`\n - `{save_dir}/val.pkg.slp`\n - `{save_dir}/test.pkg.slp` (if `n_test` is specified)\n\n If `embed` is `False`, the files will be saved without embedded images to:\n\n - `{save_dir}/train.slp`\n - `{save_dir}/val.slp`\n - `{save_dir}/test.slp` (if `n_test` is specified)\n\n See also: `Labels.split`\n \"\"\"\n # Clean up labels.\n labels = deepcopy(self)\n labels.remove_predictions()\n labels.suggestions = []\n labels.clean()\n\n # Make train split.\n labels_train, labels_rest = labels.split(n_train, seed=seed)\n\n # Make test split.\n if n_test is not None:\n if n_test < 1:\n n_test = (n_test * len(labels)) / len(labels_rest)\n labels_test, labels_rest = labels_rest.split(n=n_test, seed=seed)\n\n # Make val split.\n if n_val is not None:\n if n_val < 1:\n n_val = (n_val * len(labels)) / len(labels_rest)\n if isinstance(n_val, float) and n_val == 1.0:\n labels_val = labels_rest\n else:\n labels_val, _ = labels_rest.split(n=n_val, seed=seed)\n else:\n labels_val = labels_rest\n\n # Update provenance.\n source_labels = self.provenance.get(\"filename\", None)\n labels_train.provenance[\"source_labels\"] = source_labels\n if n_val is not None:\n labels_val.provenance[\"source_labels\"] = source_labels\n if n_test is not None:\n labels_test.provenance[\"source_labels\"] = source_labels\n\n # Save.\n if save_dir is not None:\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True, parents=True)\n\n if embed:\n labels_train.save(save_dir / \"train.pkg.slp\", embed=\"user\")\n labels_val.save(save_dir / \"val.pkg.slp\", embed=\"user\")\n labels_test.save(save_dir / \"test.pkg.slp\", embed=\"user\")\n else:\n labels_train.save(save_dir / \"train.slp\", embed=False)\n labels_val.save(save_dir / \"val.slp\", embed=False)\n labels_test.save(save_dir / \"test.slp\", embed=False)\n\n if n_test is None:\n return labels_train, labels_val\n else:\n return labels_train, labels_val, labels_test\n
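A minimal sketch (assuming labels already exists; the save directory and seed are illustrative):
>>> labels_train, labels_val = labels.make_training_splits(n_train=0.8)
>>> labels_train, labels_val, labels_test = labels.make_training_splits(
...     n_train=0.8, n_val=0.1, n_test=0.1, save_dir="splits", seed=42)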
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.numpy","title":"numpy(video=None, all_frames=True, untracked=False, return_confidence=False)
","text":"Construct a numpy array from instance points.
Parameters:
Name Type Description Defaultvideo
Optional[Union[Video, int]]
Video or video index to convert to numpy arrays. If None
(the default), uses the first video.
None
untracked
bool
If False
(the default), include only instances that have a track assignment. If True
, includes all instances in each frame in arbitrary order.
False
return_confidence
bool
If False
(the default), only return points of nodes. If True
, return the points and scores of nodes.
False
Returns:
Type Descriptionndarray
An array of tracks of shape (n_frames, n_tracks, n_nodes, 2)
if return_confidence
is False
. Otherwise returned shape is (n_frames, n_tracks, n_nodes, 3)
if return_confidence
is True
.
Missing data will be replaced with np.nan
.
If this is a single instance project, a track does not need to be assigned.
Only predicted instances (NOT user instances) will be returned.
Notes This method assumes that instances have tracks assigned and is intended to function primarily for single-video prediction results.
Source code in sleap_io/model/labels.py
def numpy(\n self,\n video: Optional[Union[Video, int]] = None,\n all_frames: bool = True,\n untracked: bool = False,\n return_confidence: bool = False,\n) -> np.ndarray:\n \"\"\"Construct a numpy array from instance points.\n\n Args:\n video: Video or video index to convert to numpy arrays. If `None` (the\n default), uses the first video.\n untracked: If `False` (the default), include only instances that have a\n track assignment. If `True`, includes all instances in each frame in\n arbitrary order.\n return_confidence: If `False` (the default), only return points of nodes. If\n `True`, return the points and scores of nodes.\n\n Returns:\n An array of tracks of shape `(n_frames, n_tracks, n_nodes, 2)` if\n `return_confidence` is `False`. Otherwise returned shape is\n `(n_frames, n_tracks, n_nodes, 3)` if `return_confidence` is `True`.\n\n Missing data will be replaced with `np.nan`.\n\n If this is a single instance project, a track does not need to be assigned.\n\n Only predicted instances (NOT user instances) will be returned.\n\n Notes:\n This method assumes that instances have tracks assigned and is intended to\n function primarily for single-video prediction results.\n \"\"\"\n # Get labeled frames for specified video.\n if video is None:\n video = 0\n if type(video) == int:\n video = self.videos[video]\n lfs = [lf for lf in self.labeled_frames if lf.video == video]\n\n # Figure out frame index range.\n first_frame, last_frame = 0, 0\n for lf in lfs:\n first_frame = min(first_frame, lf.frame_idx)\n last_frame = max(last_frame, lf.frame_idx)\n\n # Figure out the number of tracks based on number of instances in each frame.\n # First, let's check the max number of predicted instances (regardless of\n # whether they're tracked.\n n_preds = 0\n for lf in lfs:\n n_pred_instances = len(lf.predicted_instances)\n n_preds = max(n_preds, n_pred_instances)\n\n # Case 1: We don't care about order because there's only 1 instance per frame,\n # or we're considering untracked instances.\n untracked = untracked or n_preds == 1\n if untracked:\n n_tracks = n_preds\n else:\n # Case 2: We're considering only tracked instances.\n n_tracks = len(self.tracks)\n\n n_frames = int(last_frame - first_frame + 1)\n skeleton = self.skeletons[-1] # Assume project only uses last skeleton\n n_nodes = len(skeleton.nodes)\n\n if return_confidence:\n tracks = np.full((n_frames, n_tracks, n_nodes, 3), np.nan, dtype=\"float32\")\n else:\n tracks = np.full((n_frames, n_tracks, n_nodes, 2), np.nan, dtype=\"float32\")\n for lf in lfs:\n i = int(lf.frame_idx - first_frame)\n if untracked:\n for j, inst in enumerate(lf.predicted_instances):\n tracks[i, j] = inst.numpy(scores=return_confidence)\n else:\n tracked_instances = [\n inst\n for inst in lf.instances\n if type(inst) == PredictedInstance and inst.track is not None\n ]\n for inst in tracked_instances:\n j = self.tracks.index(inst.track) # type: ignore[arg-type]\n tracks[i, j] = inst.numpy(scores=return_confidence)\n\n return tracks\n
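A minimal sketch (assuming labels already exists):
>>> tracks = labels.numpy()                         # shape: (n_frames, n_tracks, n_nodes, 2)
>>> tracks = labels.numpy(return_confidence=True)   # shape: (n_frames, n_tracks, n_nodes, 3)
>>> tracks = labels.numpy(video=0, untracked=True)  # first video, instances in arbitrary order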
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.remove_nodes","title":"remove_nodes(nodes, skeleton=None)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Defaultnodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the nodes are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method should always be used when removing nodes from the skeleton as it handles updating the lookup caches necessary for indexing nodes by name, and updating instances to reflect the changes made to the skeleton.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Source code in sleap_io/model/labels.py
def remove_nodes(self, nodes: list[NodeOrIndex], skeleton: Skeleton | None = None):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the nodes are not found in the skeleton, or if there is more\n than one skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method should always be used when removing nodes from the skeleton as\n it handles updating the lookup caches necessary for indexing nodes by name,\n and updating instances to reflect the changes made to the skeleton.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.remove_nodes(nodes)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
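A minimal sketch (assuming labels already exists; the node name is illustrative and must match a node in the skeleton):
>>> labels.remove_nodes(["tail_tip"])  # by name (illustrative)
>>> labels.remove_nodes([2])           # or by index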
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.remove_predictions","title":"remove_predictions(clean=True)
","text":"Remove all predicted instances from the labels.
Parameters:
Name Type Description Defaultclean
bool
If True
(the default), also remove any empty frames and unused tracks and skeletons. It does NOT remove videos that have no labeled frames or instances with no visible points.
True
See also: Labels.clean
sleap_io/model/labels.py
def remove_predictions(self, clean: bool = True):\n \"\"\"Remove all predicted instances from the labels.\n\n Args:\n clean: If `True` (the default), also remove any empty frames and unused\n tracks and skeletons. It does NOT remove videos that have no labeled\n frames or instances with no visible points.\n\n See also: `Labels.clean`\n \"\"\"\n for lf in self.labeled_frames:\n lf.remove_predictions()\n\n if clean:\n self.clean(\n frames=True,\n empty_instances=False,\n skeletons=True,\n tracks=True,\n videos=False,\n )\n
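A minimal sketch (assuming labels already exists):
>>> labels.remove_predictions()             # drop predictions and clean up empty frames/tracks/skeletons
>>> labels.remove_predictions(clean=False)  # alternatively, drop predictions but keep the empty frames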
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.rename_nodes","title":"rename_nodes(name_map, skeleton=None)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Defaultname_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
requiredskeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new node names exist in the skeleton, if the old node names are not found in the skeleton, or if there is more than one skeleton in the Labels
but it is not specified.
This method is recommended over Skeleton.rename_nodes
as it will update all instances in the labels to reflect the new node names.
>>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])
>>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})
>>> labels.skeleton.node_names
[\"X\", \"Y\", \"Z\"]
>>> labels.rename_nodes([\"a\", \"b\", \"c\"])
>>> labels.skeleton.node_names
[\"a\", \"b\", \"c\"]
Source code in sleap_io/model/labels.py
def rename_nodes(\n self,\n name_map: dict[NodeOrIndex, str] | list[str],\n skeleton: Skeleton | None = None,\n):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new node names exist in the skeleton, if the old node\n names are not found in the skeleton, or if there is more than one\n skeleton in the `Labels` but it is not specified.\n\n Notes:\n This method is recommended over `Skeleton.rename_nodes` as it will update\n all instances in the labels to reflect the new node names.\n\n Example:\n >>> labels = Labels(skeletons=[Skeleton([\"A\", \"B\", \"C\"])])\n >>> labels.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> labels.skeleton.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> labels.rename_nodes([\"a\", \"b\", \"c\"])\n >>> labels.skeleton.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton in \"\n \"the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.rename_nodes(name_map)\n
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.reorder_nodes","title":"reorder_nodes(new_order, skeleton=None)
","text":"Reorder nodes in the skeleton.
Parameters:
Name Type Description Defaultnew_order
list[NodeOrIndex]
A list of node names, indices, or Node
objects specifying the new order of the nodes.
skeleton
Skeleton | None
Skeleton
to update. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
Raises:
Type DescriptionValueError
If the new order of nodes is not the same length as the current nodes, or if there is more than one skeleton in the Labels
but it is not specified.
This method handles updating the lookup caches necessary for indexing nodes by name, as well as updating instances to reflect the changes made to the skeleton.
Source code in sleap_io/model/labels.py
def reorder_nodes(\n self, new_order: list[NodeOrIndex], skeleton: Skeleton | None = None\n):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n skeleton: `Skeleton` to update. If `None` (the default), assumes there is\n only one skeleton in the labels and raises `ValueError` otherwise.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes, or if there is more than one skeleton in the `Labels` but it is\n not specified.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name, as well as updating instances to reflect the changes made to the\n skeleton.\n \"\"\"\n if skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Skeleton must be specified when there is more than one skeleton \"\n \"in the labels.\"\n )\n skeleton = self.skeleton\n\n skeleton.reorder_nodes(new_order)\n\n for inst in self.instances:\n if inst.skeleton == skeleton:\n inst.update_skeleton()\n
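A minimal usage sketch (assuming a single-skeleton Labels object named labels whose nodes are currently ['A', 'B', 'C']):
>>> labels.reorder_nodes(['C', 'A', 'B'])   # instances are updated to match the new order
>>> labels.skeleton.node_names
['C', 'A', 'B']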
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_filenames","title":"replace_filenames(new_filenames=None, filename_map=None, prefix_map=None)
","text":"Replace video filenames.
Parameters:
Name Type Description Default
new_filenames
list[str | Path] | None
List of new filenames. Must have the same length as the number of videos in the labels.
None
filename_map
dict[str | Path, str | Path] | None
Dictionary mapping old filenames (keys) to new filenames (values).
None
prefix_map
dict[str | Path, str | Path] | None
Dictionary mapping old prefixes (keys) to new prefixes (values).
None
Notes Only one of the argument types can be provided.
Source code insleap_io/model/labels.py
def replace_filenames(\n self,\n new_filenames: list[str | Path] | None = None,\n filename_map: dict[str | Path, str | Path] | None = None,\n prefix_map: dict[str | Path, str | Path] | None = None,\n):\n \"\"\"Replace video filenames.\n\n Args:\n new_filenames: List of new filenames. Must have the same length as the\n number of videos in the labels.\n filename_map: Dictionary mapping old filenames (keys) to new filenames\n (values).\n prefix_map: Dictonary mapping old prefixes (keys) to new prefixes (values).\n\n Notes:\n Only one of the argument types can be provided.\n \"\"\"\n n = 0\n if new_filenames is not None:\n n += 1\n if filename_map is not None:\n n += 1\n if prefix_map is not None:\n n += 1\n if n != 1:\n raise ValueError(\n \"Exactly one input method must be provided to replace filenames.\"\n )\n\n if new_filenames is not None:\n if len(self.videos) != len(new_filenames):\n raise ValueError(\n f\"Number of new filenames ({len(new_filenames)}) does not match \"\n f\"the number of videos ({len(self.videos)}).\"\n )\n\n for video, new_filename in zip(self.videos, new_filenames):\n video.replace_filename(new_filename)\n\n elif filename_map is not None:\n for video in self.videos:\n for old_fn, new_fn in filename_map.items():\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n if Path(fn) == Path(old_fn):\n new_fns.append(new_fn)\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n if Path(video.filename) == Path(old_fn):\n video.replace_filename(new_fn)\n\n elif prefix_map is not None:\n for video in self.videos:\n for old_prefix, new_prefix in prefix_map.items():\n old_prefix, new_prefix = Path(old_prefix), Path(new_prefix)\n\n if type(video.filename) == list:\n new_fns = []\n for fn in video.filename:\n fn = Path(fn)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n new_fns.append(new_prefix / fn.relative_to(old_prefix))\n else:\n new_fns.append(fn)\n video.replace_filename(new_fns)\n else:\n fn = Path(video.filename)\n if fn.as_posix().startswith(old_prefix.as_posix()):\n video.replace_filename(\n new_prefix / fn.relative_to(old_prefix)\n )\n
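A usage sketch with hypothetical paths; only one of the three argument types may be passed per call:
>>> # Remap a shared path prefix across every video in the project.
>>> labels.replace_filenames(prefix_map={'/old/data': '/new/data'})
>>> # Alternatively, map individual filenames explicitly.
>>> labels.replace_filenames(filename_map={'/new/data/vid.mp4': '/archive/vid.mp4'})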
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_skeleton","title":"replace_skeleton(new_skeleton, old_skeleton=None, node_map=None)
","text":"Replace the skeleton in the labels.
Parameters:
Name Type Description Default
new_skeleton
Skeleton
The new Skeleton
to replace the old skeleton with.
old_skeleton
Skeleton | None
The old Skeleton
to replace. If None
(the default), assumes there is only one skeleton in the labels and raises ValueError
otherwise.
None
node_map
dict[NodeOrIndex, NodeOrIndex] | None
Dictionary mapping nodes in the old skeleton to nodes in the new skeleton. Keys and values can be specified as Node
objects, integer indices, or string names. If not provided, only nodes with identical names will be mapped. Points associated with unmapped nodes will be removed.
None
Raises:
Type Description
ValueError
If there is more than one skeleton in the Labels
but it is not specified.
This method will replace the skeleton in all instances in the labels that have the old skeleton. All point data associated with nodes not in the node_map
will be lost.
Source code in sleap_io/model/labels.py
def replace_skeleton(\n self,\n new_skeleton: Skeleton,\n old_skeleton: Skeleton | None = None,\n node_map: dict[NodeOrIndex, NodeOrIndex] | None = None,\n):\n \"\"\"Replace the skeleton in the labels.\n\n Args:\n new_skeleton: The new `Skeleton` to replace the old skeleton with.\n old_skeleton: The old `Skeleton` to replace. If `None` (the default),\n assumes there is only one skeleton in the labels and raises `ValueError`\n otherwise.\n node_map: Dictionary mapping nodes in the old skeleton to nodes in the new\n skeleton. Keys and values can be specified as `Node` objects, integer\n indices, or string names. If not provided, only nodes with identical\n names will be mapped. Points associated with unmapped nodes will be\n removed.\n\n Raises:\n ValueError: If there is more than one skeleton in the `Labels` but it is not\n specified.\n\n Warning:\n This method will replace the skeleton in all instances in the labels that\n have the old skeleton. **All point data associated with nodes not in the\n `node_map` will be lost.**\n \"\"\"\n if old_skeleton is None:\n if len(self.skeletons) != 1:\n raise ValueError(\n \"Old skeleton must be specified when there is more than one \"\n \"skeleton in the labels.\"\n )\n old_skeleton = self.skeleton\n\n if node_map is None:\n node_map = {}\n for old_node in old_skeleton.nodes:\n for new_node in new_skeleton.nodes:\n if old_node.name == new_node.name:\n node_map[old_node] = new_node\n break\n else:\n node_map = {\n old_skeleton.require_node(\n old, add_missing=False\n ): new_skeleton.require_node(new, add_missing=False)\n for old, new in node_map.items()\n }\n\n # Make new -> old mapping for nodes for efficiency.\n rev_node_map = {new: old for old, new in node_map.items()}\n\n # Replace the skeleton in the instances.\n for inst in self.instances:\n if inst.skeleton == old_skeleton:\n inst.replace_skeleton(new_skeleton, rev_node_map=rev_node_map)\n\n # Replace the skeleton in the labels.\n self.skeletons[self.skeletons.index(old_skeleton)] = new_skeleton\n
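A sketch assuming a single-skeleton project whose old nodes 'A', 'B', 'C' should map onto a renamed skeleton (names are hypothetical); points for unmapped nodes are dropped:
>>> new_skel = Skeleton(['head', 'thorax', 'abdomen'])
>>> labels.replace_skeleton(new_skel, node_map={'A': 'head', 'B': 'thorax', 'C': 'abdomen'})
>>> labels.skeleton.node_names
['head', 'thorax', 'abdomen']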
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.replace_videos","title":"replace_videos(old_videos=None, new_videos=None, video_map=None)
","text":"Replace videos and update all references.
Parameters:
Name Type Description Default
old_videos
list[Video] | None
List of videos to be replaced.
None
new_videos
list[Video] | None
List of videos to replace with.
None
video_map
dict[Video, Video] | None
Alternative input of dictionary where keys are the old videos and values are the new videos.
None
Source code in sleap_io/model/labels.py
def replace_videos(\n self,\n old_videos: list[Video] | None = None,\n new_videos: list[Video] | None = None,\n video_map: dict[Video, Video] | None = None,\n):\n \"\"\"Replace videos and update all references.\n\n Args:\n old_videos: List of videos to be replaced.\n new_videos: List of videos to replace with.\n video_map: Alternative input of dictionary where keys are the old videos and\n values are the new videos.\n \"\"\"\n if (\n old_videos is None\n and new_videos is not None\n and len(new_videos) == len(self.videos)\n ):\n old_videos = self.videos\n\n if video_map is None:\n video_map = {o: n for o, n in zip(old_videos, new_videos)}\n\n # Update the labeled frames with the new videos.\n for lf in self.labeled_frames:\n if lf.video in video_map:\n lf.video = video_map[lf.video]\n\n # Update suggestions with the new videos.\n for sf in self.suggestions:\n if sf.video in video_map:\n sf.video = video_map[sf.video]\n\n # Update the list of videos.\n self.videos = [video_map.get(video, video) for video in self.videos]\n
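A sketch assuming old_video and new_video are Video objects (for example, the original file and a re-encoded copy):
>>> labels.replace_videos(video_map={old_video: new_video})
>>> # Or positionally, when replacing every video in order (lengths must match labels.videos):
>>> labels.replace_videos(new_videos=[new_video])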
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.save","title":"save(filename, format=None, embed=None, **kwargs)
","text":"Save labels to file in specified format.
Parameters:
Name Type Description Default
filename
str
Path to save labels to.
required
format
Optional[str]
The format to save the labels in. If None
, the format will be inferred from the file extension. Available formats are \"slp\"
, \"nwb\"
, \"labelstudio\"
, and \"jabs\"
.
None
embed
bool | str | list[tuple[Video, int]] | None
Frames to embed in the saved labels file. One of None
, True
, \"all\"
, \"user\"
, \"suggestions\"
, \"user+suggestions\"
, \"source\"
or list of tuples of (video, frame_idx)
.
If None
is specified (the default) and the labels contains embedded frames, those embedded frames will be re-saved to the new file.
If True
or \"all\"
, all labeled frames and suggested frames will be embedded.
If \"source\"
is specified, no images will be embedded and the source video will be restored if available.
This argument is only valid for the SLP backend.
None
Source code in sleap_io/model/labels.py
def save(\n self,\n filename: str,\n format: Optional[str] = None,\n embed: bool | str | list[tuple[Video, int]] | None = None,\n **kwargs,\n):\n \"\"\"Save labels to file in specified format.\n\n Args:\n filename: Path to save labels to.\n format: The format to save the labels in. If `None`, the format will be\n inferred from the file extension. Available formats are `\"slp\"`,\n `\"nwb\"`, `\"labelstudio\"`, and `\"jabs\"`.\n embed: Frames to embed in the saved labels file. One of `None`, `True`,\n `\"all\"`, `\"user\"`, `\"suggestions\"`, `\"user+suggestions\"`, `\"source\"` or\n list of tuples of `(video, frame_idx)`.\n\n If `None` is specified (the default) and the labels contains embedded\n frames, those embedded frames will be re-saved to the new file.\n\n If `True` or `\"all\"`, all labeled frames and suggested frames will be\n embedded.\n\n If `\"source\"` is specified, no images will be embedded and the source\n video will be restored if available.\n\n This argument is only valid for the SLP backend.\n \"\"\"\n from sleap_io import save_file\n\n save_file(self, filename, format=format, embed=embed, **kwargs)\n
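A usage sketch with hypothetical output filenames:
>>> labels.save('labels.slp')                    # format inferred from the extension
>>> labels.save('labels.pkg.slp', embed='user')  # embed user-labeled frames (SLP backend only)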
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.split","title":"split(n, seed=None)
","text":"Separate the labels into random splits.
Parameters:
Name Type Description Default
n
int | float
Size of the first split. If integer >= 1, assumes that this is the number of labeled frames in the first split. If < 1.0, this will be treated as a fraction of the total labeled frames.
required
seed
int | None
Optional integer seed to use for reproducibility.
None
Returns:
Type Description
tuple[Labels, Labels]
A tuple of split1, split2
.
If an integer was specified, len(split1) == n
.
If a fraction was specified, len(split1) == int(n * len(labels))
.
The second split contains the remainder, i.e., len(split2) == len(labels) - len(split1)
.
If there are too few frames, a minimum of 1 frame will be kept in the second split.
If there is exactly 1 labeled frame in the labels, the same frame will be assigned to both splits.
Source code in sleap_io/model/labels.py
def split(self, n: int | float, seed: int | None = None) -> tuple[Labels, Labels]:\n \"\"\"Separate the labels into random splits.\n\n Args:\n n: Size of the first split. If integer >= 1, assumes that this is the number\n of labeled frames in the first split. If < 1.0, this will be treated as\n a fraction of the total labeled frames.\n seed: Optional integer seed to use for reproducibility.\n\n Returns:\n A tuple of `split1, split2`.\n\n If an integer was specified, `len(split1) == n`.\n\n If a fraction was specified, `len(split1) == int(n * len(labels))`.\n\n The second split contains the remainder, i.e.,\n `len(split2) == len(labels) - len(split1)`.\n\n If there are too few frames, a minimum of 1 frame will be kept in the second\n split.\n\n If there is exactly 1 labeled frame in the labels, the same frame will be\n assigned to both splits.\n \"\"\"\n n0 = len(self)\n if n0 == 0:\n return self, self\n n1 = n\n if n < 1.0:\n n1 = max(int(n0 * float(n)), 1)\n n2 = max(n0 - n1, 1)\n n1, n2 = int(n1), int(n2)\n\n rng = np.random.default_rng(seed=seed)\n inds1 = rng.choice(n0, size=(n1,), replace=False)\n\n if n0 == 1:\n inds2 = np.array([0])\n else:\n inds2 = np.setdiff1d(np.arange(n0), inds1)\n\n split1 = self.extract(inds1, copy=True)\n split2 = self.extract(inds2, copy=True)\n\n return split1, split2\n
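A sketch of a reproducible train/validation split:
>>> train, val = labels.split(0.8, seed=42)
>>> len(train) == int(0.8 * len(labels))   # holds for non-trivial projects
True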
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.trim","title":"trim(save_path, frame_inds, video=None, video_kwargs=None)
","text":"Trim the labels to a subset of frames and videos accordingly.
Parameters:
Name Type Description Default
save_path
str | Path
Path to the trimmed labels SLP file. Video will be saved with the same base name but with .mp4 extension.
required
frame_inds
list[int] | ndarray
Frame indices to save. Can be specified as a list or array of frame integers.
required
video
Video | int | None
Video or integer index of the video to trim. Does not need to be specified for single-video projects.
None
video_kwargs
dict[str, Any] | None
A dictionary of keyword arguments to provide to sio.save_video
for video compression.
None
Returns:
Type Description
Labels
The resulting labels object referencing the trimmed data.
Notes
This will remove any data outside of the trimmed frames, save new videos, and adjust the frame indices to match the newly trimmed videos.
Source code in sleap_io/model/labels.py
def trim(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray,\n video: Video | int | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Labels:\n \"\"\"Trim the labels to a subset of frames and videos accordingly.\n\n Args:\n save_path: Path to the trimmed labels SLP file. Video will be saved with the\n same base name but with .mp4 extension.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers.\n video: Video or integer index of the video to trim. Does not need to be\n specified for single-video projects.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n The resulting labels object referencing the trimmed data.\n\n Notes:\n This will remove any data outside of the trimmed frames, save new videos,\n and adjust the frame indices to match the newly trimmed videos.\n \"\"\"\n if video is None:\n if len(self.videos) == 1:\n video = self.video\n else:\n raise ValueError(\n \"Video needs to be specified when trimming multi-video projects.\"\n )\n if type(video) == int:\n video = self.videos[video]\n\n # Write trimmed clip.\n save_path = Path(save_path)\n video_path = save_path.with_suffix(\".mp4\")\n fidx0, fidx1 = np.min(frame_inds), np.max(frame_inds)\n new_video = video.save(\n video_path,\n frame_inds=np.arange(fidx0, fidx1 + 1),\n video_kwargs=video_kwargs,\n )\n\n # Get frames in range.\n # TODO: Create an optimized search function for this access pattern.\n inds = []\n for ind, lf in enumerate(self):\n if lf.video == video and lf.frame_idx >= fidx0 and lf.frame_idx <= fidx1:\n inds.append(ind)\n trimmed_labels = self.extract(inds, copy=True)\n\n # Adjust video and frame indices.\n trimmed_labels.videos = [new_video]\n for lf in trimmed_labels:\n lf.video = new_video\n lf.frame_idx = lf.frame_idx - fidx0\n\n # Save.\n trimmed_labels.save(save_path)\n\n return trimmed_labels\n
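A sketch assuming a single-video project and hypothetical output paths; a trimmed clip.mp4 is written next to the new SLP file and frame indices are shifted to start at 0:
>>> import numpy as np
>>> clip = labels.trim('clip.slp', frame_inds=np.arange(100, 201))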
"},{"location":"reference/sleap_io/model/labels/#sleap_io.model.labels.Labels.update","title":"update()
","text":"Update data structures based on contents.
This function will update the list of skeletons, videos and tracks from the labeled frames, instances and suggestions.
Source code in sleap_io/model/labels.py
def update(self):\n \"\"\"Update data structures based on contents.\n\n This function will update the list of skeletons, videos and tracks from the\n labeled frames, instances and suggestions.\n \"\"\"\n for lf in self.labeled_frames:\n if lf.video not in self.videos:\n self.videos.append(lf.video)\n\n for inst in lf:\n if inst.skeleton not in self.skeletons:\n self.skeletons.append(inst.skeleton)\n\n if inst.track is not None and inst.track not in self.tracks:\n self.tracks.append(inst.track)\n\n for sf in self.suggestions:\n if sf.video not in self.videos:\n self.videos.append(sf.video)\n
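A sketch of when a manual call is useful, for example after appending a hypothetical LabeledFrame lf directly to the frame list:
>>> labels.labeled_frames.append(lf)
>>> labels.update()   # pick up any new videos, skeletons and tracks referenced by lf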
"},{"location":"reference/sleap_io/model/skeleton/","title":"skeleton","text":""},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton","title":"sleap_io.model.skeleton
","text":"Data model for skeletons.
Skeletons are collections of nodes and edges which describe the landmarks associated with a pose model. The edges represent the connections between them and may be used differently depending on the underlying pose model.
Classes:
Name Description
Edge
A connection between two Node
objects within a Skeleton
.
Node
A landmark type within a Skeleton
.
Skeleton
A description of a set of landmark types and connections between them.
Symmetry
A relationship between a pair of nodes denoting their left/right pairing.
Functions:
Name Description
is_node_or_index
Check if an object is a Node
, string name or integer index.
Edge
","text":"A connection between two Node
objects within a Skeleton
.
This is a directed edge, representing the ordering of Node
s in the Skeleton
tree.
Attributes:
Name Type Description
source
Node
The origin Node
.
destination
Node
The destination Node
.
Methods:
Name Description
__getitem__
Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
Source code in sleap_io/model/skeleton.py
@define(frozen=True)\nclass Edge:\n \"\"\"A connection between two `Node` objects within a `Skeleton`.\n\n This is a directed edge, representing the ordering of `Node`s in the `Skeleton`\n tree.\n\n Attributes:\n source: The origin `Node`.\n destination: The destination `Node`.\n \"\"\"\n\n source: Node\n destination: Node\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
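A small sketch of constructing and indexing an Edge directly:
>>> from sleap_io.model.skeleton import Edge, Node
>>> edge = Edge(source=Node('head'), destination=Node('thorax'))
>>> edge[0].name, edge[1].name
('head', 'thorax')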
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Edge.__getitem__","title":"__getitem__(idx)
","text":"Return the source Node
(idx
is 0) or destination Node
(idx
is 1).
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the source `Node` (`idx` is 0) or destination `Node` (`idx` is 1).\"\"\"\n if idx == 0:\n return self.source\n elif idx == 1:\n return self.destination\n else:\n raise IndexError(\"Edge only has 2 nodes (source and destination).\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Node","title":"Node
","text":"A landmark type within a Skeleton
.
This typically corresponds to a unique landmark within a skeleton, such as the \"left eye\".
Attributes:
Name Type Descriptionname
str
Descriptive label for the landmark.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Node:\n \"\"\"A landmark type within a `Skeleton`.\n\n This typically corresponds to a unique landmark within a skeleton, such as the \"left\n eye\".\n\n Attributes:\n name: Descriptive label for the landmark.\n \"\"\"\n\n name: str\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton","title":"Skeleton
","text":"A description of a set of landmark types and connections between them.
Skeletons are represented by a directed graph composed of a set of Node
s (landmark types such as body parts) and Edge
s (connections between parts).
Attributes:
Name Type Description
nodes
list[Node]
A list of Node
s. May be specified as a list of strings to create new nodes from their names.
edges
list[Edge]
A list of Edge
s. May be specified as a list of 2-tuples of string names or integer indices of nodes
. Each edge corresponds to a pair of source and destination nodes forming a directed edge.
symmetries
list[Symmetry]
A list of Symmetry
s. Each symmetry corresponds to symmetric body parts, such as \"left eye\", \"right eye\"
. This is used when applying flip (reflection) augmentation to images in order to appropriately swap the indices of symmetric landmarks.
name
str | None
A descriptive name for the Skeleton
.
Methods:
Name Description
__attrs_post_init__
Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
__contains__
Check if a node is in the skeleton.
__getitem__
Return a Node
when indexing by name or integer.
__len__
Return the number of nodes in the skeleton.
__repr__
Return a readable representation of the skeleton.
add_edge
Add an Edge
to the skeleton.
add_edges
Add multiple Edge
s to the skeleton.
add_node
Add a Node
to the skeleton.
add_nodes
Add multiple Node
s to the skeleton.
add_symmetry
Add a symmetry relationship to the skeleton.
get_flipped_node_inds
Returns node indices that should be switched when horizontally flipping.
index
Return the index of a node specified as a Node
or string name.
rebuild_cache
Rebuild the node name/index to Node
map caches.
remove_node
Remove a single node from the skeleton.
remove_nodes
Remove nodes from the skeleton.
rename_node
Rename a single node in the skeleton.
rename_nodes
Rename nodes in the skeleton.
reorder_nodes
Reorder nodes in the skeleton.
require_node
Return a Node
object, handling indexing and adding missing nodes.
Attributes:
Name Type Description
edge_inds
list[tuple[int, int]]
Edges indices as a list of 2-tuples.
edge_names
list[str, str]
Edge names as a list of 2-tuples with string node names.
node_names
list[str]
Names of the nodes associated with this skeleton as a list of strings.
symmetry_inds
list[tuple[int, int]]
Symmetry indices as a list of 2-tuples.
symmetry_names
list[str, str]
Symmetry names as a list of 2-tuples with string node names.
Source code in sleap_io/model/skeleton.py
@define(eq=False)\nclass Skeleton:\n \"\"\"A description of a set of landmark types and connections between them.\n\n Skeletons are represented by a directed graph composed of a set of `Node`s (landmark\n types such as body parts) and `Edge`s (connections between parts).\n\n Attributes:\n nodes: A list of `Node`s. May be specified as a list of strings to create new\n nodes from their names.\n edges: A list of `Edge`s. May be specified as a list of 2-tuples of string names\n or integer indices of `nodes`. Each edge corresponds to a pair of source and\n destination nodes forming a directed edge.\n symmetries: A list of `Symmetry`s. Each symmetry corresponds to symmetric body\n parts, such as `\"left eye\", \"right eye\"`. This is used when applying flip\n (reflection) augmentation to images in order to appropriately swap the\n indices of symmetric landmarks.\n name: A descriptive name for the `Skeleton`.\n \"\"\"\n\n def _nodes_on_setattr(self, attr, new_nodes):\n \"\"\"Callback to update caches when nodes are set.\"\"\"\n self.rebuild_cache(nodes=new_nodes)\n return new_nodes\n\n nodes: list[Node] = field(\n factory=list,\n on_setattr=_nodes_on_setattr,\n )\n edges: list[Edge] = field(factory=list)\n symmetries: list[Symmetry] = field(factory=list)\n name: str | None = None\n _name_to_node_cache: dict[str, Node] = field(init=False, repr=False, eq=False)\n _node_to_ind_cache: dict[Node, int] = field(init=False, repr=False, eq=False)\n\n def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n\n def _convert_nodes(self):\n \"\"\"Convert nodes to `Node` objects if needed.\"\"\"\n if isinstance(self.nodes, np.ndarray):\n object.__setattr__(self, \"nodes\", self.nodes.tolist())\n for i, node in enumerate(self.nodes):\n if type(node) == str:\n self.nodes[i] = Node(node)\n\n def _convert_edges(self):\n \"\"\"Convert list of edge names or integers to `Edge` objects if needed.\"\"\"\n if isinstance(self.edges, np.ndarray):\n self.edges = self.edges.tolist()\n node_names = self.node_names\n for i, edge in enumerate(self.edges):\n if type(edge) == Edge:\n continue\n src, dst = edge\n if type(src) == str:\n try:\n src = node_names.index(src)\n except ValueError:\n raise ValueError(\n f\"Node '{src}' specified in the edge list is not in the nodes.\"\n )\n if type(src) == int or (\n np.isscalar(src) and np.issubdtype(src.dtype, np.integer)\n ):\n src = self.nodes[src]\n\n if type(dst) == str:\n try:\n dst = node_names.index(dst)\n except ValueError:\n raise ValueError(\n f\"Node '{dst}' specified in the edge list is not in the nodes.\"\n )\n if type(dst) == int or (\n np.isscalar(dst) and np.issubdtype(dst.dtype, np.integer)\n ):\n dst = self.nodes[dst]\n\n self.edges[i] = Edge(src, dst)\n\n def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. 
Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n\n @property\n def node_names(self) -> list[str]:\n \"\"\"Names of the nodes associated with this skeleton as a list of strings.\"\"\"\n return [node.name for node in self.nodes]\n\n @property\n def edge_inds(self) -> list[tuple[int, int]]:\n \"\"\"Edges indices as a list of 2-tuples.\"\"\"\n return [\n (self.nodes.index(edge.source), self.nodes.index(edge.destination))\n for edge in self.edges\n ]\n\n @property\n def edge_names(self) -> list[str, str]:\n \"\"\"Edge names as a list of 2-tuples with string node names.\"\"\"\n return [(edge.source.name, edge.destination.name) for edge in self.edges]\n\n @property\n def symmetry_inds(self) -> list[tuple[int, int]]:\n \"\"\"Symmetry indices as a list of 2-tuples.\"\"\"\n return [\n tuple(sorted((self.index(symmetry[0]), self.index(symmetry[1]))))\n for symmetry in self.symmetries\n ]\n\n @property\n def symmetry_names(self) -> list[str, str]:\n \"\"\"Symmetry names as a list of 2-tuples with string node names.\"\"\"\n return [\n (self.nodes[i].name, self.nodes[j].name) for (i, j) in self.symmetry_inds\n ]\n\n def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n\n def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n\n def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n\n def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n\n def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return 
self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n\n def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n\n def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n\n def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n\n def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n\n def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n ):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n\n def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n\n def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n ):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. 
If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n\n def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n\n def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. 
Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n\n def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n\n def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n\n def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. 
This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
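A construction sketch using string names for nodes and 2-tuples of names for edges (values shown assume this exact input):
>>> from sleap_io.model.skeleton import Skeleton
>>> skel = Skeleton(nodes=['head', 'thorax', 'abdomen'], edges=[('head', 'thorax'), ('thorax', 'abdomen')], name='insect')
>>> skel.node_names
['head', 'thorax', 'abdomen']
>>> skel.edge_inds
[(0, 1), (1, 2)]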
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.edge_inds","title":"edge_inds: list[tuple[int, int]]
property
","text":"Edges indices as a list of 2-tuples.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.edge_names","title":"edge_names: list[str, str]
property
","text":"Edge names as a list of 2-tuples with string node names.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.node_names","title":"node_names: list[str]
property
","text":"Names of the nodes associated with this skeleton as a list of strings.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.symmetry_inds","title":"symmetry_inds: list[tuple[int, int]]
property
","text":"Symmetry indices as a list of 2-tuples.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.symmetry_names","title":"symmetry_names: list[str, str]
property
","text":"Symmetry names as a list of 2-tuples with string node names.
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Ensure nodes are Node
s, edges are Edge
s, and Node
map is updated.
Source code in sleap_io/model/skeleton.py
def __attrs_post_init__(self):\n \"\"\"Ensure nodes are `Node`s, edges are `Edge`s, and `Node` map is updated.\"\"\"\n self._convert_nodes()\n self._convert_edges()\n self.rebuild_cache()\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__contains__","title":"__contains__(node)
","text":"Check if a node is in the skeleton.
Source code in sleap_io/model/skeleton.py
def __contains__(self, node: NodeOrIndex) -> bool:\n \"\"\"Check if a node is in the skeleton.\"\"\"\n if type(node) == str:\n return node in self._name_to_node_cache\n elif type(node) == Node:\n return node in self.nodes\n elif type(node) == int:\n return 0 <= node < len(self.nodes)\n else:\n raise ValueError(f\"Invalid node type for skeleton: {node}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__getitem__","title":"__getitem__(idx)
","text":"Return a Node
when indexing by name or integer.
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx: NodeOrIndex) -> Node:\n \"\"\"Return a `Node` when indexing by name or integer.\"\"\"\n if type(idx) == int:\n return self.nodes[idx]\n elif type(idx) == str:\n return self._name_to_node_cache[idx]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {idx}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__len__","title":"__len__()
","text":"Return the number of nodes in the skeleton.
Source code in sleap_io/model/skeleton.py
def __len__(self) -> int:\n \"\"\"Return the number of nodes in the skeleton.\"\"\"\n return len(self.nodes)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.__repr__","title":"__repr__()
","text":"Return a readable representation of the skeleton.
Source code in sleap_io/model/skeleton.py
def __repr__(self) -> str:\n \"\"\"Return a readable representation of the skeleton.\"\"\"\n nodes = \", \".join([f'\"{node}\"' for node in self.node_names])\n return \"Skeleton(\" f\"nodes=[{nodes}], \" f\"edges={self.edge_inds}\" \")\"\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_edge","title":"add_edge(src, dst=None)
","text":"Add an Edge
to the skeleton.
Parameters:
Name Type Description Default
src
NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex]
The source node specified as a Node
, name or index.
dst
NodeOrIndex | None
The destination node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_edge(\n self,\n src: NodeOrIndex | Edge | tuple[NodeOrIndex, NodeOrIndex],\n dst: NodeOrIndex | None = None,\n):\n \"\"\"Add an `Edge` to the skeleton.\n\n Args:\n src: The source node specified as a `Node`, name or index.\n dst: The destination node specified as a `Node`, name or index.\n \"\"\"\n edge = None\n if type(src) == tuple:\n src, dst = src\n\n if is_node_or_index(src):\n if not is_node_or_index(dst):\n raise ValueError(\"Destination node must be specified.\")\n\n src = self.require_node(src)\n dst = self.require_node(dst)\n edge = Edge(src, dst)\n\n if type(src) == Edge:\n edge = src\n\n if edge not in self.edges:\n self.edges.append(edge)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_edges","title":"add_edges(edges)
","text":"Add multiple Edge
s to the skeleton.
Parameters:
Name Type Description Default
edges
list[Edge | tuple[NodeOrIndex, NodeOrIndex]]
A list of Edge
objects or 2-tuples of source and destination nodes.
Source code in sleap_io/model/skeleton.py
def add_edges(self, edges: list[Edge | tuple[NodeOrIndex, NodeOrIndex]]):\n \"\"\"Add multiple `Edge`s to the skeleton.\n\n Args:\n edges: A list of `Edge` objects or 2-tuples of source and destination nodes.\n \"\"\"\n for edge in edges:\n self.add_edge(edge)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_node","title":"add_node(node)
","text":"Add a Node
to the skeleton.
Parameters:
Name Type Description Default
node
Node | str
A Node
object or a string name to create a new node.
Raises:
Type DescriptionValueError
If the node already exists in the skeleton or if the node is not specified as a Node
or string.
Source code in sleap_io/model/skeleton.py
def add_node(self, node: Node | str):\n \"\"\"Add a `Node` to the skeleton.\n\n Args:\n node: A `Node` object or a string name to create a new node.\n\n Raises:\n ValueError: If the node already exists in the skeleton or if the node is\n not specified as a `Node` or string.\n \"\"\"\n if node in self:\n raise ValueError(f\"Node '{node}' already exists in the skeleton.\")\n\n if type(node) == str:\n node = Node(node)\n\n if type(node) != Node:\n raise ValueError(f\"Invalid node type: {node} ({type(node)})\")\n\n self.nodes.append(node)\n\n # Atomic update of the cache.\n self._name_to_node_cache[node.name] = node\n self._node_to_ind_cache[node] = len(self.nodes) - 1\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_nodes","title":"add_nodes(nodes)
","text":"Add multiple Node
s to the skeleton.
Parameters:
Name Type Description Default
nodes
list[Node | str]
A list of Node
objects or string names to create new nodes.
Source code in sleap_io/model/skeleton.py
def add_nodes(self, nodes: list[Node | str]):\n \"\"\"Add multiple `Node`s to the skeleton.\n\n Args:\n nodes: A list of `Node` objects or string names to create new nodes.\n \"\"\"\n for node in nodes:\n self.add_node(node)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.add_symmetry","title":"add_symmetry(node1=None, node2=None)
","text":"Add a symmetry relationship to the skeleton.
Parameters:
Name Type Description Default
node1
Symmetry | NodeOrIndex
The first node specified as a Node
, name or index. If a Symmetry
object is provided, it will be added directly to the skeleton.
None
node2
NodeOrIndex | None
The second node specified as a Node
, name or index.
None
Source code in sleap_io/model/skeleton.py
def add_symmetry(\n self, node1: Symmetry | NodeOrIndex = None, node2: NodeOrIndex | None = None\n):\n \"\"\"Add a symmetry relationship to the skeleton.\n\n Args:\n node1: The first node specified as a `Node`, name or index. If a `Symmetry`\n object is provided, it will be added directly to the skeleton.\n node2: The second node specified as a `Node`, name or index.\n \"\"\"\n symmetry = None\n if type(node1) == Symmetry:\n symmetry = node1\n node1, node2 = symmetry\n\n node1 = self.require_node(node1)\n node2 = self.require_node(node2)\n\n if symmetry is None:\n symmetry = Symmetry({node1, node2})\n\n if symmetry not in self.symmetries:\n self.symmetries.append(symmetry)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.get_flipped_node_inds","title":"get_flipped_node_inds()
","text":"Returns node indices that should be switched when horizontally flipping.
This is useful as a lookup table for flipping the landmark coordinates when doing data augmentation.
Example
>>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])
>>> skel.add_symmetry(\"B_left\", \"B_right\")
>>> skel.add_symmetry(\"D_left\", \"D_right\")
>>> skel.get_flipped_node_inds()
[0, 2, 1, 3, 5, 4]
>>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
>>> pose[skel.get_flipped_node_inds()]
array([[0, 0],
       [2, 2],
       [1, 1],
       [3, 3],
       [5, 5],
       [4, 4]])
Source code in sleap_io/model/skeleton.py
def get_flipped_node_inds(self) -> list[int]:\n \"\"\"Returns node indices that should be switched when horizontally flipping.\n\n This is useful as a lookup table for flipping the landmark coordinates when\n doing data augmentation.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B_left\", \"B_right\", \"C\", \"D_left\", \"D_right\"])\n >>> skel.add_symmetry(\"B_left\", \"B_right\")\n >>> skel.add_symmetry(\"D_left\", \"D_right\")\n >>> skel.flipped_node_inds\n [0, 2, 1, 3, 5, 4]\n >>> pose = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])\n >>> pose[skel.flipped_node_inds]\n array([[0, 0],\n [2, 2],\n [1, 1],\n [3, 3],\n [5, 5],\n [4, 4]])\n \"\"\"\n flip_idx = np.arange(len(self.nodes))\n if len(self.symmetries) > 0:\n symmetry_inds = np.array(\n [(self.index(a), self.index(b)) for a, b in self.symmetries]\n )\n flip_idx[symmetry_inds[:, 0]] = symmetry_inds[:, 1]\n flip_idx[symmetry_inds[:, 1]] = symmetry_inds[:, 0]\n\n flip_idx = flip_idx.tolist()\n return flip_idx\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.index","title":"index(node)
","text":"Return the index of a node specified as a Node
or string name.
Source code in sleap_io/model/skeleton.py
def index(self, node: Node | str) -> int:\n \"\"\"Return the index of a node specified as a `Node` or string name.\"\"\"\n if type(node) == str:\n return self.index(self._name_to_node_cache[node])\n elif type(node) == Node:\n return self._node_to_ind_cache[node]\n else:\n raise IndexError(f\"Invalid indexing argument for skeleton: {node}\")\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rebuild_cache","title":"rebuild_cache(nodes=None)
","text":"Rebuild the node name/index to Node
map caches.
Parameters:
Name Type Description Default
nodes
list[Node] | None
A list of Node
objects to update the cache with. If not provided, the cache will be updated with the current nodes in the skeleton. If nodes are provided, the cache will be updated with the provided nodes, but the current nodes in the skeleton will not be updated. Default is None
.
None
Notes This function should be called when nodes or node list is mutated to update the lookup caches for indexing nodes by name or Node
object.
This is done automatically when nodes are added or removed from the skeleton using the convenience methods in this class.
This method only needs to be used when manually mutating nodes or the node list directly.
Source code in sleap_io/model/skeleton.py
def rebuild_cache(self, nodes: list[Node] | None = None):\n \"\"\"Rebuild the node name/index to `Node` map caches.\n\n Args:\n nodes: A list of `Node` objects to update the cache with. If not provided,\n the cache will be updated with the current nodes in the skeleton. If\n nodes are provided, the cache will be updated with the provided nodes,\n but the current nodes in the skeleton will not be updated. Default is\n `None`.\n\n Notes:\n This function should be called when nodes or node list is mutated to update\n the lookup caches for indexing nodes by name or `Node` object.\n\n This is done automatically when nodes are added or removed from the skeleton\n using the convenience methods in this class.\n\n This method only needs to be used when manually mutating nodes or the node\n list directly.\n \"\"\"\n if nodes is None:\n nodes = self.nodes\n self._name_to_node_cache = {node.name: node for node in nodes}\n self._node_to_ind_cache = {node: i for i, node in enumerate(nodes)}\n
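A sketch of the manual-mutation case this covers, assuming skel is a Skeleton with nodes ['head', 'thorax', 'abdomen']:
>>> from sleap_io.model.skeleton import Node
>>> skel.nodes.append(Node('tail'))   # in-place list mutation bypasses the automatic cache update
>>> skel.rebuild_cache()              # refresh the name/index lookups
>>> skel.index('tail')
3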
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.remove_node","title":"remove_node(node)
","text":"Remove a single node from the skeleton.
Parameters:
Name Type Description Default
node
NodeOrIndex
The node to remove. Can be specified as a string name, integer index, or Node
object.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed node will also be removed.
Warning
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
Source code in sleap_io/model/skeleton.py
def remove_node(self, node: NodeOrIndex):\n \"\"\"Remove a single node from the skeleton.\n\n Args:\n node: The node to remove. Can be specified as a string name, integer index,\n or `Node` object.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed node will also be\n removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained instances to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n self.remove_nodes([node])\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.remove_nodes","title":"remove_nodes(nodes)
","text":"Remove nodes from the skeleton.
Parameters:
Name Type Description Default
nodes
list[NodeOrIndex]
A list of node names, indices, or Node
objects to remove.
This method handles updating the lookup caches necessary for indexing nodes by name.
Any edges and symmetries that are connected to the removed nodes will also be removed.
Warning
This method does NOT update instances that use this skeleton to reflect changes.
It is recommended to use the Labels.remove_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
Source code in sleap_io/model/skeleton.py
def remove_nodes(self, nodes: list[NodeOrIndex]):\n \"\"\"Remove nodes from the skeleton.\n\n Args:\n nodes: A list of node names, indices, or `Node` objects to remove.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Any edges and symmetries that are connected to the removed nodes will also\n be removed.\n\n Warning:\n **This method does NOT update instances** that use this skeleton to reflect\n changes.\n\n It is recommended to use the `Labels.remove_nodes()` method which will\n update all contained to reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `instance.update_nodes()` on each instance that uses this skeleton.\n \"\"\"\n # Standardize input and make a pre-mutation copy before keys are changed.\n rm_node_objs = [self.require_node(node, add_missing=False) for node in nodes]\n\n # Remove nodes from the skeleton.\n for node in rm_node_objs:\n self.nodes.remove(node)\n del self._name_to_node_cache[node.name]\n\n # Remove edges connected to the removed nodes.\n self.edges = [\n edge\n for edge in self.edges\n if edge.source not in rm_node_objs and edge.destination not in rm_node_objs\n ]\n\n # Remove symmetries connected to the removed nodes.\n self.symmetries = [\n symmetry\n for symmetry in self.symmetries\n if symmetry.nodes.isdisjoint(rm_node_objs)\n ]\n\n # Update node index map.\n self.rebuild_cache()\n
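A sketch on a standalone skeleton (names hypothetical); prefer the Labels-level method when instances reference the skeleton:
>>> skel = Skeleton(['head', 'thorax', 'abdomen'], edges=[('head', 'thorax'), ('thorax', 'abdomen')])
>>> skel.remove_nodes(['abdomen'])   # edges and symmetries touching the removed node are dropped too
>>> skel.node_names, skel.edge_inds
(['head', 'thorax'], [(0, 1)])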
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rename_node","title":"rename_node(old_name, new_name)
","text":"Rename a single node in the skeleton.
Parameters:
Name Type Description Default
old_name
NodeOrIndex
The name of the node to rename. Can also be specified as an integer index or Node
object.
new_name
str
The new name for the node.
required
Source code in sleap_io/model/skeleton.py
def rename_node(self, old_name: NodeOrIndex, new_name: str):\n \"\"\"Rename a single node in the skeleton.\n\n Args:\n old_name: The name of the node to rename. Can also be specified as an\n integer index or `Node` object.\n new_name: The new name for the node.\n \"\"\"\n self.rename_nodes({old_name: new_name})\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.rename_nodes","title":"rename_nodes(name_map)
","text":"Rename nodes in the skeleton.
Parameters:
Name Type Description Default
name_map
dict[NodeOrIndex, str] | list[str]
A dictionary mapping old node names to new node names. Keys can be specified as Node
objects, integer indices, or string names. Values must be specified as string names.
If a list of strings is provided of the same length as the current nodes, the nodes will be renamed to the names in the list in order.
required
Raises:
Type Description
ValueError
If the new node names exist in the skeleton or if the old node names are not found in the skeleton.
NotesThis method should always be used when renaming nodes in the skeleton as it handles updating the lookup caches necessary for indexing nodes by name.
After renaming, instances using this skeleton do NOT need to be updated as the nodes are stored by reference in the skeleton, so changes are reflected automatically.
Exampleskel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")]) skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"}) skel.node_names [\"X\", \"Y\", \"Z\"] skel.rename_nodes([\"a\", \"b\", \"c\"]) skel.node_names [\"a\", \"b\", \"c\"]
Source code insleap_io/model/skeleton.py
def rename_nodes(self, name_map: dict[NodeOrIndex, str] | list[str]):\n \"\"\"Rename nodes in the skeleton.\n\n Args:\n name_map: A dictionary mapping old node names to new node names. Keys can be\n specified as `Node` objects, integer indices, or string names. Values\n must be specified as string names.\n\n If a list of strings is provided of the same length as the current\n nodes, the nodes will be renamed to the names in the list in order.\n\n Raises:\n ValueError: If the new node names exist in the skeleton or if the old node\n names are not found in the skeleton.\n\n Notes:\n This method should always be used when renaming nodes in the skeleton as it\n handles updating the lookup caches necessary for indexing nodes by name.\n\n After renaming, instances using this skeleton **do NOT need to be updated**\n as the nodes are stored by reference in the skeleton, so changes are\n reflected automatically.\n\n Example:\n >>> skel = Skeleton([\"A\", \"B\", \"C\"], edges=[(\"A\", \"B\"), (\"B\", \"C\")])\n >>> skel.rename_nodes({\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\"})\n >>> skel.node_names\n [\"X\", \"Y\", \"Z\"]\n >>> skel.rename_nodes([\"a\", \"b\", \"c\"])\n >>> skel.node_names\n [\"a\", \"b\", \"c\"]\n \"\"\"\n if type(name_map) == list:\n if len(name_map) != len(self.nodes):\n raise ValueError(\n \"List of new node names must be the same length as the current \"\n \"nodes.\"\n )\n name_map = {node: name for node, name in zip(self.nodes, name_map)}\n\n for old_name, new_name in name_map.items():\n if type(old_name) == Node:\n old_name = old_name.name\n if type(old_name) == int:\n old_name = self.nodes[old_name].name\n\n if old_name not in self._name_to_node_cache:\n raise ValueError(f\"Node '{old_name}' not found in the skeleton.\")\n if new_name in self._name_to_node_cache:\n raise ValueError(f\"Node '{new_name}' already exists in the skeleton.\")\n\n node = self._name_to_node_cache[old_name]\n node.name = new_name\n self._name_to_node_cache[new_name] = node\n del self._name_to_node_cache[old_name]\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.reorder_nodes","title":"reorder_nodes(new_order)
","text":"Reorder nodes in the skeleton.
Parameters:
new_order (list[NodeOrIndex], required): A list of node names, indices, or Node objects specifying the new order of the nodes.
Raises:
ValueError: If the new order of nodes is not the same length as the current nodes.
Notes: This method handles updating the lookup caches necessary for indexing nodes by name.
Warning: After reordering, instances using this skeleton do not need to be updated as the nodes are stored by reference in the skeleton.
However, the order that points are stored in the instances will not be updated to match the new order of the nodes in the skeleton. This should not matter unless the ordering of the keys in the Instance.points
dictionary is used instead of relying on the skeleton node order.
To make sure these are aligned, it is recommended to use the Labels.reorder_nodes()
method which will update all contained instances to reflect the changes made to the skeleton.
To manually update instances after this method is called, call Instance.update_skeleton()
on each instance that uses this skeleton.
sleap_io/model/skeleton.py
def reorder_nodes(self, new_order: list[NodeOrIndex]):\n \"\"\"Reorder nodes in the skeleton.\n\n Args:\n new_order: A list of node names, indices, or `Node` objects specifying the\n new order of the nodes.\n\n Raises:\n ValueError: If the new order of nodes is not the same length as the current\n nodes.\n\n Notes:\n This method handles updating the lookup caches necessary for indexing nodes\n by name.\n\n Warning:\n After reordering, instances using this skeleton do not need to be updated as\n the nodes are stored by reference in the skeleton.\n\n However, the order that points are stored in the instances will not be\n updated to match the new order of the nodes in the skeleton. This should not\n matter unless the ordering of the keys in the `Instance.points` dictionary\n is used instead of relying on the skeleton node order.\n\n To make sure these are aligned, it is recommended to use the\n `Labels.reorder_nodes()` method which will update all contained instances to\n reflect the changes made to the skeleton.\n\n To manually update instances after this method is called, call\n `Instance.update_skeleton()` on each instance that uses this skeleton.\n \"\"\"\n if len(new_order) != len(self.nodes):\n raise ValueError(\n \"New order of nodes must be the same length as the current nodes.\"\n )\n\n new_nodes = [self.require_node(node, add_missing=False) for node in new_order]\n self.nodes = new_nodes\n
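A short sketch of reorder_nodes with illustrative node names, assuming the nodes-only constructor shown elsewhere in this reference; as noted above, prefer Labels.reorder_nodes() when instances are involved:

    from sleap_io.model.skeleton import Skeleton

    skel = Skeleton(["A", "B", "C"])
    skel.reorder_nodes(["C", "A", "B"])  # names, indices, or Node objects
    print(skel.node_names)  # ["C", "A", "B"]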
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Skeleton.require_node","title":"require_node(node, add_missing=True)
","text":"Return a Node
object, handling indexing and adding missing nodes.
Parameters:
Name Type Description Defaultnode
NodeOrIndex
A Node
object, name or index.
add_missing
bool
If True
, missing nodes will be added to the skeleton. If False
, an error will be raised if the node is not found. Default is True
.
True
Returns:
Type DescriptionNode
The Node
object.
Raises:
Type DescriptionIndexError
If the node is not found in the skeleton and add_missing
is False
.
sleap_io/model/skeleton.py
def require_node(self, node: NodeOrIndex, add_missing: bool = True) -> Node:\n \"\"\"Return a `Node` object, handling indexing and adding missing nodes.\n\n Args:\n node: A `Node` object, name or index.\n add_missing: If `True`, missing nodes will be added to the skeleton. If\n `False`, an error will be raised if the node is not found. Default is\n `True`.\n\n Returns:\n The `Node` object.\n\n Raises:\n IndexError: If the node is not found in the skeleton and `add_missing` is\n `False`.\n \"\"\"\n if node not in self:\n if add_missing:\n self.add_node(node)\n else:\n raise IndexError(f\"Node '{node}' not found in the skeleton.\")\n\n if type(node) == Node:\n return node\n\n return self[node]\n
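A small sketch of require_node resolving names and the add_missing flag (node names illustrative):

    from sleap_io.model.skeleton import Skeleton

    skel = Skeleton(["A", "B"])
    new_node = skel.require_node("C")  # not present, so it is added (add_missing=True by default)
    existing = skel.require_node("A")  # returns the existing Node
    try:
        skel.require_node("missing", add_missing=False)
    except IndexError as err:
        print(err)  # Node 'missing' not found in the skeleton.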
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry","title":"Symmetry
","text":"A relationship between a pair of nodes denoting their left/right pairing.
Attributes:
nodes (set[Node]): A set of two Nodes.
Methods:
__getitem__: Return the first node.
__iter__: Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
@define\nclass Symmetry:\n \"\"\"A relationship between a pair of nodes denoting their left/right pairing.\n\n Attributes:\n nodes: A set of two `Node`s.\n \"\"\"\n\n nodes: set[Node] = field(converter=set, validator=lambda _, __, val: len(val) == 2)\n\n def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n\n def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
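A tiny construction sketch for Symmetry (node names illustrative):

    from sleap_io.model.skeleton import Node, Symmetry

    sym = Symmetry({Node("left_ear"), Node("right_ear")})
    for node in sym:  # __iter__ yields both nodes; order is arbitrary since they live in a set
        print(node.name)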
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry.__getitem__","title":"__getitem__(idx)
","text":"Return the first node.
Source code in sleap_io/model/skeleton.py
def __getitem__(self, idx) -> Node:\n \"\"\"Return the first node.\"\"\"\n for i, node in enumerate(self.nodes):\n if i == idx:\n return node\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.Symmetry.__iter__","title":"__iter__()
","text":"Iterate over the symmetric nodes.
Source code in sleap_io/model/skeleton.py
def __iter__(self):\n \"\"\"Iterate over the symmetric nodes.\"\"\"\n return iter(self.nodes)\n
"},{"location":"reference/sleap_io/model/skeleton/#sleap_io.model.skeleton.is_node_or_index","title":"is_node_or_index(obj)
","text":"Check if an object is a Node
, string name or integer index.
Parameters:
Name Type Description Defaultobj
Any
The object to check.
required NotesThis is mainly for backwards compatibility with Python versions < 3.10 where generics can't be used with isinstance
. In newer Python, this is equivalent to isinstance(obj, NodeOrIndex)
.
sleap_io/model/skeleton.py
def is_node_or_index(obj: typing.Any) -> bool:\n \"\"\"Check if an object is a `Node`, string name or integer index.\n\n Args:\n obj: The object to check.\n\n Notes:\n This is mainly for backwards compatibility with Python versions < 3.10 where\n generics can't be used with `isinstance`. In newer Python, this is equivalent\n to `isinstance(obj, NodeOrIndex)`.\n \"\"\"\n return isinstance(obj, (Node, str, int))\n
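A few quick checks illustrating is_node_or_index:

    from sleap_io.model.skeleton import Node, is_node_or_index

    print(is_node_or_index("thorax"))   # True: string name
    print(is_node_or_index(3))          # True: integer index
    print(is_node_or_index(Node("A")))  # True: Node object
    print(is_node_or_index(1.5))        # False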
"},{"location":"reference/sleap_io/model/suggestions/","title":"suggestions","text":""},{"location":"reference/sleap_io/model/suggestions/#sleap_io.model.suggestions","title":"sleap_io.model.suggestions
","text":"Data module for suggestions.
Classes:
SuggestionFrame: Data structure for a single frame of suggestions.
"},{"location":"reference/sleap_io/model/suggestions/#sleap_io.model.suggestions.SuggestionFrame","title":"SuggestionFrame
","text":"Data structure for a single frame of suggestions.
Attributes:
video (Video): The video associated with the frame.
frame_idx (int): The index of the frame in the video.
Source code in sleap_io/model/suggestions.py
@attrs.define(auto_attribs=True)\nclass SuggestionFrame:\n \"\"\"Data structure for a single frame of suggestions.\n\n Attributes:\n video: The video associated with the frame.\n frame_idx: The index of the frame in the video.\n \"\"\"\n\n video: Video\n frame_idx: int\n
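A minimal construction sketch; the video path is a placeholder:

    from sleap_io.model.suggestions import SuggestionFrame
    from sleap_io.model.video import Video

    video = Video(filename="example.mp4", open_backend=False)  # placeholder path
    suggestion = SuggestionFrame(video=video, frame_idx=42)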
"},{"location":"reference/sleap_io/model/video/","title":"video","text":""},{"location":"reference/sleap_io/model/video/#sleap_io.model.video","title":"sleap_io.model.video
","text":"Data model for videos.
The Video class is a SLEAP data structure that stores information regarding a video and its components used in SLEAP.
Classes:
Video: Video class used by sleap to represent videos and data associated with them.
Video
","text":"Video
class used by sleap to represent videos and data associated with them.
This class is used to store information regarding a video and its components. It is used to store the video's filename
, shape
, and the video's backend
.
To create a Video
object, use the from_filename
method which will select the backend appropriately.
Attributes:
filename (str | list[str]): The filename(s) of the video. Supported extensions: "mp4", "avi", "mov", "mj2", "mkv", "h5", "hdf5", "slp", "png", "jpg", "jpeg", "tif", "tiff", "bmp". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
backend (Optional[VideoBackend]): An object that implements the basic methods for reading and manipulating frames of a specific video type.
backend_metadata (dict[str, any]): A dictionary of metadata specific to the backend. This is useful for storing metadata that requires an open backend (e.g., shape information) without having access to the video file itself.
source_video (Optional[Video]): The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video.
open_backend (bool): Whether to open the backend when the video is available. If True (the default), the backend will be automatically opened if the video exists. Set this to False when you want to manually open the backend, or when you know the video file does not exist and you want to avoid trying to open the file.
Instances of this class are hashed by identity, not by value. This means that two Video instances with the same attributes will NOT be considered equal in a set or dict.
See also: VideoBackend
Methods:
__attrs_post_init__: Post init syntactic sugar.
__deepcopy__: Deep copy the video object.
__getitem__: Return the frames of the video at the given indices.
__len__: Return the length of the video as the number of frames.
__repr__: Informal string representation (for print or format).
__str__: Informal string representation (for print or format).
close: Close the video backend.
exists: Check if the video file exists and is accessible.
from_filename: Create a Video from a filename.
open: Open the video backend for reading.
replace_filename: Update the filename of the video, optionally opening the backend.
save: Save video frames to a new video file.
Attributes:
grayscale (bool | None): Return whether the video is grayscale.
is_open (bool): Check if the video backend is open.
shape (Tuple[int, int, int, int] | None): Return the shape of the video as (num_frames, height, width, channels).
Source code in sleap_io/model/video.py
@attrs.define(eq=False)\nclass Video:\n \"\"\"`Video` class used by sleap to represent videos and data associated with them.\n\n This class is used to store information regarding a video and its components.\n It is used to store the video's `filename`, `shape`, and the video's `backend`.\n\n To create a `Video` object, use the `from_filename` method which will select the\n backend appropriately.\n\n Attributes:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n backend: An object that implements the basic methods for reading and\n manipulating frames of a specific video type.\n backend_metadata: A dictionary of metadata specific to the backend. This is\n useful for storing metadata that requires an open backend (e.g., shape\n information) without having access to the video file itself.\n source_video: The source video object if this is a proxy video. This is present\n when the video contains an embedded subset of frames from another video.\n open_backend: Whether to open the backend when the video is available. If `True`\n (the default), the backend will be automatically opened if the video exists.\n Set this to `False` when you want to manually open the backend, or when the\n you know the video file does not exist and you want to avoid trying to open\n the file.\n\n Notes:\n Instances of this class are hashed by identity, not by value. This means that\n two `Video` instances with the same attributes will NOT be considered equal in a\n set or dict.\n\n See also: VideoBackend\n \"\"\"\n\n filename: str | list[str]\n backend: Optional[VideoBackend] = None\n backend_metadata: dict[str, any] = attrs.field(factory=dict)\n source_video: Optional[Video] = None\n open_backend: bool = True\n\n EXTS = MediaVideo.EXTS + HDF5Video.EXTS + ImageVideo.EXTS\n\n def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n\n def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n\n @classmethod\n def from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n ) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. 
If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n\n @property\n def shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n If the video backend is not set or it cannot determine the shape of the video,\n this will return None.\n \"\"\"\n return self._get_shape()\n\n def _get_shape(self) -> Tuple[int, int, int, int] | None:\n \"\"\"Return the shape of the video as (num_frames, height, width, channels).\n\n This suppresses errors related to querying the backend for the video shape, such\n as when it has not been set or when the video file is not found.\n \"\"\"\n try:\n return self.backend.shape\n except:\n if \"shape\" in self.backend_metadata:\n return self.backend_metadata[\"shape\"]\n return None\n\n @property\n def grayscale(self) -> bool | None:\n \"\"\"Return whether the video is grayscale.\n\n If the video backend is not set or it cannot determine whether the video is\n grayscale, this will return None.\n \"\"\"\n shape = self.shape\n if shape is not None:\n return shape[-1] == 1\n else:\n grayscale = None\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n return grayscale\n\n @grayscale.setter\n def grayscale(self, value: bool):\n \"\"\"Set the grayscale value and adjust the backend.\"\"\"\n if self.backend is not None:\n self.backend.grayscale = value\n self.backend._cached_shape = None\n\n self.backend_metadata[\"grayscale\"] = value\n\n def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n\n def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n\n def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n\n def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. 
Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n\n def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n\n @property\n def is_open(self) -> bool:\n \"\"\"Check if the video backend is open.\"\"\"\n return self.exists() and self.backend is not None\n\n def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n ):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. 
If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n\n def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n\n def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n ):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n\n def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n ) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. 
If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
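A minimal usage sketch of the Video class; "video.mp4" is a placeholder path:

    from sleap_io.model.video import Video

    video = Video.from_filename("video.mp4")  # backend chosen from the extension
    print(video.shape)  # (num_frames, height, width, channels), or None if unreadable
    frame = video[0]    # first frame as a numpy array
    video.close()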
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.grayscale","title":"grayscale: bool | None
property
writable
","text":"Return whether the video is grayscale.
If the video backend is not set or it cannot determine whether the video is grayscale, this will return None.
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.is_open","title":"is_open: bool
property
","text":"Check if the video backend is open.
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.shape","title":"shape: Tuple[int, int, int, int] | None
property
","text":"Return the shape of the video as (num_frames, height, width, channels).
If the video backend is not set or it cannot determine the shape of the video, this will return None.
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__attrs_post_init__","title":"__attrs_post_init__()
","text":"Post init syntactic sugar.
Source code in sleap_io/model/video.py
def __attrs_post_init__(self):\n \"\"\"Post init syntactic sugar.\"\"\"\n if self.open_backend and self.backend is None and self.exists():\n try:\n self.open()\n except Exception as e:\n # If we can't open the backend, just ignore it for now so we don't\n # prevent the user from building the Video object entirely.\n pass\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__deepcopy__","title":"__deepcopy__(memo)
","text":"Deep copy the video object.
Source code in sleap_io/model/video.py
def __deepcopy__(self, memo):\n \"\"\"Deep copy the video object.\"\"\"\n if id(self) in memo:\n return memo[id(self)]\n\n reopen = False\n if self.is_open:\n reopen = True\n self.close()\n\n new_video = Video(\n filename=self.filename,\n backend=None,\n backend_metadata=self.backend_metadata,\n source_video=self.source_video,\n open_backend=self.open_backend,\n )\n\n memo[id(self)] = new_video\n\n if reopen:\n self.open()\n\n return new_video\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__getitem__","title":"__getitem__(inds)
","text":"Return the frames of the video at the given indices.
Parameters:
inds (int | list[int] | slice, required): Index or list of indices of frames to read.
Returns:
ndarray: Frame or frames as a numpy array of shape (height, width, channels) if a scalar index is provided, or (frames, height, width, channels) if a list of indices is provided.
See also: VideoBackend.get_frame, VideoBackend.get_frames
Source code in sleap_io/model/video.py
def __getitem__(self, inds: int | list[int] | slice) -> np.ndarray:\n \"\"\"Return the frames of the video at the given indices.\n\n Args:\n inds: Index or list of indices of frames to read.\n\n Returns:\n Frame or frames as a numpy array of shape `(height, width, channels)` if a\n scalar index is provided, or `(frames, height, width, channels)` if a list\n of indices is provided.\n\n See also: VideoBackend.get_frame, VideoBackend.get_frames\n \"\"\"\n if not self.is_open:\n if self.open_backend:\n self.open()\n else:\n raise ValueError(\n \"Video backend is not open. Call video.open() or set \"\n \"video.open_backend to True to do automatically on frame read.\"\n )\n return self.backend[inds]\n
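An indexing sketch (placeholder path); scalar indices return one frame, lists or slices return a stack:

    from sleap_io.model.video import Video

    video = Video.from_filename("video.mp4")  # placeholder path
    frame = video[0]             # (height, width, channels)
    frames = video[[0, 10, 20]]  # (3, height, width, channels)
    clip = video[5:10]           # slices are also accepted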
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__len__","title":"__len__()
","text":"Return the length of the video as the number of frames.
Source code insleap_io/model/video.py
def __len__(self) -> int:\n \"\"\"Return the length of the video as the number of frames.\"\"\"\n shape = self.shape\n return 0 if shape is None else shape[0]\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__repr__","title":"__repr__()
","text":"Informal string representation (for print or format).
Source code in sleap_io/model/video.py
def __repr__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n dataset = (\n f\"dataset={self.backend.dataset}, \"\n if getattr(self.backend, \"dataset\", \"\")\n else \"\"\n )\n return (\n \"Video(\"\n f'filename=\"{self.filename}\", '\n f\"shape={self.shape}, \"\n f\"{dataset}\"\n f\"backend={type(self.backend).__name__}\"\n \")\"\n )\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.__str__","title":"__str__()
","text":"Informal string representation (for print or format).
Source code in sleap_io/model/video.py
def __str__(self) -> str:\n \"\"\"Informal string representation (for print or format).\"\"\"\n return self.__repr__()\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.close","title":"close()
","text":"Close the video backend.
Source code insleap_io/model/video.py
def close(self):\n \"\"\"Close the video backend.\"\"\"\n if self.backend is not None:\n # Try to remember values from previous backend if available and not\n # specified.\n try:\n self.backend_metadata[\"dataset\"] = getattr(\n self.backend, \"dataset\", None\n )\n self.backend_metadata[\"grayscale\"] = getattr(\n self.backend, \"grayscale\", None\n )\n self.backend_metadata[\"shape\"] = getattr(self.backend, \"shape\", None)\n except:\n pass\n\n del self.backend\n self.backend = None\n
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.exists","title":"exists(check_all=False, dataset=None)
","text":"Check if the video file exists and is accessible.
Parameters:
check_all (bool): If True, check that all filenames in a list exist. If False (the default), check that the first filename exists. Default: False.
dataset (str | None): Name of dataset in HDF5 file. If specified, this function will return False if the dataset does not exist. Default: None.
Returns:
bool: True if the file exists and is accessible, False otherwise.
sleap_io/model/video.py
def exists(self, check_all: bool = False, dataset: str | None = None) -> bool:\n \"\"\"Check if the video file exists and is accessible.\n\n Args:\n check_all: If `True`, check that all filenames in a list exist. If `False`\n (the default), check that the first filename exists.\n dataset: Name of dataset in HDF5 file. If specified, this will function will\n return `False` if the dataset does not exist.\n\n Returns:\n `True` if the file exists and is accessible, `False` otherwise.\n \"\"\"\n if isinstance(self.filename, list):\n if check_all:\n for f in self.filename:\n if not is_file_accessible(f):\n return False\n return True\n else:\n return is_file_accessible(self.filename[0])\n\n file_is_accessible = is_file_accessible(self.filename)\n if not file_is_accessible:\n return False\n\n if dataset is None or dataset == \"\":\n dataset = self.backend_metadata.get(\"dataset\", None)\n\n if dataset is not None and dataset != \"\":\n has_dataset = False\n if (\n self.backend is not None\n and type(self.backend) == HDF5Video\n and self.backend._open_reader is not None\n ):\n has_dataset = dataset in self.backend._open_reader\n else:\n with h5py.File(self.filename, \"r\") as f:\n has_dataset = dataset in f\n return has_dataset\n\n return True\n
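A small sketch of exists(); the filenames and the HDF5 dataset name are placeholders:

    from sleap_io.model.video import Video

    video = Video(filename="frames.h5", open_backend=False)
    print(video.exists())                        # check the file on disk
    print(video.exists(dataset="video0/video"))  # also require that the dataset is present

    imgs = Video(filename=["img0.png", "img1.png"], open_backend=False)
    print(imgs.exists(check_all=True))           # verify every file in the list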
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.from_filename","title":"from_filename(filename, dataset=None, grayscale=None, keep_open=True, source_video=None, **kwargs)
classmethod
","text":"Create a Video from a filename.
Parameters:
filename (str | list[str], required): The filename(s) of the video. Supported extensions: "mp4", "avi", "mov", "mj2", "mkv", "h5", "hdf5", "slp", "png", "jpg", "jpeg", "tif", "tiff", "bmp". If the filename is a list, a list of image filenames is expected. If filename is a folder, it will be searched for images.
dataset (Optional[str]): Name of dataset in HDF5 file. Default: None.
grayscale (Optional[bool]): Whether to force grayscale. If None, autodetect on first frame load. Default: None.
keep_open (bool): Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames. Default: True.
source_video (Optional[Video]): The source video object if this is a proxy video. This is present when the video contains an embedded subset of frames from another video. Default: None.
Returns:
VideoBackend: Video instance with the appropriate backend instantiated.
Source code in sleap_io/model/video.py
@classmethod\ndef from_filename(\n cls,\n filename: str | list[str],\n dataset: Optional[str] = None,\n grayscale: Optional[bool] = None,\n keep_open: bool = True,\n source_video: Optional[Video] = None,\n **kwargs,\n) -> VideoBackend:\n \"\"\"Create a Video from a filename.\n\n Args:\n filename: The filename(s) of the video. Supported extensions: \"mp4\", \"avi\",\n \"mov\", \"mj2\", \"mkv\", \"h5\", \"hdf5\", \"slp\", \"png\", \"jpg\", \"jpeg\", \"tif\",\n \"tiff\", \"bmp\". If the filename is a list, a list of image filenames are\n expected. If filename is a folder, it will be searched for images.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n source_video: The source video object if this is a proxy video. This is\n present when the video contains an embedded subset of frames from\n another video.\n\n Returns:\n Video instance with the appropriate backend instantiated.\n \"\"\"\n return cls(\n filename=filename,\n backend=VideoBackend.from_filename(\n filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n **kwargs,\n ),\n source_video=source_video,\n )\n
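A few from_filename call patterns; all paths and the dataset name are placeholders:

    from sleap_io.model.video import Video

    mp4 = Video.from_filename("video.mp4", grayscale=True)                 # media file
    h5 = Video.from_filename("frames.h5", dataset="video0/video")          # HDF5 dataset
    imgs = Video.from_filename(["img0.png", "img1.png"], keep_open=False)  # image sequence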
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.open","title":"open(filename=None, dataset=None, grayscale=None, keep_open=True)
","text":"Open the video backend for reading.
Parameters:
filename (Optional[str]): Filename to open. If not specified, will use the filename set on the video object. Default: None.
dataset (Optional[str]): Name of dataset in HDF5 file. Default: None.
grayscale (Optional[str]): Whether to force grayscale. If None, autodetect on first frame load. Default: None.
keep_open (bool): Whether to keep the video reader open between calls to read frames. If False, will close the reader after each call. If True (the default), it will keep the reader open and cache it for subsequent calls which may enhance the performance of reading multiple frames. Default: True.
Notes: This is useful for opening the video backend to read frames and then closing it after reading all the necessary frames.
If the backend was already open, it will be closed before opening a new one. Values for the HDF5 dataset and grayscale will be remembered if not specified.
Source code in sleap_io/model/video.py
def open(\n self,\n filename: Optional[str] = None,\n dataset: Optional[str] = None,\n grayscale: Optional[str] = None,\n keep_open: bool = True,\n):\n \"\"\"Open the video backend for reading.\n\n Args:\n filename: Filename to open. If not specified, will use the filename set on\n the video object.\n dataset: Name of dataset in HDF5 file.\n grayscale: Whether to force grayscale. If None, autodetect on first frame\n load.\n keep_open: Whether to keep the video reader open between calls to read\n frames. If False, will close the reader after each call. If True (the\n default), it will keep the reader open and cache it for subsequent calls\n which may enhance the performance of reading multiple frames.\n\n Notes:\n This is useful for opening the video backend to read frames and then closing\n it after reading all the necessary frames.\n\n If the backend was already open, it will be closed before opening a new one.\n Values for the HDF5 dataset and grayscale will be remembered if not\n specified.\n \"\"\"\n if filename is not None:\n self.replace_filename(filename, open=False)\n\n # Try to remember values from previous backend if available and not specified.\n if self.backend is not None:\n if dataset is None:\n dataset = getattr(self.backend, \"dataset\", None)\n if grayscale is None:\n grayscale = getattr(self.backend, \"grayscale\", None)\n\n else:\n if dataset is None and \"dataset\" in self.backend_metadata:\n dataset = self.backend_metadata[\"dataset\"]\n if grayscale is None:\n if \"grayscale\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"grayscale\"]\n elif \"shape\" in self.backend_metadata:\n grayscale = self.backend_metadata[\"shape\"][-1] == 1\n\n if not self.exists(dataset=dataset):\n msg = f\"Video does not exist or is inaccessible: {self.filename}\"\n if dataset is not None:\n msg += f\" (dataset: {dataset})\"\n raise FileNotFoundError(msg)\n\n # Close previous backend if open.\n self.close()\n\n # Create new backend.\n self.backend = VideoBackend.from_filename(\n self.filename,\n dataset=dataset,\n grayscale=grayscale,\n keep_open=keep_open,\n )\n
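A sketch of deferring the backend open and then opening explicitly (placeholder path):

    from sleap_io.model.video import Video

    video = Video(filename="video.mp4", open_backend=False)  # don't open on construction
    video.open(grayscale=True, keep_open=False)              # open with explicit options
    frame = video[0]
    video.close()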
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.replace_filename","title":"replace_filename(new_filename, open=True)
","text":"Update the filename of the video, optionally opening the backend.
Parameters:
new_filename (str | Path | list[str] | list[Path], required): New filename to set for the video.
open (bool): If True (the default), open the backend with the new filename. If the new filename does not exist, no error is raised. Default: True.
Source code in sleap_io/model/video.py
def replace_filename(\n self, new_filename: str | Path | list[str] | list[Path], open: bool = True\n):\n \"\"\"Update the filename of the video, optionally opening the backend.\n\n Args:\n new_filename: New filename to set for the video.\n open: If `True` (the default), open the backend with the new filename. If\n the new filename does not exist, no error is raised.\n \"\"\"\n if isinstance(new_filename, Path):\n new_filename = new_filename.as_posix()\n\n if isinstance(new_filename, list):\n new_filename = [\n p.as_posix() if isinstance(p, Path) else p for p in new_filename\n ]\n\n self.filename = new_filename\n self.backend_metadata[\"filename\"] = new_filename\n\n if open:\n if self.exists():\n self.open()\n else:\n self.close()\n
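A sketch of repointing a Video at a moved file; paths are placeholders:

    from pathlib import Path
    from sleap_io.model.video import Video

    video = Video(filename="/old/location/video.mp4", open_backend=False)
    video.replace_filename("/new/location/video.mp4", open=False)  # update the path without opening
    video.replace_filename(Path("/new/location/video.mp4"))        # Path objects are converted to POSIX strings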
"},{"location":"reference/sleap_io/model/video/#sleap_io.model.video.Video.save","title":"save(save_path, frame_inds=None, video_kwargs=None)
","text":"Save video frames to a new video file.
Parameters:
save_path (str | Path, required): Path to the new video file. Should end in MP4.
frame_inds (list[int] | ndarray | None): Frame indices to save. Can be specified as a list or array of frame integers. If not specified, saves all video frames. Default: None.
video_kwargs (dict[str, Any] | None): A dictionary of keyword arguments to provide to sio.save_video for video compression. Default: None.
Returns:
Video: A new Video object pointing to the new video file.
Source code in sleap_io/model/video.py
def save(\n self,\n save_path: str | Path,\n frame_inds: list[int] | np.ndarray | None = None,\n video_kwargs: dict[str, Any] | None = None,\n) -> Video:\n \"\"\"Save video frames to a new video file.\n\n Args:\n save_path: Path to the new video file. Should end in MP4.\n frame_inds: Frame indices to save. Can be specified as a list or array of\n frame integers. If not specified, saves all video frames.\n video_kwargs: A dictionary of keyword arguments to provide to\n `sio.save_video` for video compression.\n\n Returns:\n A new `Video` object pointing to the new video file.\n \"\"\"\n video_kwargs = {} if video_kwargs is None else video_kwargs\n frame_inds = np.arange(len(self)) if frame_inds is None else frame_inds\n\n with VideoWriter(save_path, **video_kwargs) as vw:\n for frame_ind in frame_inds:\n vw(self[frame_ind])\n\n new_video = Video.from_filename(save_path, grayscale=self.grayscale)\n return new_video\n
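A sketch of saving a subset of frames to a new file; paths are placeholders:

    import numpy as np

    from sleap_io.model.video import Video

    video = Video.from_filename("video.mp4")
    clip = video.save("clip.mp4", frame_inds=np.arange(100))  # save the first 100 frames
    print(clip.shape)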
"}]}
\ No newline at end of file