diff --git a/src/depiction/image/multi_channel_image_concatenation.py b/src/depiction/image/multi_channel_image_concatenation.py
index 1be4a0d..85c0b44 100644
--- a/src/depiction/image/multi_channel_image_concatenation.py
+++ b/src/depiction/image/multi_channel_image_concatenation.py
@@ -63,7 +63,7 @@ def relabel_combined_image(self, image: MultiChannelImage) -> MultiChannelImageC
         """
         # Ensure the new image has the same shape as the original combined image
         original_combined = self.get_combined_image()
-        if image.dimensions != original_combined.dimensions:
+        if (image.sizes["y"], image.sizes["x"]) != (original_combined.sizes["y"], original_combined.sizes["x"]):
             raise ValueError("The new image must have the same shape as the original combined image")
         labeled = image.append_channels(self._data.sel_channels(coords=["image_index"]))
         return MultiChannelImageConcatenation(data=labeled)
diff --git a/src/depiction/visualize/plot_image.py b/src/depiction/visualize/plot_image.py
index b314deb..9b702dc 100644
--- a/src/depiction/visualize/plot_image.py
+++ b/src/depiction/visualize/plot_image.py
@@ -84,7 +84,7 @@ def plot_channels_grid(
         n_cols = min(n_channels, n_per_row)
 
         # determine the figure size
-        im_width, im_height = self._image.dimensions
+        im_width, im_height = self._image.sizes["x"], self._image.sizes["y"]
         aspect_ratio = im_width / im_height
 
         # set up the grid
diff --git a/src/depiction_cluster_sandbox/workflow/proc/compute_image_umap_coefs.py b/src/depiction_cluster_sandbox/workflow/proc/compute_image_umap_coefs.py
index 558f309..f6577b1 100644
--- a/src/depiction_cluster_sandbox/workflow/proc/compute_image_umap_coefs.py
+++ b/src/depiction_cluster_sandbox/workflow/proc/compute_image_umap_coefs.py
@@ -34,7 +34,7 @@ def compute_image_umap_coefs(
         input_image = retain_features(feature_selection=feature_selection, image=input_image)
 
     # compute the umap transformation into 2D
-    logger.info(f"Computing UMAP for input image with shape {input_image.dimensions}")
+    logger.info(f"Computing UMAP for input image {input_image}")
     umap = UMAP(n_components=2, n_jobs=n_jobs, random_state=random_state)
     values = umap.fit_transform(input_image.data_flat.values.T)