diff --git a/sed/latest/_images/user_guide_1_binning_fake_data_13_0.png b/sed/latest/_images/user_guide_1_binning_fake_data_13_0.png
index 7204597..15436cb 100644
Binary files a/sed/latest/_images/user_guide_1_binning_fake_data_13_0.png and b/sed/latest/_images/user_guide_1_binning_fake_data_13_0.png differ
diff --git a/sed/latest/_images/user_guide_1_binning_fake_data_8_0.png b/sed/latest/_images/user_guide_1_binning_fake_data_8_0.png
index 1513783..1099620 100644
Binary files a/sed/latest/_images/user_guide_1_binning_fake_data_8_0.png and b/sed/latest/_images/user_guide_1_binning_fake_data_8_0.png differ
diff --git a/sed/latest/_modules/index.html b/sed/latest/_modules/index.html
index e77ce59..c614414 100644
--- a/sed/latest/_modules/index.html
+++ b/sed/latest/_modules/index.html
@@ -7,7 +7,7 @@
-SED 0.1.10a6 documentation
+SED 0.1.10a5 documentation
diff --git a/sed/latest/_modules/sed/binning/binning.html b/sed/latest/_modules/sed/binning/binning.html
index 7e2cd42..e7024a0 100644
--- a/sed/latest/_modules/sed/binning/binning.html
+++ b/sed/latest/_modules/sed/binning/binning.html
@@ -7,7 +7,7 @@
-SED 0.1.10a6 documentation
+SED 0.1.10a5 documentation
@@ -447,14 +447,13 @@
"""This module contains the binning functions of the sed.binning module
-
"""
+from __future__ import annotations
+
import gc
+from collections.abc import Sequence
from functools import reduce
from typing import cast
-from typing import List
-from typing import Sequence
-from typing import Tuple
from typing import Union
import dask.dataframe
@@ -476,33 +475,27 @@ Source code for sed.binning.binning
[docs]
def bin_partition(
- part: Union[dask.dataframe.DataFrame, pd.DataFrame],
- bins: Union[
- int,
- dict,
- Sequence[int],
- Sequence[np.ndarray],
- Sequence[tuple],
- ] = 100,
+ part: dask.dataframe.DataFrame | pd.DataFrame,
+ bins: int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple] = 100,
axes: Sequence[str] = None,
- ranges: Sequence[Tuple[float, float]] = None,
+ ranges: Sequence[tuple[float, float]] = None,
hist_mode: str = "numba",
- jitter: Union[list, dict] = None,
+ jitter: list | dict = None,
return_edges: bool = False,
skip_test: bool = False,
-) -> Union[np.ndarray, Tuple[np.ndarray, list]]:
+) -> np.ndarray | tuple[np.ndarray, list]:
"""Compute the n-dimensional histogram of a single dataframe partition.
Args:
- part (Union[dask.dataframe.DataFrame, pd.DataFrame]): dataframe on which
+ part (dask.dataframe.DataFrame | pd.DataFrame): dataframe on which
to perform the histogram. Usually a partition of a dask DataFrame.
- bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple], optional):
+ bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple], optional):
Definition of the bins. Can be any of the following cases:
- an integer describing the number of bins for all dimensions. This
requires "ranges" to be defined as well.
- A sequence containing one entry of the following types for each
- dimension:
+ dimension:
- an integer describing the number of bins. This requires "ranges"
to be defined as well.
@@ -520,7 +513,7 @@ Source code for sed.binning.binning
the order of the dimensions in the resulting array. Only not required if
bins are provided as dictionary containing the axis names.
Defaults to None.
- ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing
+ ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing
the start and end point of the binning range. Required if bins given as
int or Sequence[int]. Defaults to None.
hist_mode (str, optional): Histogram calculation method.
@@ -529,18 +522,18 @@ Source code for sed.binning.binning
- "numba" use a numba powered similar method.
Defaults to "numba".
- jitter (Union[list, dict], optional): a list of the axes on which to apply
+ jitter (list | dict, optional): a list of the axes on which to apply
jittering. To specify the jitter amplitude or method (normal or uniform
noise) a dictionary can be passed. This should look like
jitter={'axis':{'amplitude':0.5,'mode':'uniform'}}.
- This example also shows the default behavior, in case None is
+ This example also shows the default behaviour, in case None is
passed in the dictionary, or jitter is a list of strings.
Warning: this is not the most performing approach. Applying jitter
on the dataframe before calling the binning is much faster.
Defaults to None.
return_edges (bool, optional): If True, returns a list of D arrays
describing the bin edges for each dimension, similar to the
- behavior of ``np.histogramdd``. Defaults to False.
+ behaviour of ``np.histogramdd``. Defaults to False.
skip_test (bool, optional): Turns off input check and data transformation.
Defaults to False as it is intended for internal use only.
Warning: setting this True might make error tracking difficult.
@@ -552,8 +545,8 @@ Source code for sed.binning.binning
present in the dataframe
Returns:
- Union[np.ndarray, Tuple[np.ndarray, list]]: 2-element tuple returned only when
- returnEdges is True. Otherwise only hist is returned.
+ np.ndarray | tuple[np.ndarray, list]: 2-element tuple returned only when
+ return_edges is True. Otherwise only hist is returned.
- **hist**: The result of the n-dimensional binning
- **edges**: A list of D arrays describing the bin edges for each dimension.
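For orientation, a minimal sketch of calling bin_partition with the signature documented in this hunk; the column names, bin settings, and values are illustrative and not taken from the package.

import numpy as np
import pandas as pd

from sed.binning.binning import bin_partition

# A fake single partition with two columns to bin over.
part = pd.DataFrame({"X": np.random.rand(10_000), "Y": np.random.rand(10_000)})

hist, edges = bin_partition(
    part,
    bins=[100, 100],
    axes=["X", "Y"],
    ranges=[(0.0, 1.0), (0.0, 1.0)],
    jitter={"X": {"amplitude": 0.5, "mode": "uniform"}},  # dict form from the docstring
    return_edges=True,
)
print(hist.shape)  # expected: (100, 100)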
@@ -572,19 +565,19 @@ Source code for sed.binning.binning
raise TypeError(
"axes needs to be of type 'List[str]' if tests are skipped!",
)
- bins = cast(Union[List[int], List[np.ndarray]], bins)
- axes = cast(List[str], axes)
- ranges = cast(List[Tuple[float, float]], ranges)
+ bins = cast(Union[list[int], list[np.ndarray]], bins)
+ axes = cast(list[str], axes)
+ ranges = cast(list[tuple[float, float]], ranges)
# convert bin centers to bin edges:
if all(isinstance(x, np.ndarray) for x in bins):
- bins = cast(List[np.ndarray], bins)
+ bins = cast(list[np.ndarray], bins)
for i, bin_centers in enumerate(bins):
bins[i] = bin_centers_to_bin_edges(bin_centers)
else:
- bins = cast(List[int], bins)
+ bins = cast(list[int], bins)
# shift ranges by half a bin size to align the bin centers to the given ranges,
- # as the histogram functions interpret the ranges as limits for the edges.
+ # as the histogram functions interpret the ranges as limits for the edges.
for i, nbins in enumerate(bins):
halfbinsize = (ranges[i][1] - ranges[i][0]) / (nbins) / 2
ranges[i] = (
@@ -656,18 +649,12 @@ Source code for sed.binning.binning
[docs]
def bin_dataframe(
df: dask.dataframe.DataFrame,
- bins: Union[
- int,
- dict,
- Sequence[int],
- Sequence[np.ndarray],
- Sequence[tuple],
- ] = 100,
+ bins: int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple] = 100,
axes: Sequence[str] = None,
- ranges: Sequence[Tuple[float, float]] = None,
+ ranges: Sequence[tuple[float, float]] = None,
hist_mode: str = "numba",
mode: str = "fast",
- jitter: Union[list, dict] = None,
+ jitter: list | dict = None,
pbar: bool = True,
n_cores: int = N_CPU - 1,
threads_per_worker: int = 4,
@@ -681,13 +668,13 @@ Source code for sed.binning.binning
Args:
df (dask.dataframe.DataFrame): a dask.DataFrame on which to perform the
histogram.
- bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple], optional):
+ bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple], optional):
Definition of the bins. Can be any of the following cases:
- an integer describing the number of bins for all dimensions. This
requires "ranges" to be defined as well.
- A sequence containing one entry of the following types for each
- dimension:
+ dimension:
- an integer describing the number of bins. This requires "ranges"
to be defined as well.
@@ -705,7 +692,7 @@ Source code for sed.binning.binning
the order of the dimensions in the resulting array. Only not required if
bins are provided as dictionary containing the axis names.
Defaults to None.
- ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing
+ ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing
the start and end point of the binning range. Required if bins given as
int or Sequence[int]. Defaults to None.
hist_mode (str, optional): Histogram calculation method.
@@ -722,11 +709,11 @@ Source code for sed.binning.binning
- 'legacy': Single-core recombination of partition results.
Defaults to "fast".
- jitter (Union[list, dict], optional): a list of the axes on which to apply
+ jitter (list | dict, optional): a list of the axes on which to apply
jittering. To specify the jitter amplitude or method (normal or uniform
noise) a dictionary can be passed. This should look like
jitter={'axis':{'amplitude':0.5,'mode':'uniform'}}.
- This example also shows the default behavior, in case None is
+ This example also shows the default behaviour, in case None is
passed in the dictionary, or jitter is a list of strings.
Warning: this is not the most performing approach. applying jitter
on the dataframe before calling the binning is much faster.
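For comparison, a hedged sketch of the top-level bin_dataframe call documented in this hunk, applied to a small dask dataframe; all names and values are illustrative.

import dask.dataframe as dd
import numpy as np
import pandas as pd

from sed.binning.binning import bin_dataframe

pdf = pd.DataFrame({"X": np.random.rand(100_000), "Y": np.random.rand(100_000)})
ddf = dd.from_pandas(pdf, npartitions=4)

res = bin_dataframe(              # returns the binned result as an xarray object
    ddf,
    bins=[100, 100],
    axes=["X", "Y"],
    ranges=[(0.0, 1.0), (0.0, 1.0)],
    mode="fast",                  # parallel recombination of partition results, per the docstring
    pbar=False,
)
print(res.shape)                  # expected: (100, 100)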
@@ -757,14 +744,14 @@ Source code for sed.binning.binning
# create the coordinate axes for the xarray output
# if provided as array, they are interpreted as bin centers
if isinstance(bins[0], np.ndarray):
- bins = cast(List[np.ndarray], bins)
+ bins = cast(list[np.ndarray], bins)
coords = dict(zip(axes, bins))
elif ranges is None:
raise ValueError(
"bins is not an array and range is none. this shouldn't happen.",
)
else:
- bins = cast(List[int], bins)
+ bins = cast(list[int], bins)
coords = {
ax: np.linspace(r[0], r[1], n, endpoint=False) for ax, r, n in zip(axes, ranges, bins)
}
@@ -933,7 +920,7 @@ Source code for sed.binning.binning
bin_centers: np.ndarray,
time_unit: float,
) -> xr.DataArray:
- """Get a normalization histogram from a timed dataframe.
+ """Get a normalization histogram from a timed datafram.
Args:
df (dask.dataframe.DataFrame): a dask.DataFrame on which to perform the
@@ -963,7 +950,7 @@ Source code for sed.binning.binning
def apply_jitter_on_column(
- df: Union[dask.dataframe.core.DataFrame, pd.DataFrame],
+ df: dask.dataframe.core.DataFrame | pd.DataFrame,
amp: float,
col: str,
mode: str = "uniform",
diff --git a/sed/latest/_modules/sed/binning/numba_bin.html b/sed/latest/_modules/sed/binning/numba_bin.html
index f78ddd2..9376529 100644
--- a/sed/latest/_modules/sed/binning/numba_bin.html
+++ b/sed/latest/_modules/sed/binning/numba_bin.html
@@ -7,7 +7,7 @@
- sed.binning.numba_bin — SED 0.1.10a6 documentation
+ sed.binning.numba_bin — SED 0.1.10a5 documentation
@@ -34,7 +34,7 @@
-
+
@@ -43,7 +43,7 @@
@@ -121,7 +121,7 @@
- SED 0.1.10a6 documentation
+ SED 0.1.10a5 documentation
@@ -448,14 +448,12 @@
Source code for sed.binning.numba_bin
"""This file contains code for binning using numba precompiled code for the
sed.binning module
-
"""
+from __future__ import annotations
+
+from collections.abc import Sequence
from typing import Any
from typing import cast
-from typing import List
-from typing import Sequence
-from typing import Tuple
-from typing import Union
import numba
import numpy as np
@@ -472,7 +470,7 @@ Source code for sed.binning.numba_bin
bit integers.
Args:
- sample (np.ndarray): The data to be histogram'd with shape N,D.
+ sample (np.ndarray): The data to be histogrammed with shape N,D.
bins (Sequence[int]): The number of bins for each dimension D.
ranges (np.ndarray): A sequence of length D, each an optional (lower,
upper) tuple giving the outer bin edges to be used if the edges are
@@ -497,7 +495,7 @@ Source code for sed.binning.numba_bin
for i in range(ndims):
delta[i] = 1 / ((ranges[i, 1] - ranges[i, 0]) / bins[i])
- strides[i] = hist.strides[i] // hist.itemsize
+ strides[i] = hist.strides[i] // hist.itemsize # pylint: disable=E1136
for t in range(sample.shape[0]):
is_inside = True
@@ -559,7 +557,7 @@ Source code for sed.binning.numba_bin
def _hist_from_bins(
sample: np.ndarray,
bins: Sequence[np.ndarray],
- shape: Tuple,
+ shape: tuple,
) -> np.ndarray:
"""Numba powered binning method, similar to np.histogramdd.
@@ -569,7 +567,7 @@ Source code for sed.binning.numba_bin
sample (np.ndarray) : the array of shape (N,D) on which to compute the histogram
bins (Sequence[np.ndarray]): array of shape (N,D) defining the D bins on which
to compute the histogram, i.e. the desired output axes.
- shape (Tuple): shape of the resulting array. Workaround for the fact numba
+ shape (tuple): shape of the resulting array. Workaround for the fact numba
does not allow to create tuples.
Returns:
hist: the computed n-dimensional histogram
@@ -607,10 +605,10 @@ Source code for sed.binning.numba_bin
[docs]
def numba_histogramdd(
sample: np.ndarray,
- bins: Union[int, Sequence[int], Sequence[np.ndarray], np.ndarray],
+ bins: int | Sequence[int] | Sequence[np.ndarray] | np.ndarray,
ranges: Sequence = None,
-) -> Tuple[np.ndarray, List[np.ndarray]]:
- """Multidimensional histogram function, powered by Numba.
+) -> tuple[np.ndarray, list[np.ndarray]]:
+ """Multidimensional histogramming function, powered by Numba.
Behaves in total much like numpy.histogramdd. Returns uint32 arrays.
This was chosen because it has a significant performance improvement over
@@ -620,8 +618,8 @@ Source code for sed.binning.numba_bin
sizes.
Args:
- sample (np.ndarray): The data to be histogram'd with shape N,D
- bins (Union[int, Sequence[int], Sequence[np.ndarray], np.ndarray]): The number
+ sample (np.ndarray): The data to be histogrammed with shape N,D
+ bins (int | Sequence[int] | Sequence[np.ndarray] | np.ndarray): The number
of bins for each dimension D, or a sequence of bin edges on which to calculate
the histogram.
ranges (Sequence, optional): The range(s) to use for binning when bins is a sequence
@@ -634,7 +632,7 @@ Source code for sed.binning.numba_bin
RuntimeError: Internal shape error after binning
Returns:
- Tuple[np.ndarray, List[np.ndarray]]: 2-element tuple of The computed histogram
+ tuple[np.ndarray, list[np.ndarray]]: 2-element tuple of the computed histogram
and a list of D arrays describing the bin edges for each dimension.
- **hist**: The computed histogram
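A short sketch exercising numba_histogramdd as described above; the uint32 result dtype and the np.histogramdd-like behaviour are taken from the docstring, while the numbers and the (lower, upper) ranges layout are illustrative assumptions.

import numpy as np

from sed.binning.numba_bin import numba_histogramdd

sample = np.random.rand(100_000, 2)                   # (N, D) data, as documented
hist, edges = numba_histogramdd(
    sample,
    bins=[50, 50],
    ranges=np.asarray([[0.0, 1.0], [0.0, 1.0]]),      # assumed (lower, upper) per dimension
)
print(hist.shape, hist.dtype)                         # expected: (50, 50) uint32
print([len(e) for e in edges])                        # expected: [51, 51]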
@@ -666,7 +664,7 @@ Source code for sed.binning.numba_bin
# method == "array"
if isinstance(bins[0], np.ndarray):
- bins = cast(List[np.ndarray], list(bins))
+ bins = cast(list[np.ndarray], list(bins))
hist = _hist_from_bins(
sample,
tuple(bins),
@@ -692,7 +690,7 @@ Source code for sed.binning.numba_bin
bins = tuple(bins)
# Create edge arrays
- edges: List[Any] = []
+ edges: list[Any] = []
nbin = np.empty(num_cols, int)
for i in range(num_cols):
diff --git a/sed/latest/_modules/sed/binning/utils.html b/sed/latest/_modules/sed/binning/utils.html
index 222b725..de0841f 100644
--- a/sed/latest/_modules/sed/binning/utils.html
+++ b/sed/latest/_modules/sed/binning/utils.html
@@ -7,7 +7,7 @@
- sed.binning.utils — SED 0.1.10a6 documentation
+ sed.binning.utils — SED 0.1.10a5 documentation
@@ -34,7 +34,7 @@
-
+
@@ -43,7 +43,7 @@
@@ -121,7 +121,7 @@
- SED 0.1.10a6 documentation
+ SED 0.1.10a5 documentation
@@ -447,13 +447,11 @@
Source code for sed.binning.utils
"""This file contains helper functions for the sed.binning module
-
"""
+from __future__ import annotations
+
+from collections.abc import Sequence
from typing import cast
-from typing import List
-from typing import Sequence
-from typing import Tuple
-from typing import Union
import numpy as np
@@ -466,16 +464,10 @@ Source code for sed.binning.utils
[docs]
def simplify_binning_arguments(
- bins: Union[
- int,
- dict,
- Sequence[int],
- Sequence[np.ndarray],
- Sequence[tuple],
- ],
+ bins: int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple],
axes: Sequence[str] = None,
- ranges: Sequence[Tuple[float, float]] = None,
-) -> Tuple[Union[List[int], List[np.ndarray]], List[str], List[Tuple[float, float]]]:
+ ranges: Sequence[tuple[float, float]] = None,
+) -> tuple[list[int] | list[np.ndarray], list[str], list[tuple[float, float]]]:
"""Convert the flexible input for defining bins into a
simple "axes" "bins" "ranges" tuple.
@@ -483,13 +475,13 @@ Source code for sed.binning.utils
binning functions defined here.
Args:
- bins (int, dict, Sequence[int], Sequence[np.ndarray], Sequence[tuple]):
+ bins (int | dict | Sequence[int] | Sequence[np.ndarray] | Sequence[tuple]):
Definition of the bins. Can be any of the following cases:
- an integer describing the number of bins for all dimensions. This
requires "ranges" to be defined as well.
- A sequence containing one entry of the following types for each
- dimension:
+ dimension:
- an integer describing the number of bins. This requires "ranges"
to be defined as well.
@@ -506,7 +498,7 @@ Source code for sed.binning.utils
the order of the dimensions in the resulting array. Only not required if
bins are provided as dictionary containing the axis names.
Defaults to None.
- ranges (Sequence[Tuple[float, float]], optional): Sequence of tuples containing
+ ranges (Sequence[tuple[float, float]], optional): Sequence of tuples containing
the start and end point of the binning range. Required if bins given as
int or Sequence[int]. Defaults to None.
@@ -517,7 +509,7 @@ Source code for sed.binning.utils
AttributeError: Shape mismatch
Returns:
- Tuple[Union[List[int], List[np.ndarray]], List[Tuple[float, float]]]: Tuple
+ tuple[list[int] | list[np.ndarray], list[str], list[tuple[float, float]]]: Tuple
containing lists of bin centers, axes, and ranges.
"""
# if bins is a dictionary: unravel to axes and bins
@@ -563,7 +555,7 @@ Source code for sed.binning.utils
# if bins are provided as int, check that ranges are present
if all(isinstance(x, (int, np.int64)) for x in bins):
- bins = cast(List[int], list(bins))
+ bins = cast(list[int], list(bins))
if ranges is None:
raise AttributeError(
"Must provide a range if bins is an integer or list of integers",
@@ -573,9 +565,9 @@ Source code for sed.binning.utils
f"Ranges must be a sequence, not {type(ranges)}.",
)
- # otherwise, all bins should be of type np.ndarray here
+ # otherwise, all bins should be np.ndarrays here
elif all(isinstance(x, np.ndarray) for x in bins):
- bins = cast(List[np.ndarray], list(bins))
+ bins = cast(list[np.ndarray], list(bins))
else:
raise TypeError(f"Could not interpret bins of type {type(bins)}")
diff --git a/sed/latest/_modules/sed/calibrator/delay.html b/sed/latest/_modules/sed/calibrator/delay.html
index 781f036..c42dc4d 100644
--- a/sed/latest/_modules/sed/calibrator/delay.html
+++ b/sed/latest/_modules/sed/calibrator/delay.html
@@ -7,7 +7,7 @@
- sed.calibrator.delay — SED 0.1.10a6 documentation
+ sed.calibrator.delay — SED 0.1.10a5 documentation
@@ -34,7 +34,7 @@
-
+
@@ -43,7 +43,7 @@
@@ -121,7 +121,7 @@
- SED 0.1.10a6 documentation
+ SED 0.1.10a5 documentation
@@ -448,14 +448,12 @@
Source code for sed.calibrator.delay
"""sed.calibrator.delay module. Code for delay calibration.
"""
+from __future__ import annotations
+
+from collections.abc import Sequence
from copy import deepcopy
from datetime import datetime
from typing import Any
-from typing import Dict
-from typing import List
-from typing import Sequence
-from typing import Tuple
-from typing import Union
import dask.dataframe
import h5py
@@ -496,32 +494,32 @@ Source code for sed.calibrator.delay
"corrected_delay_column",
self.delay_column,
)
- self.calibration: Dict[str, Any] = self._config["delay"].get("calibration", {})
- self.offsets: Dict[str, Any] = self._config["delay"].get("offsets", {})
+ self.calibration: dict[str, Any] = self._config["delay"].get("calibration", {})
+ self.offsets: dict[str, Any] = self._config["delay"].get("offsets", {})
[docs]
def append_delay_axis(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
adc_column: str = None,
delay_column: str = None,
- calibration: Dict[str, Any] = None,
- adc_range: Union[Tuple, List, np.ndarray] = None,
- delay_range: Union[Tuple, List, np.ndarray] = None,
+ calibration: dict[str, Any] = None,
+ adc_range: tuple | list | np.ndarray = None,
+ delay_range: tuple | list | np.ndarray = None,
time0: float = None,
- delay_range_mm: Union[Tuple, List, np.ndarray] = None,
+ delay_range_mm: tuple | list | np.ndarray = None,
datafile: str = None,
p1_key: str = None,
p2_key: str = None,
t0_key: str = None,
verbose: bool = True,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Calculate and append the delay axis to the events dataframe, by converting
values from an analog-digital-converter (ADC).
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): The dataframe where
+ df (pd.DataFrame | dask.dataframe.DataFrame): The dataframe where
to apply the delay calibration to.
adc_column (str, optional): Source column for delay calibration.
Defaults to config["dataframe"]["adc_column"].
@@ -529,14 +527,14 @@ Source code for sed.calibrator.delay
Defaults to config["dataframe"]["delay_column"].
calibration (dict, optional): Calibration dictionary with parameters for
delay calibration.
- adc_range (Union[Tuple, List, np.ndarray], optional): The range of used
+ adc_range (tuple | list | np.ndarray, optional): The range of used
ADC values. Defaults to config["delay"]["adc_range"].
- delay_range (Union[Tuple, List, np.ndarray], optional): Range of scanned
+ delay_range (tuple | list | np.ndarray, optional): Range of scanned
delay values in ps. If omitted, the range is calculated from the
delay_range_mm and t0 values.
time0 (float, optional): Pump-Probe overlap value of the delay coordinate.
If omitted, it is searched for in the data files.
- delay_range_mm (Union[Tuple, List, np.ndarray], optional): Range of scanned
+ delay_range_mm (tuple | list | np.ndarray, optional): Range of scanned
delay stage in mm. If omitted, it is searched for in the data files.
datafile (str, optional): Datafile in which delay parameters are searched
for. Defaults to None.
@@ -554,8 +552,8 @@ Source code for sed.calibrator.delay
NotImplementedError: Raised if no sufficient information passed.
Returns:
- Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column
- and delay calibration metadata dictionary.
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column
+ and delay calibration metadata dictionary.
"""
# pylint: disable=duplicate-code
if calibration is None:
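A hypothetical call of append_delay_axis following the parameters documented above; dc is assumed to be an already configured DelayCalibrator, df a dataframe holding the ADC column, and the numeric ranges are purely illustrative.

# "dc" (DelayCalibrator instance) and "df" are assumed to exist already.
df, delay_meta = dc.append_delay_axis(
    df,
    adc_range=(650, 6900),         # illustrative range of used ADC values
    delay_range=(-100.0, 200.0),   # illustrative scanned delay range in ps
)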
@@ -662,39 +660,40 @@ Source code for sed.calibrator.delay
def add_offsets(
self,
df: dask.dataframe.DataFrame,
- offsets: Dict[str, Any] = None,
+ offsets: dict[str, Any] = None,
constant: float = None,
flip_delay_axis: bool = None,
- columns: Union[str, Sequence[str]] = None,
- weights: Union[float, Sequence[float]] = 1.0,
- preserve_mean: Union[bool, Sequence[bool]] = False,
- reductions: Union[str, Sequence[str]] = None,
+ columns: str | Sequence[str] = None,
+ weights: float | Sequence[float] = 1.0,
+ preserve_mean: bool | Sequence[bool] = False,
+ reductions: str | Sequence[str] = None,
delay_column: str = None,
verbose: bool = True,
- ) -> Tuple[dask.dataframe.DataFrame, dict]:
+ ) -> tuple[dask.dataframe.DataFrame, dict]:
"""Apply an offset to the delay column based on a constant or other columns.
Args:
df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use.
- offsets (Dict, optional): Dictionary of delay offset parameters.
+ offsets (dict, optional): Dictionary of delay offset parameters.
constant (float, optional): The constant to shift the delay axis by.
flip_delay_axis (bool, optional): Whether to flip the time axis. Defaults to False.
- columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from.
- weights (Union[int, Sequence[int]]): weights to apply to the columns.
+ columns (str | Sequence[str]): Name of the column(s) to apply the shift from.
+ weights (float | Sequence[float]): weights to apply to the columns.
Can also be used to flip the sign (e.g. -1). Defaults to 1.
- preserve_mean (bool): Whether to subtract the mean of the column before applying the
- shift. Defaults to False.
- reductions (str): The reduction to apply to the column. Should be an available method
- of dask.dataframe.Series. For example "mean". In this case the function is applied
- to the column to generate a single value for the whole dataset. If None, the shift
- is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported.
+ preserve_mean (bool | Sequence[bool]): Whether to subtract the mean of the column
+ before applying the shift. Defaults to False.
+ reductions (str | Sequence[str]): The reduction to apply to the column. Should be an
+ available method of dask.dataframe.Series. For example "mean". In this case the
+ function is applied to the column to generate a single value for the whole dataset.
+ If None, the shift is applied per-dataframe-row. Defaults to None. Currently only
+ "mean" is supported.
delay_column (str, optional): Name of the column containing the delay values.
verbose (bool, optional): Option to print out diagnostic information.
Defaults to True.
Returns:
- dask.dataframe.DataFrame: Dataframe with the shifted delay axis.
- dict: Metadata dictionary.
+ tuple[dask.dataframe.DataFrame, dict]: Dataframe with the shifted delay axis and
+ Metadata dictionary.
"""
if offsets is None:
offsets = deepcopy(self.offsets)
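A hypothetical sketch of add_offsets with the parameters documented above; dc, df, and the column name "bam" are placeholders, not taken from the diff.

df, offset_meta = dc.add_offsets(
    df,
    constant=-10.0,           # constant shift of the delay axis (illustrative)
    columns="bam",            # placeholder column to take the per-row shift from
    weights=-1.0,             # flips the sign of that column's contribution
    preserve_mean=True,       # subtract the column mean before shifting
    flip_delay_axis=False,
)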
@@ -702,7 +701,7 @@ Source code for sed.calibrator.delay
if delay_column is None:
delay_column = self.delay_column
- metadata: Dict[str, Any] = {
+ metadata: dict[str, Any] = {
"applied": True,
}
@@ -838,7 +837,7 @@ Source code for sed.calibrator.delay
p1_key: str,
p2_key: str,
t0_key: str,
-) -> Tuple:
+) -> tuple:
"""
Read delay stage ranges from hdf5 file
@@ -866,18 +865,18 @@ Source code for sed.calibrator.delay
[docs]
def mm_to_ps(
- delay_mm: Union[float, np.ndarray],
+ delay_mm: float | np.ndarray,
time0_mm: float,
-) -> Union[float, np.ndarray]:
- """Converts a delay stage position in mm into a relative delay in picoseconds
+) -> float | np.ndarray:
+ """Converts a delaystage position in mm into a relative delay in picoseconds
(double pass).
Args:
- delay_mm (Union[float, Sequence[float]]): Delay stage position in mm
+ delay_mm (float | np.ndarray): Delay stage position in mm
time0_mm (float): Delay stage position of pump-probe overlap in mm
Returns:
- Union[float, Sequence[float]]: Relative delay in picoseconds
+ float | np.ndarray: Relative delay in picoseconds
"""
delay_ps = (delay_mm - time0_mm) / 0.15
return delay_ps
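The double-pass conversion above can be checked with a one-line worked example: moving the stage 0.15 mm past time zero adds 2 * 0.15 mm = 0.3 mm of optical path, which at roughly 0.3 mm/ps light travel corresponds to 1 ps of delay.

from sed.calibrator.delay import mm_to_ps

print(mm_to_ps(delay_mm=10.15, time0_mm=10.0))  # (10.15 - 10.0) / 0.15 = 1.0 ps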
diff --git a/sed/latest/_modules/sed/calibrator/energy.html b/sed/latest/_modules/sed/calibrator/energy.html
index 1ea72b2..0f0e3a9 100644
--- a/sed/latest/_modules/sed/calibrator/energy.html
+++ b/sed/latest/_modules/sed/calibrator/energy.html
@@ -7,7 +7,7 @@
- sed.calibrator.energy — SED 0.1.10a6 documentation
+ sed.calibrator.energy — SED 0.1.10a5 documentation
@@ -34,7 +34,7 @@
-
+
@@ -43,7 +43,7 @@
@@ -121,7 +121,7 @@
- SED 0.1.10a6 documentation
+ SED 0.1.10a5 documentation
@@ -449,19 +449,17 @@ Source code for sed.calibrator.energy
"""sed.calibrator.energy module. Code for energy calibration and
correction. Mostly ported from https://github.com/mpes-kit/mpes.
"""
+from __future__ import annotations
+
import itertools as it
import warnings as wn
+from collections.abc import Sequence
from copy import deepcopy
from datetime import datetime
from functools import partial
from typing import Any
from typing import cast
-from typing import Dict
-from typing import List
from typing import Literal
-from typing import Sequence
-from typing import Tuple
-from typing import Union
import bokeh.plotting as pbk
import dask.dataframe
@@ -543,9 +541,9 @@ Source code for sed.calibrator.energy
self._config = config
- self.featranges: List[Tuple] = [] # Value ranges for feature detection
+ self.featranges: list[tuple] = [] # Value ranges for feature detection
self.peaks: np.ndarray = np.asarray([])
- self.calibration: Dict[str, Any] = self._config["energy"].get("calibration", {})
+ self.calibration: dict[str, Any] = self._config["energy"].get("calibration", {})
self.tof_column = self._config["dataframe"]["tof_column"]
self.tof_ns_column = self._config["dataframe"].get("tof_ns_column", None)
@@ -564,8 +562,8 @@ Source code for sed.calibrator.energy
self.color_clip = self._config["energy"]["color_clip"]
self.sector_delays = self._config["dataframe"].get("sector_delays", None)
self.sector_id_column = self._config["dataframe"].get("sector_id_column", None)
- self.offsets: Dict[str, Any] = self._config["energy"].get("offsets", {})
- self.correction: Dict[str, Any] = self._config["energy"].get("correction", {})
+ self.offsets: dict[str, Any] = self._config["energy"].get("offsets", {})
+ self.correction: dict[str, Any] = self._config["energy"].get("correction", {})
@property
def ntraces(self) -> int:
@@ -632,10 +630,10 @@ Source code for sed.calibrator.energy
[docs]
def bin_data(
self,
- data_files: List[str],
- axes: List[str] = None,
- bins: List[int] = None,
- ranges: Sequence[Tuple[float, float]] = None,
+ data_files: list[str],
+ axes: list[str] = None,
+ bins: list[int] = None,
+ ranges: Sequence[tuple[float, float]] = None,
biases: np.ndarray = None,
bias_key: str = None,
**kwds,
@@ -643,12 +641,12 @@ Source code for sed.calibrator.energy
"""Bin data from single-event files, and load into class.
Args:
- data_files (List[str]): list of file names to bin
- axes (List[str], optional): bin axes. Defaults to
+ data_files (list[str]): list of file names to bin
+ axes (list[str], optional): bin axes. Defaults to
config["dataframe"]["tof_column"].
- bins (List[int], optional): number of bins.
+ bins (list[int], optional): number of bins.
Defaults to config["energy"]["bins"].
- ranges (Sequence[Tuple[float, float]], optional): bin ranges.
+ ranges (Sequence[tuple[float, float]], optional): bin ranges.
Defaults to config["energy"]["ranges"].
biases (np.ndarray, optional): Bias voltages used.
If not provided, biases are extracted from the file meta data.
@@ -664,7 +662,7 @@ Source code for sed.calibrator.energy
ranges_ = [
np.array(self._config["energy"]["ranges"]) / 2 ** (self.binning - 1),
]
- ranges = [cast(Tuple[float, float], tuple(v)) for v in ranges_]
+ ranges = [cast(tuple[float, float], tuple(v)) for v in ranges_]
# pylint: disable=duplicate-code
hist_mode = kwds.pop("hist_mode", self._config["binning"]["hist_mode"])
mode = kwds.pop("mode", self._config["binning"]["mode"])
@@ -750,7 +748,7 @@ Source code for sed.calibrator.energy
[docs]
def adjust_ranges(
self,
- ranges: Tuple,
+ ranges: tuple,
ref_id: int = 0,
traces: np.ndarray = None,
peak_window: int = 7,
@@ -761,7 +759,7 @@ Source code for sed.calibrator.energy
(containing the peaks) among all traces.
Args:
- ranges (Tuple):
+ ranges (tuple):
Collection of feature detection ranges, within which an algorithm
(i.e. 1D peak detector) will look for the feature.
ref_id (int, optional): Index of the reference trace. Defaults to 0.
@@ -842,8 +840,8 @@ Source code for sed.calibrator.energy
plot_segs[itr].set_ydata(traceseg)
plot_segs[itr].set_xdata(tofseg)
- plot_peaks[itr].set_xdata(self.peaks[itr, 0])
- plot_peaks[itr].set_ydata(self.peaks[itr, 1])
+ plot_peaks[itr].set_xdata([self.peaks[itr, 0]])
+ plot_peaks[itr].set_ydata([self.peaks[itr, 1]])
fig.canvas.draw_idle()
@@ -893,7 +891,7 @@ Source code for sed.calibrator.energy
[docs]
def add_ranges(
self,
- ranges: Union[List[Tuple], Tuple],
+ ranges: list[tuple] | tuple,
ref_id: int = 0,
traces: np.ndarray = None,
infer_others: bool = True,
@@ -903,14 +901,14 @@ Source code for sed.calibrator.energy
"""Select or extract the equivalent feature ranges (containing the peaks) among all traces.
Args:
- ranges (Union[List[Tuple], Tuple]):
+ ranges (list[tuple] | tuple):
Collection of feature detection ranges, within which an algorithm
(i.e. 1D peak detector) will look for the feature.
ref_id (int, optional): Index of the reference trace. Defaults to 0.
traces (np.ndarray, optional): Collection of energy dispersion curves.
Defaults to self.traces_normed.
infer_others (bool, optional): Option to infer the feature detection range
- in other traces from a given one using a time warp algorithm.
+ in other traces from a given one using a time warp algorithm.
Defaults to True.
mode (str, optional): Specification on how to change the feature ranges
('append' or 'replace'). Defaults to "replace".
@@ -923,7 +921,7 @@ Source code for sed.calibrator.energy
# Infer the corresponding feature detection range of other traces by alignment
if infer_others:
assert isinstance(ranges, tuple)
- newranges: List[Tuple] = []
+ newranges: list[tuple] = []
for i in range(self.ntraces):
pathcorr = find_correspondence(
@@ -949,14 +947,14 @@ Source code for sed.calibrator.energy
[docs]
def feature_extract(
self,
- ranges: List[Tuple] = None,
+ ranges: list[tuple] = None,
traces: np.ndarray = None,
peak_window: int = 7,
):
"""Select or extract the equivalent landmarks (e.g. peaks) among all traces.
Args:
- ranges (List[Tuple], optional): List of ranges in each trace to look for
+ ranges (list[tuple], optional): List of ranges in each trace to look for
the peak feature, [start, end]. Defaults to self.featranges.
traces (np.ndarray, optional): Collection of 1D spectra to use for
calibration. Defaults to self.traces_normed.
@@ -1082,7 +1080,7 @@ Source code for sed.calibrator.energy
def view( # pylint: disable=dangerous-default-value
self,
traces: np.ndarray,
- segs: List[Tuple] = None,
+ segs: list[tuple] = None,
peaks: np.ndarray = None,
show_legend: bool = True,
backend: str = "matplotlib",
@@ -1096,7 +1094,7 @@ Source code for sed.calibrator.energy
Args:
traces (np.ndarray): Matrix of traces to visualize.
- segs (List[Tuple], optional): Segments to be highlighted in the
+ segs (list[tuple], optional): Segments to be highlighted in the
visualization. Defaults to None.
peaks (np.ndarray, optional): Peak positions for labelling the traces.
Defaults to None.
@@ -1130,7 +1128,7 @@ Source code for sed.calibrator.energy
if backend == "matplotlib":
figsize = kwds.pop("figsize", (12, 4))
- fig, ax = plt.subplots(figsize=figsize)
+ fig_plt, ax = plt.subplots(figsize=figsize)
for itr, trace in enumerate(traces):
if align:
ax.plot(
@@ -1255,17 +1253,17 @@ Source code for sed.calibrator.energy
[docs]
def append_energy_axis(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
tof_column: str = None,
energy_column: str = None,
calibration: dict = None,
verbose: bool = True,
**kwds,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Calculate and append the energy axis to the events dataframe.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]):
+ df (pd.DataFrame | dask.dataframe.DataFrame):
Dataframe to apply the energy axis calibration to.
tof_column (str, optional): Label of the source column.
Defaults to config["dataframe"]["tof_column"].
@@ -1284,7 +1282,7 @@ Source code for sed.calibrator.energy
NotImplementedError: Raised if an invalid calib_type is found.
Returns:
- Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column
and energy calibration metadata dictionary.
"""
if tof_column is None:
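A hypothetical call of append_energy_axis as documented above; ec is assumed to be an EnergyCalibrator that already holds a calibration, df a dataframe with the TOF column, and the column names are placeholders for the config defaults.

df, energy_meta = ec.append_energy_axis(
    df,
    tof_column="t",           # placeholder; normally taken from config["dataframe"]["tof_column"]
    energy_column="energy",   # placeholder target column name
)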
@@ -1371,15 +1369,15 @@ Source code for sed.calibrator.energy
[docs]
def append_tof_ns_axis(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
tof_column: str = None,
tof_ns_column: str = None,
**kwds,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Converts the time-of-flight time from steps to time in ns.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to convert.
+ df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to convert.
tof_column (str, optional): Name of the column containing the
time-of-flight steps. Defaults to config["dataframe"]["tof_column"].
tof_ns_column (str, optional): Name of the column to store the
@@ -1390,8 +1388,8 @@ Source code for sed.calibrator.energy
Defaults to config["energy"]["tof_binning"].
Returns:
- dask.dataframe.DataFrame: Dataframe with the new columns.
- dict: Metadata dictionary.
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with the new columns
+ and Metadata dictionary.
"""
binwidth = kwds.pop("binwidth", self.binwidth)
binning = kwds.pop("binning", self.binning)
@@ -1409,7 +1407,7 @@ Source code for sed.calibrator.energy
binning,
df[tof_column].astype("float64"),
)
- metadata: Dict[str, Any] = {
+ metadata: dict[str, Any] = {
"applied": True,
"binwidth": binwidth,
"binning": binning,
@@ -1431,7 +1429,7 @@ Source code for sed.calibrator.energy
"""
if calibration is None:
calibration = self.calibration
- metadata: Dict[Any, Any] = {}
+ metadata: dict[Any, Any] = {}
metadata["applied"] = True
metadata["calibration"] = deepcopy(calibration)
metadata["tof"] = deepcopy(self.tof)
@@ -1449,7 +1447,7 @@ Source code for sed.calibrator.energy
image: xr.DataArray,
correction_type: str = None,
amplitude: float = None,
- center: Tuple[float, float] = None,
+ center: tuple[float, float] = None,
correction: dict = None,
apply: bool = False,
**kwds,
@@ -1469,7 +1467,7 @@ Source code for sed.calibrator.energy
Defaults to config["energy"]["correction_type"].
amplitude (float, optional): Amplitude of the time-of-flight correction
term. Defaults to config["energy"]["correction"]["correction_type"].
- center (Tuple[float, float], optional): Center (x/y) coordinates for the
+ center (tuple[float, float], optional): Center (x/y) coordinates for the
correction. Defaults to config["energy"]["correction"]["center"].
correction (dict, optional): Correction dict. Defaults to the config values
and is updated from provided and adjusted parameters.
@@ -1620,9 +1618,9 @@ Source code for sed.calibrator.energy
)
trace1.set_ydata(correction_x)
- line1.set_xdata(x=x_center)
+ line1.set_xdata([x_center])
trace2.set_ydata(correction_y)
- line2.set_xdata(x=y_center)
+ line2.set_xdata([y_center])
fig.canvas.draw_idle()
@@ -1642,7 +1640,7 @@ Source code for sed.calibrator.energy
update(correction["amplitude"], x_center, y_center, diameter=correction["diameter"])
except KeyError as exc:
raise ValueError(
- "Parameter 'diameter' required for correction type 'spherical', ",
+ "Parameter 'diameter' required for correction type 'sperical', ",
"but not present!",
) from exc
@@ -1798,7 +1796,7 @@ Source code for sed.calibrator.energy
[docs]
def apply_energy_correction(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
tof_column: str = None,
new_tof_column: str = None,
correction_type: str = None,
@@ -1806,11 +1804,11 @@ Source code for sed.calibrator.energy
correction: dict = None,
verbose: bool = True,
**kwds,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Apply correction to the time-of-flight (TOF) axis of single-event data.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): The dataframe where
+ df (pd.DataFrame | dask.dataframe.DataFrame): The dataframe where
to apply the energy correction to.
tof_column (str, optional): Name of the source column to convert.
Defaults to config["dataframe"]["tof_column"].
@@ -1827,7 +1825,7 @@ Source code for sed.calibrator.energy
Defaults to config["energy"]["correction_type"].
amplitude (float, optional): Amplitude of the time-of-flight correction
term. Defaults to config["energy"]["correction"]["correction_type"].
- correction (dict, optional): Correction dictionary containing parameters
+ correction (dict, optional): Correction dictionary containing parameters
for the correction. Defaults to self.correction or
config["energy"]["correction"].
verbose (bool, optional): Option to print out diagnostic information.
@@ -1847,7 +1845,7 @@ Source code for sed.calibrator.energy
asymmetric 2D Lorentz profile, X-direction.
Returns:
- Union[pd.DataFrame, dask.dataframe.DataFrame]: dataframe with added column
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: dataframe with added column
and Energy correction metadata dictionary.
"""
if correction is None:
@@ -1908,7 +1906,7 @@ Source code for sed.calibrator.energy
"""
if correction is None:
correction = self.correction
- metadata: Dict[Any, Any] = {}
+ metadata: dict[Any, Any] = {}
metadata["applied"] = True
metadata["correction"] = deepcopy(correction)
@@ -1923,11 +1921,11 @@ Source code for sed.calibrator.energy
tof_column: str = None,
sector_id_column: str = None,
sector_delays: np.ndarray = None,
- ) -> Tuple[dask.dataframe.DataFrame, dict]:
+ ) -> tuple[dask.dataframe.DataFrame, dict]:
"""Aligns the time-of-flight axis of the different sections of a detector.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use.
+ df (dask.dataframe.DataFrame): Dataframe to use.
tof_column (str, optional): Name of the column containing the time-of-flight values.
Defaults to config["dataframe"]["tof_column"].
sector_id_column (str, optional): Name of the column containing the sector id values.
@@ -1936,8 +1934,8 @@ Source code for sed.calibrator.energy
config["dataframe"]["sector_delays"].
Returns:
- dask.dataframe.DataFrame: Dataframe with the new columns.
- dict: Metadata dictionary.
+ tuple[dask.dataframe.DataFrame, dict]: Dataframe with the new columns and Metadata
+ dictionary.
"""
if sector_delays is None:
sector_delays = self.sector_delays
@@ -1959,7 +1957,7 @@ Source code for sed.calibrator.energy
return val.astype(np.float32)
df[tof_column] = df.map_partitions(align_sector, meta=(tof_column, np.float32))
- metadata: Dict[str, Any] = {
+ metadata: dict[str, Any] = {
"applied": True,
"sector_delays": sector_delays,
}
@@ -1970,16 +1968,16 @@ Source code for sed.calibrator.energy
[docs]
def add_offsets(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame] = None,
- offsets: Dict[str, Any] = None,
+ df: pd.DataFrame | dask.dataframe.DataFrame = None,
+ offsets: dict[str, Any] = None,
constant: float = None,
- columns: Union[str, Sequence[str]] = None,
- weights: Union[float, Sequence[float]] = None,
- preserve_mean: Union[bool, Sequence[bool]] = False,
- reductions: Union[str, Sequence[str]] = None,
+ columns: str | Sequence[str] = None,
+ weights: float | Sequence[float] = None,
+ preserve_mean: bool | Sequence[bool] = False,
+ reductions: str | Sequence[str] = None,
energy_column: str = None,
verbose: bool = True,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Apply an offset to the energy column by the values of the provided columns.
If no parameter is passed to this function, the offset is applied as defined in the
@@ -1987,25 +1985,26 @@ Source code for sed.calibrator.energy
and the offset is applied using the ``dfops.apply_offset_from_columns()`` function.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to use.
+ df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to use.
offsets (Dict, optional): Dictionary of energy offset parameters.
constant (float, optional): The constant to shift the energy axis by.
- columns (Union[str, Sequence[str]]): Name of the column(s) to apply the shift from.
- weights (Union[float, Sequence[float]]): weights to apply to the columns.
+ columns (str | Sequence[str]): Name of the column(s) to apply the shift from.
+ weights (float | Sequence[float]): weights to apply to the columns.
Can also be used to flip the sign (e.g. -1). Defaults to 1.
- preserve_mean (bool): Whether to subtract the mean of the column before applying the
- shift. Defaults to False.
- reductions (str): The reduction to apply to the column. Should be an available method
- of dask.dataframe.Series. For example "mean". In this case the function is applied
- to the column to generate a single value for the whole dataset. If None, the shift
- is applied per-dataframe-row. Defaults to None. Currently only "mean" is supported.
+ preserve_mean (bool | Sequence[bool]): Whether to subtract the mean of the column
+ before applying the shift. Defaults to False.
+ reductions (str | Sequence[str]): The reduction to apply to the column. Should be an
+ available method of dask.dataframe.Series. For example "mean". In this case the
+ function is applied to the column to generate a single value for the whole dataset.
+ If None, the shift is applied per-dataframe-row. Defaults to None. Currently only
+ "mean" is supported.
energy_column (str, optional): Name of the column containing the energy values.
verbose (bool, optional): Option to print out diagnostic information.
Defaults to True.
Returns:
- dask.dataframe.DataFrame: Dataframe with the new columns.
- dict: Metadata dictionary.
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with the new columns
+ and Metadata dictionary.
"""
if offsets is None:
offsets = deepcopy(self.offsets)
@@ -2013,7 +2012,7 @@ Source code for sed.calibrator.energy
if energy_column is None:
energy_column = self.energy_column
- metadata: Dict[str, Any] = {
+ metadata: dict[str, Any] = {
"applied": True,
}
@@ -2145,17 +2144,17 @@ Source code for sed.calibrator.energy
[docs]
-def extract_bias(files: List[str], bias_key: str) -> np.ndarray:
+def extract_bias(files: list[str], bias_key: str) -> np.ndarray:
"""Read bias values from hdf5 files
Args:
- files (List[str]): List of filenames
+ files (list[str]): List of filenames
bias_key (str): hdf5 path to the bias value
Returns:
np.ndarray: Array of bias values.
"""
- bias_list: List[float] = []
+ bias_list: list[float] = []
for file in files:
with h5py.File(file, "r") as file_handle:
if bias_key[0] == "@":
@@ -2170,21 +2169,21 @@ Source code for sed.calibrator.energy
[docs]
def correction_function(
- x: Union[float, np.ndarray],
- y: Union[float, np.ndarray],
+ x: float | np.ndarray,
+ y: float | np.ndarray,
correction_type: str,
- center: Tuple[float, float],
+ center: tuple[float, float],
amplitude: float,
**kwds,
-) -> Union[float, np.ndarray]:
+) -> float | np.ndarray:
"""Calculate the TOF correction based on the given X/Y coordinates and a model.
Args:
- x (float): x coordinate
- y (float): y coordinate
+ x (float | np.ndarray): x coordinate
+ y (float | np.ndarray): y coordinate
correction_type (str): type of correction. One of
"spherical", "Lorentzian", "Gaussian", or "Lorentzian_asymmetric"
- center (Tuple[int, int]): center position of the distribution (x,y)
+ center (tuple[float, float]): center position of the distribution (x,y)
amplitude (float): Amplitude of the correction
**kwds: Keyword arguments:
@@ -2199,7 +2198,7 @@ Source code for sed.calibrator.energy
asymmetric 2D Lorentz profile, X-direction.
Returns:
- float: calculated correction value
+ float | np.ndarray: calculated correction value
"""
if correction_type == "spherical":
try:
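A hedged sketch of evaluating correction_function for the spherical model; the center, amplitude, and diameter values are illustrative, and "diameter" is assumed to be the required keyword for this model, matching the 'spherical' error message shown earlier in this file.

import numpy as np

from sed.calibrator.energy import correction_function

x = np.linspace(0.0, 512.0, 5)
corr = correction_function(
    x=x,
    y=np.full_like(x, 256.0),
    correction_type="spherical",
    center=(256.0, 256.0),
    amplitude=1.0,
    diameter=3000.0,   # assumed required keyword for the spherical correction
)
print(corr)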
@@ -2359,21 +2358,21 @@ Source code for sed.calibrator.energy
[docs]
def range_convert(
x: np.ndarray,
- xrng: Tuple,
+ xrng: tuple,
pathcorr: np.ndarray,
-) -> Tuple:
+) -> tuple:
"""Convert value range using a pairwise path correspondence (e.g. obtained
from time warping algorithm).
Args:
x (np.ndarray): Values of the x axis (e.g. time-of-flight values).
- xrng (Tuple): Boundary value range on the x axis.
+ xrng (tuple): Boundary value range on the x axis.
pathcorr (np.ndarray): Path correspondence between two 1D arrays in the
following form,
[(id_1_trace_1, id_1_trace_2), (id_2_trace_1, id_2_trace_2), ...]
Returns:
- Tuple: Transformed range according to the path correspondence.
+ tuple: Transformed range according to the path correspondence.
"""
pathcorr = np.asarray(pathcorr)
xrange_trans = []
@@ -2409,7 +2408,7 @@ Source code for sed.calibrator.energy
def peaksearch(
traces: np.ndarray,
tof: np.ndarray,
- ranges: List[Tuple] = None,
+ ranges: list[tuple] = None,
pkwindow: int = 3,
plot: bool = False,
) -> np.ndarray:
@@ -2418,7 +2417,7 @@ Source code for sed.calibrator.energy
Args:
traces (np.ndarray): Collection of 1D spectra.
tof (np.ndarray): Time-of-flight values.
- ranges (List[Tuple], optional): List of ranges for peak detection in the format
+ ranges (list[tuple], optional): List of ranges for peak detection in the format
[(LowerBound1, UpperBound1), (LowerBound2, UpperBound2), ....].
Defaults to None.
pkwindow (int, optional): Window width of a peak (amounts to lookahead in
@@ -2458,8 +2457,8 @@ Source code for sed.calibrator.energy
def _datacheck_peakdetect(
x_axis: np.ndarray,
y_axis: np.ndarray,
-) -> Tuple[np.ndarray, np.ndarray]:
- """Input format checking for 1D peakdetect algorithm
+) -> tuple[np.ndarray, np.ndarray]:
+ """Input format checking for 1D peakdtect algorithm
Args:
x_axis (np.ndarray): x-axis array
@@ -2469,7 +2468,7 @@ Source code for sed.calibrator.energy
ValueError: Raised if x and y values don't have the same length.
Returns:
- Tuple[np.ndarray, np.ndarray]: Tuple of checked (x/y) arrays.
+ tuple[np.ndarray, np.ndarray]: Tuple of checked (x/y) arrays.
"""
if x_axis is None:
@@ -2494,7 +2493,7 @@ Source code for sed.calibrator.energy
x_axis: np.ndarray = None,
lookahead: int = 200,
delta: int = 0,
-) -> Tuple[np.ndarray, np.ndarray]:
+) -> tuple[np.ndarray, np.ndarray]:
"""Function for detecting local maxima and minima in a signal.
Discovers peaks by searching for values which are surrounded by lower
or larger values for maxima and minima respectively
@@ -2521,7 +2520,7 @@ Source code for sed.calibrator.energy
ValueError: Raised if lookahead and delta are out of range.
Returns:
- Tuple[np.ndarray, np.ndarray]: Tuple of positions of the positive peaks,
+ tuple[np.ndarray, np.ndarray]: Tuple of positions of the positive peaks,
positions of the negative peaks
"""
max_peaks = []
@@ -2542,7 +2541,7 @@ Source code for sed.calibrator.energy
# maxima and minima candidates are temporarily stored in
# mx and mn respectively
- _min, _max = np.Inf, -np.Inf
+ _min, _max = np.inf, -np.inf
# Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(
@@ -2557,15 +2556,15 @@ Source code for sed.calibrator.energy
_min_pos = x
# Find local maxima
- if y < _max - delta and _max != np.Inf:
+ if y < _max - delta and _max != np.inf:
# Maxima peak candidate found
# look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index : index + lookahead].max() < _max:
max_peaks.append([_max_pos, _max])
dump.append(True)
# Set algorithm to only find minima now
- _max = np.Inf
- _min = np.Inf
+ _max = np.inf
+ _min = np.inf
if index + lookahead >= length:
# The end is within lookahead no more peaks can be found
@@ -2576,15 +2575,15 @@ Source code for sed.calibrator.energy
# mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)]
# Find local minima
- if y > _min + delta and _min != -np.Inf:
+ if y > _min + delta and _min != -np.inf:
# Minima peak candidate found
# look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index : index + lookahead].min() > _min:
min_peaks.append([_min_pos, _min])
dump.append(False)
# Set algorithm to only find maxima now
- _min = -np.Inf
- _max = -np.Inf
+ _min = -np.inf
+ _max = -np.inf
if index + lookahead >= length:
# The end is within lookahead no more peaks can be found
@@ -2611,13 +2610,13 @@ Source code for sed.calibrator.energy
[docs]
def fit_energy_calibration(
- pos: Union[List[float], np.ndarray],
- vals: Union[List[float], np.ndarray],
+ pos: list[float] | np.ndarray,
+ vals: list[float] | np.ndarray,
binwidth: float,
binning: int,
ref_id: int = 0,
ref_energy: float = None,
- t: Union[List[float], np.ndarray] = None,
+ t: list[float] | np.ndarray = None,
energy_scale: str = "kinetic",
verbose: bool = True,
**kwds,
@@ -2627,16 +2626,16 @@ Source code for sed.calibrator.energy
function d/(t-t0)**2.
Args:
- pos (Union[List[float], np.ndarray]): Positions of the spectral landmarks
+ pos (list[float] | np.ndarray): Positions of the spectral landmarks
(e.g. peaks) in the EDCs.
- vals (Union[List[float], np.ndarray]): Bias voltage value associated with
+ vals (list[float] | np.ndarray): Bias voltage value associated with
each EDC.
binwidth (float): Time width of each original TOF bin in ns.
binning (int): Binning factor of the TOF values.
ref_id (int, optional): Reference dataset index. Defaults to 0.
- ref_energy (float, optional): Energy value of the feature in the reference
+ ref_energy (float, optional): Energy value of the feature in the reference
trace (eV). required to output the calibration. Defaults to None.
- t (Union[List[float], np.ndarray], optional): Array of TOF values. Required
+ t (list[float] | np.ndarray, optional): Array of TOF values. Required
to calculate calibration trace. Defaults to None.
energy_scale (str, optional): Direction of increasing energy scale.
@@ -2656,7 +2655,7 @@ Source code for sed.calibrator.energy
Returns:
dict: A dictionary of fitting parameters including the following,
- - "coeffs": Fitted function coefficients.
+ - "coeffs": Fitted function coefficents.
- "axis": Fitted energy axis.
"""
vals = np.asarray(vals)
@@ -2750,12 +2749,12 @@ Source code for sed.calibrator.energy
[docs]
def poly_energy_calibration(
- pos: Union[List[float], np.ndarray],
- vals: Union[List[float], np.ndarray],
+ pos: list[float] | np.ndarray,
+ vals: list[float] | np.ndarray,
order: int = 3,
ref_id: int = 0,
ref_energy: float = None,
- t: Union[List[float], np.ndarray] = None,
+ t: list[float] | np.ndarray = None,
aug: int = 1,
method: str = "lstsq",
energy_scale: str = "kinetic",
@@ -2770,15 +2769,15 @@ Source code for sed.calibrator.energy
Args:
- pos (Union[List[float], np.ndarray]): Positions of the spectral landmarks
+ pos (list[float] | np.ndarray): Positions of the spectral landmarks
(e.g. peaks) in the EDCs.
- vals (Union[List[float], np.ndarray]): Bias voltage value associated with
+ vals (list[float] | np.ndarray): Bias voltage value associated with
each EDC.
order (int, optional): Polynomial order of the fitting function. Defaults to 3.
ref_id (int, optional): Reference dataset index. Defaults to 0.
- ref_energy (float, optional): Energy value of the feature in the reference
+ ref_energy (float, optional): Energy value of the feature in the reference
trace (eV). required to output the calibration. Defaults to None.
- t (Union[List[float], np.ndarray], optional): Array of TOF values. Required
+ t (list[float] | np.ndarray, optional): Array of TOF values. Required
to calculate calibration trace. Defaults to None.
aug (int, optional): Fitting dimension augmentation
(1=no change, 2=double, etc). Defaults to 1.
@@ -2907,7 +2906,7 @@ Source code for sed.calibrator.energy
[docs]
def tof2evpoly(
- poly_a: Union[List[float], np.ndarray],
+ poly_a: list[float] | np.ndarray,
energy_offset: float,
t: float,
) -> float:
@@ -2915,7 +2914,7 @@ Source code for sed.calibrator.energy
conversion formula.
Args:
- poly_a (Union[List[float], np.ndarray]): Polynomial coefficients.
+ poly_a (list[float] | np.ndarray): Polynomial coefficients.
energy_offset (float): Energy offset in eV.
t (float): TOF value in bin number.
diff --git a/sed/latest/_modules/sed/calibrator/momentum.html b/sed/latest/_modules/sed/calibrator/momentum.html
index 4a15598..d6eb762 100644
--- a/sed/latest/_modules/sed/calibrator/momentum.html
+++ b/sed/latest/_modules/sed/calibrator/momentum.html
@@ -7,7 +7,7 @@
- sed.calibrator.momentum — SED 0.1.10a6 documentation
+ sed.calibrator.momentum — SED 0.1.10a5 documentation
@@ -34,7 +34,7 @@
-
+
@@ -43,7 +43,7 @@
@@ -121,7 +121,7 @@
- SED 0.1.10a6 documentation
+ SED 0.1.10a5 documentation
@@ -449,14 +449,12 @@ Source code for sed.calibrator.momentum
"""sed.calibrator.momentum module. Code for momentum calibration and distortion
correction. Mostly ported from https://github.com/mpes-kit/mpes.
"""
+from __future__ import annotations
+
import itertools as it
from copy import deepcopy
from datetime import datetime
from typing import Any
-from typing import Dict
-from typing import List
-from typing import Tuple
-from typing import Union
import bokeh.palettes as bp
import bokeh.plotting as pbk
@@ -490,9 +488,9 @@ Source code for sed.calibrator.momentum
Momentum distortion correction and momentum calibration workflow functions.
Args:
- data (Union[xr.DataArray, np.ndarray], optional): Multidimensional hypervolume
+ data (xr.DataArray | np.ndarray, optional): Multidimensional hypervolume
containing the data. Defaults to None.
- bin_ranges (List[Tuple], optional): Binning ranges of the data volume, if
+ bin_ranges (list[tuple], optional): Binning ranges of the data volume, if
provided as np.ndarray. Defaults to None.
rotsym (int, optional): Rotational symmetry of the data. Defaults to 6.
config (dict, optional): Config dictionary. Defaults to None.
@@ -500,17 +498,17 @@ Source code for sed.calibrator.momentum
def __init__(
self,
- data: Union[xr.DataArray, np.ndarray] = None,
- bin_ranges: List[Tuple] = None,
+ data: xr.DataArray | np.ndarray = None,
+ bin_ranges: list[tuple] = None,
rotsym: int = 6,
config: dict = None,
):
"""Constructor of the MomentumCorrector class.
Args:
- data (Union[xr.DataArray, np.ndarray], optional): Multidimensional
+ data (xr.DataArray | np.ndarray, optional): Multidimensional
hypervolume containing the data. Defaults to None.
- bin_ranges (List[Tuple], optional): Binning ranges of the data volume,
+ bin_ranges (list[tuple], optional): Binning ranges of the data volume,
if provided as np.ndarray. Defaults to None.
rotsym (int, optional): Rotational symmetry of the data. Defaults to 6.
config (dict, optional): Config dictionary. Defaults to None.
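A minimal, hypothetical construction of the class from a binned 3D volume, assuming the packaged default config is sufficient (the axis names, shape, and values below are invented):

    import numpy as np
    import xarray as xr

    from sed.calibrator.momentum import MomentumCorrector

    # Fake binned volume: detector X, detector Y, time of flight.
    volume = xr.DataArray(
        np.random.rand(256, 256, 50),
        dims=["X", "Y", "t"],
        coords={"X": np.arange(256), "Y": np.arange(256), "t": np.arange(50)},
    )
    mc = MomentumCorrector(data=volume, rotsym=6)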
@@ -525,7 +523,7 @@ Source code for sed.calibrator.momentum
self.slice: np.ndarray = None
self.slice_corrected: np.ndarray = None
self.slice_transformed: np.ndarray = None
- self.bin_ranges: List[Tuple] = self._config["momentum"].get("bin_ranges", [])
+ self.bin_ranges: list[tuple] = self._config["momentum"].get("bin_ranges", [])
if data is not None:
self.load_data(data=data, bin_ranges=bin_ranges)
@@ -540,7 +538,7 @@ Source code for sed.calibrator.momentum
self.include_center: bool = False
self.use_center: bool = False
self.pouter: np.ndarray = None
- self.pcent: Tuple[float, ...] = None
+ self.pcent: tuple[float, ...] = None
self.pouter_ord: np.ndarray = None
self.prefs: np.ndarray = None
self.ptargs: np.ndarray = None
@@ -556,10 +554,10 @@ Source code for sed.calibrator.momentum
self.cdeform_field_bkp: np.ndarray = None
self.inverse_dfield: np.ndarray = None
self.dfield_updated: bool = False
- self.transformations: Dict[str, Any] = self._config["momentum"].get("transformations", {})
- self.correction: Dict[str, Any] = self._config["momentum"].get("correction", {})
- self.adjust_params: Dict[str, Any] = {}
- self.calibration: Dict[str, Any] = self._config["momentum"].get("calibration", {})
+ self.transformations: dict[str, Any] = self._config["momentum"].get("transformations", {})
+ self.correction: dict[str, Any] = self._config["momentum"].get("correction", {})
+ self.adjust_params: dict[str, Any] = {}
+ self.calibration: dict[str, Any] = self._config["momentum"].get("calibration", {})
self.x_column = self._config["dataframe"]["x_column"]
self.y_column = self._config["dataframe"]["y_column"]
@@ -605,15 +603,15 @@ Source code for sed.calibrator.momentum
[docs]
def load_data(
self,
- data: Union[xr.DataArray, np.ndarray],
- bin_ranges: List[Tuple] = None,
+ data: xr.DataArray | np.ndarray,
+ bin_ranges: list[tuple] = None,
):
"""Load binned data into the momentum calibrator class
Args:
- data (Union[xr.DataArray, np.ndarray]):
+ data (xr.DataArray | np.ndarray):
2D or 3D data array, either as np.ndarray or xr.DataArray.
- bin_ranges (List[Tuple], optional):
+ bin_ranges (list[tuple], optional):
Binning ranges. Needs to be provided in case the data are given
as np.ndarray. Otherwise, they are determined from the coords of
the xr.DataArray. Defaults to None.
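For the plain np.ndarray path the binning ranges have to be supplied explicitly. A hedged example continuing from an existing MomentumCorrector instance mc (all numbers invented):

    import numpy as np

    volume = np.random.rand(256, 256, 50)
    mc.load_data(
        data=volume,
        # one (start, stop) pair per axis of the volume
        bin_ranges=[(0, 2048), (0, 2048), (60000, 70000)],
    )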
@@ -722,9 +720,7 @@ Source code for sed.calibrator.momentum
axmax = np.max(self.slice, axis=(0, 1))
if axmin < axmax:
img.set_clim(axmin, axmax)
- ax.set_title(
- f"Plane[{start}:{stop}]",
- )
+ ax.set_title(f"Plane[{start}:{stop}]")
fig.canvas.draw_idle()
plane_slider.close()
@@ -745,13 +741,13 @@ Source code for sed.calibrator.momentum
[docs]
def select_slice(
self,
- selector: Union[slice, List[int], int],
+ selector: slice | list[int] | int,
axis: int = 2,
):
"""Select (hyper)slice from a (hyper)volume.
Args:
- selector (Union[slice, List[int], int]):
+ selector (slice | list[int] | int):
Selector along the specified axis to extract the slice (image). Use
the construct slice(start, stop, step) to select a range of images
and sum them. Use an integer to specify only a particular slice.
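For example (indices hypothetical), a range of planes along the third axis can be summed into one image, or a single plane can be picked:

    # Sum planes 20..29 along axis 2 into a single 2D slice.
    mc.select_slice(selector=slice(20, 30), axis=2)

    # Alternatively, pick just one plane.
    mc.select_slice(selector=25, axis=2)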
@@ -798,7 +794,7 @@ Source code for sed.calibrator.momentum
Option to calculate symmetry scores. Defaults to False.
**kwds: Keyword arguments.
- - **symtype** (str): Type of symmetry scores to calculate
+                - **symtype** (str): Type of symmetry scores to calculate
if symscores is True. Defaults to "rotation".
Raises:
@@ -1070,7 +1066,7 @@ Source code for sed.calibrator.momentum
use_center: bool = None,
fixed_center: bool = True,
interp_order: int = 1,
- ascale: Union[float, list, tuple, np.ndarray] = None,
+ ascale: float | list | tuple | np.ndarray = None,
verbose: bool = True,
**kwds,
) -> np.ndarray:
@@ -1088,13 +1084,13 @@ Source code for sed.calibrator.momentum
interp_order (int, optional):
Order of interpolation (see ``scipy.ndimage.map_coordinates()``).
Defaults to 1.
- ascale: (Union[float, np.ndarray], optional): Scale parameter determining a relative
- scale for each symmetry feature. If provided as single float, rotsym has to be 4.
- This parameter describes the relative scaling between the two orthogonal symmetry
- directions (for an orthorhombic system). This requires the correction points to be
- located along the principal axes (X/Y points of the Brillouin zone). Otherwise, an
- array with ``rotsym`` elements is expected, containing relative scales for each
- feature. Defaults to an array of equal scales.
+            ascale (float | list | tuple | np.ndarray, optional): Scale parameter determining a
+ relative scale for each symmetry feature. If provided as single float, rotsym has
+ to be 4. This parameter describes the relative scaling between the two orthogonal
+ symmetry directions (for an orthorhombic system). This requires the correction
+ points to be located along the principal axes (X/Y points of the Brillouin zone).
+ Otherwise, an array with ``rotsym`` elements is expected, containing relative
+ scales for each feature. Defaults to an array of equal scales.
verbose (bool, optional): Option to report the used landmarks for correction.
Defaults to True.
**kwds: keyword arguments:
@@ -1261,7 +1257,7 @@ Source code for sed.calibrator.momentum
self.slice_corrected = corrected_image
if verbose:
- print("Calculated thin spline correction based on the following landmarks:")
+            print("Calculated thin spline correction based on the following landmarks:")
print(f"pouter: {self.pouter}")
if use_center:
print(f"pcent: {self.pcent}")
@@ -1375,7 +1371,7 @@ Source code for sed.calibrator.momentum
- rotation_auto.
- scaling.
- scaling_auto.
- - homography.
+                - homography.
keep (bool, optional): Option to keep the specified coordinate transform in
the class. Defaults to False.
@@ -1495,7 +1491,7 @@ Source code for sed.calibrator.momentum
)
self.slice_transformed = slice_transformed
else:
- # if external image is provided, apply only the new additional transformation
+            # if external image is provided, apply only the new additional transformation
slice_transformed = ndi.map_coordinates(
image,
[rdeform, cdeform],
@@ -1519,7 +1515,7 @@ Source code for sed.calibrator.momentum
[docs]
def pose_adjustment(
self,
- transformations: Dict[str, Any] = None,
+ transformations: dict[str, Any] = None,
apply: bool = False,
reset: bool = True,
verbose: bool = True,
@@ -1531,7 +1527,7 @@ Source code for sed.calibrator.momentum
Args:
transformations (dict, optional): Dictionary with transformations.
- Defaults to self.transformations or config["momentum"]["transformations"].
+                Defaults to self.transformations or config["momentum"]["transformations"].
apply (bool, optional):
Option to directly apply the provided transformations.
Defaults to False.
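A sketch of supplying the transformations dictionary directly instead of using the interactive sliders; the key names mirror the registration metadata written further below ("angle", "xtrans", "ytrans"), but the values here are placeholders:

    mc.pose_adjustment(
        transformations={"angle": -1.5, "xtrans": 8.0, "ytrans": -4.0},
        apply=True,
        reset=True,
    )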
@@ -1759,7 +1755,7 @@ Source code for sed.calibrator.momentum
image: np.ndarray = None,
origin: str = "lower",
cmap: str = "terrain_r",
- figsize: Tuple[int, int] = (4, 4),
+ figsize: tuple[int, int] = (4, 4),
points: dict = None,
annotated: bool = False,
backend: str = "matplotlib",
@@ -1767,7 +1763,7 @@ Source code for sed.calibrator.momentum
scatterkwds: dict = {},
cross: bool = False,
crosshair: bool = False,
- crosshair_radii: List[int] = [50, 100, 150],
+ crosshair_radii: list[int] = [50, 100, 150],
crosshair_thickness: int = 1,
**kwds,
):
@@ -1778,7 +1774,7 @@ Source code for sed.calibrator.momentum
origin (str, optional): Figure origin specification ('lower' or 'upper').
Defaults to "lower".
cmap (str, optional): Colormap specification. Defaults to "terrain_r".
- figsize (Tuple[int, int], optional): Figure size. Defaults to (4, 4).
+ figsize (tuple[int, int], optional): Figure size. Defaults to (4, 4).
points (dict, optional): Points for annotation. Defaults to None.
annotated (bool, optional): Option to add annotation. Defaults to False.
backend (str, optional): Visualization backend specification. Defaults to
@@ -1795,7 +1791,7 @@ Source code for sed.calibrator.momentum
self.pcent. Defaults to False.
crosshair (bool, optional): Display option to plot circles around center
self.pcent. Works only in bokeh backend. Defaults to False.
- crosshair_radii (List[int], optional): Pixel radii of circles to plot when
+ crosshair_radii (list[int], optional): Pixel radii of circles to plot when
crosshair option is activated. Defaults to [50, 100, 150].
crosshair_thickness (int, optional): Thickness of crosshair circles.
Defaults to 1.
@@ -1814,7 +1810,7 @@ Source code for sed.calibrator.momentum
txtsize = kwds.pop("textsize", 12)
if backend == "matplotlib":
- fig, ax = plt.subplots(figsize=figsize)
+ fig_plt, ax = plt.subplots(figsize=figsize)
ax.imshow(image.T, origin=origin, cmap=cmap, **imkwds)
if cross:
@@ -1909,30 +1905,31 @@ Source code for sed.calibrator.momentum
[docs]
def select_k_range(
self,
- point_a: Union[np.ndarray, List[int]] = None,
- point_b: Union[np.ndarray, List[int]] = None,
+ point_a: np.ndarray | list[int] = None,
+ point_b: np.ndarray | list[int] = None,
k_distance: float = None,
- k_coord_a: Union[np.ndarray, List[float]] = None,
- k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]),
+ k_coord_a: np.ndarray | list[float] = None,
+ k_coord_b: np.ndarray | list[float] = np.array([0.0, 0.0]),
equiscale: bool = True,
apply: bool = False,
):
- """Interactive selection function for features for the Momentum axes calibration. It allows
- the user to select the pixel positions of two symmetry points (a and b) and the k-space
- distance of the two. Alternatively, the coordinates of both points can be provided. See the
- equiscale option for details on the specifications of point coordinates.
+        """Interactive selection function for features for the Momentum axes
+        calibration. It allows the user to select the pixel positions of two symmetry
+        points (a and b) and the k-space distance of the two. Alternatively, the
+        coordinates of both points can be provided. See the equiscale option for
+        details on the specifications of point coordinates.
Args:
- point_a (Union[np.ndarray, List[int]], optional): Pixel coordinates of the
+ point_a (np.ndarray | list[int], optional): Pixel coordinates of the
symmetry point a.
- point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the
+ point_b (np.ndarray | list[int], optional): Pixel coordinates of the
symmetry point b. Defaults to the center pixel of the image, defined by
config["momentum"]["center_pixel"].
k_distance (float, optional): The known momentum space distance between the
two symmetry points.
- k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate
+ k_coord_a (np.ndarray | list[float], optional): Momentum coordinate
of the symmetry points a. Only valid if equiscale=False.
- k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate
+ k_coord_b (np.ndarray | list[float], optional): Momentum coordinate
of the symmetry points b. Only valid if equiscale=False. Defaults to
the k-space center np.array([0.0, 0.0]).
equiscale (bool, optional): Option to adopt equal scale along both the x
@@ -2062,11 +2059,11 @@ Source code for sed.calibrator.momentum
[docs]
def calibrate(
self,
- point_a: Union[np.ndarray, List[int]],
- point_b: Union[np.ndarray, List[int]],
+ point_a: np.ndarray | list[int],
+ point_b: np.ndarray | list[int],
k_distance: float = None,
- k_coord_a: Union[np.ndarray, List[float]] = None,
- k_coord_b: Union[np.ndarray, List[float]] = np.array([0.0, 0.0]),
+ k_coord_a: np.ndarray | list[float] = None,
+ k_coord_b: np.ndarray | list[float] = np.array([0.0, 0.0]),
equiscale: bool = True,
image: np.ndarray = None,
) -> dict:
@@ -2077,16 +2074,16 @@ Source code for sed.calibrator.momentum
of point coordinates.
Args:
- point_a (Union[np.ndarray, List[int]], optional): Pixel coordinates of the
+ point_a (np.ndarray | list[int], optional): Pixel coordinates of the
symmetry point a.
- point_b (Union[np.ndarray, List[int]], optional): Pixel coordinates of the
+ point_b (np.ndarray | list[int], optional): Pixel coordinates of the
symmetry point b. Defaults to the center pixel of the image, defined by
config["momentum"]["center_pixel"].
k_distance (float, optional): The known momentum space distance between the
two symmetry points.
- k_coord_a (Union[np.ndarray, List[float]], optional): Momentum coordinate
+ k_coord_a (np.ndarray | list[float], optional): Momentum coordinate
of the symmetry points a. Only valid if equiscale=False.
- k_coord_b (Union[np.ndarray, List[float]], optional): Momentum coordinate
+ k_coord_b (np.ndarray | list[float], optional): Momentum coordinate
of the symmetry points b. Only valid if equiscale=False. Defaults to
the k-space center np.array([0.0, 0.0]).
equiscale (bool, optional): Option to adopt equal scale along both the x
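In the equiscale case the calibration essentially reduces to a single pixel-to-k scale derived from the known distance between the two symmetry points. A simplified sketch of that core relation (numbers invented; the method itself returns a full calibration dictionary):

    import numpy as np

    point_a = np.array([308.0, 345.0])   # e.g. a zone-boundary point, in pixels
    point_b = np.array([256.0, 256.0])   # e.g. the image center / Gamma point
    k_distance = 1.70                    # known separation, hypothetical value

    pixel_distance = np.linalg.norm(point_a - point_b)
    k_per_pixel = k_distance / pixel_distance   # same scale along kx and ky
    print(k_per_pixel)

    # The class method would be invoked along the lines of:
    # calibration = mc.calibrate(point_a=point_a, point_b=point_b,
    #                            k_distance=k_distance, equiscale=True)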
@@ -2182,20 +2179,20 @@ Source code for sed.calibrator.momentum
[docs]
def apply_corrections(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
x_column: str = None,
y_column: str = None,
new_x_column: str = None,
new_y_column: str = None,
verbose: bool = True,
**kwds,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Calculate and replace the X and Y values with their distortion-corrected
version.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to apply
- the distortion correction to.
+ df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply
+                the distortion correction to.
x_column (str, optional): Label of the 'X' column before momentum
distortion correction. Defaults to config["momentum"]["x_column"].
y_column (str, optional): Label of the 'Y' column before momentum
@@ -2217,7 +2214,7 @@ Source code for sed.calibrator.momentum
Additional keyword arguments are passed to ``apply_dfield``.
Returns:
- Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: Dataframe with
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with
added columns and momentum correction metadata dictionary.
"""
if x_column is None:
@@ -2274,7 +2271,7 @@ Source code for sed.calibrator.momentum
Returns:
dict: generated correction metadata dictionary.
"""
- metadata: Dict[Any, Any] = {}
+ metadata: dict[Any, Any] = {}
if len(self.correction) > 0:
metadata["correction"] = self.correction
metadata["correction"]["applied"] = True
@@ -2289,11 +2286,11 @@ Source code for sed.calibrator.momentum
metadata["registration"]["creation_date"] = datetime.now().timestamp()
metadata["registration"]["applied"] = True
metadata["registration"]["depends_on"] = (
- "/entry/process/registration/transformations/rot_z"
+                "/entry/process/registration/transformations/rot_z"
                if "angle" in metadata["registration"] and metadata["registration"]["angle"]
-                else "/entry/process/registration/transformations/trans_y"
+                else "/entry/process/registration/transformations/trans_y"
                if "xtrans" in metadata["registration"] and metadata["registration"]["xtrans"]
-                else "/entry/process/registration/transformations/trans_x"
+                else "/entry/process/registration/transformations/trans_x"
if "ytrans" in metadata["registration"] and metadata["registration"]["ytrans"]
else "."
)
@@ -2317,7 +2314,7 @@ Source code for sed.calibrator.momentum
[0.0, 1.0, 0.0],
)
metadata["registration"]["trans_y"]["depends_on"] = (
- "/entry/process/registration/transformations/trans_x"
+                "/entry/process/registration/transformations/trans_x"
if "ytrans" in metadata["registration"] and metadata["registration"]["ytrans"]
else "."
)
@@ -2333,9 +2330,9 @@ Source code for sed.calibrator.momentum
(metadata["registration"]["center"], [0.0]),
)
metadata["registration"]["rot_z"]["depends_on"] = (
- "/entry/process/registration/transformations/trans_y"
+                "/entry/process/registration/transformations/trans_y"
                if "xtrans" in metadata["registration"] and metadata["registration"]["xtrans"]
-                else "/entry/process/registration/transformations/trans_x"
+                else "/entry/process/registration/transformations/trans_x"
if "ytrans" in metadata["registration"] and metadata["registration"]["ytrans"]
else "."
)
@@ -2347,19 +2344,19 @@ Source code for sed.calibrator.momentum
[docs]
def append_k_axis(
self,
- df: Union[pd.DataFrame, dask.dataframe.DataFrame],
+ df: pd.DataFrame | dask.dataframe.DataFrame,
x_column: str = None,
y_column: str = None,
new_x_column: str = None,
new_y_column: str = None,
calibration: dict = None,
**kwds,
- ) -> Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]:
+ ) -> tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]:
"""Calculate and append the k axis coordinates (kx, ky) to the events dataframe.
Args:
- df (Union[pd.DataFrame, dask.dataframe.DataFrame]): Dataframe to apply the
- distortion correction to.
+ df (pd.DataFrame | dask.dataframe.DataFrame): Dataframe to apply the
+                distortion correction to.
x_column (str, optional): Label of the source 'X' column.
Defaults to config["momentum"]["corrected_x_column"] or
config["momentum"]["x_column"] (whichever is present).
@@ -2376,7 +2373,7 @@ Source code for sed.calibrator.momentum
to the calibration dictionary.
Returns:
- Tuple[Union[pd.DataFrame, dask.dataframe.DataFrame], dict]: Dataframe with
+ tuple[pd.DataFrame | dask.dataframe.DataFrame, dict]: Dataframe with
added columns and momentum calibration metadata dictionary.
"""
if x_column is None:
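Taken together, the two dataframe methods form a short pipeline: apply_corrections first adds the distortion-corrected X/Y columns, and append_k_axis then maps those onto kx/ky. A hedged usage sketch (the dataframe df and its column names come from the configured workflow and are not shown here):

    # Both calls return the augmented dataframe plus a metadata dictionary.
    df, correction_meta = mc.apply_corrections(df)
    df, calibration_meta = mc.append_k_axis(df)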
@@ -2406,7 +2403,7 @@ Source code for sed.calibrator.momentum