diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/docs/.buildinfo b/docs/.buildinfo new file mode 100644 index 00000000..7bcae0b0 --- /dev/null +++ b/docs/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: b38b08fdbfd87f3e2d99a5696681f12d +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.nojekyll b/docs/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/docs/_images/example_graph.svg b/docs/_images/example_graph.svg new file mode 100644 index 00000000..a042b348 --- /dev/null +++ b/docs/_images/example_graph.svg @@ -0,0 +1,103 @@ + + + + + + +%3 + + +3102162992712 + +g + + +3102162990528 + +Subtract + + +3102162990528->3102162992712 + + + + +3102162990472 + +8 + + +3102162990472->3102162990528 + + + + +3102137770952 + +Add + + +3102137770952->3102162990472 + + + + +3102137770728 + +f + + +3102137770728->3102137770952 + + + + +3102137771176 + +Multiply + + +3102137771176->3102137770728 + + + + +3102137771064 + +x + + +3102137771064->3102137770952 + + + + +3102137771064->3102137771176 + + + + +3102137771288 + +y + + +3102137771288->3102137771176 + + + + +3102162990696 + +2 + + +3102162990696->3102162990528 + + + + + diff --git a/docs/_images/mygrad-absolute-1.png b/docs/_images/mygrad-absolute-1.png new file mode 100644 index 00000000..3b217601 Binary files /dev/null and b/docs/_images/mygrad-absolute-1.png differ diff --git a/docs/_images/mygrad-arccosh-1.png b/docs/_images/mygrad-arccosh-1.png new file mode 100644 index 00000000..36429d75 Binary files /dev/null and b/docs/_images/mygrad-arccosh-1.png differ diff --git a/docs/_images/mygrad-arcsinh-1.png b/docs/_images/mygrad-arcsinh-1.png new file mode 100644 index 00000000..3334f53e Binary files /dev/null and b/docs/_images/mygrad-arcsinh-1.png differ diff --git a/docs/_images/mygrad-arctanh-1.png b/docs/_images/mygrad-arctanh-1.png new file mode 100644 index 00000000..1806a037 Binary files /dev/null and b/docs/_images/mygrad-arctanh-1.png differ diff --git a/docs/_images/mygrad-cosh-1.png b/docs/_images/mygrad-cosh-1.png new file mode 100644 index 00000000..11704fb5 Binary files /dev/null and b/docs/_images/mygrad-cosh-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-elu-1.png b/docs/_images/mygrad-nnet-activations-elu-1.png new file mode 100644 index 00000000..d23d21f0 Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-elu-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-hard_tanh-1.png b/docs/_images/mygrad-nnet-activations-hard_tanh-1.png new file mode 100644 index 00000000..e39b99f2 Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-hard_tanh-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-leaky_relu-1.png b/docs/_images/mygrad-nnet-activations-leaky_relu-1.png new file mode 100644 index 00000000..191e9109 Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-leaky_relu-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-relu-1.png b/docs/_images/mygrad-nnet-activations-relu-1.png new file mode 100644 index 00000000..b65a948b Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-relu-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-selu-1.png b/docs/_images/mygrad-nnet-activations-selu-1.png new file mode 100644 index 00000000..759298ed Binary files /dev/null and 
b/docs/_images/mygrad-nnet-activations-selu-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-sigmoid-1.png b/docs/_images/mygrad-nnet-activations-sigmoid-1.png new file mode 100644 index 00000000..a731e7ac Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-sigmoid-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-soft_sign-1.png b/docs/_images/mygrad-nnet-activations-soft_sign-1.png new file mode 100644 index 00000000..a4f3a4d3 Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-soft_sign-1.png differ diff --git a/docs/_images/mygrad-nnet-activations-tanh-1.png b/docs/_images/mygrad-nnet-activations-tanh-1.png new file mode 100644 index 00000000..59569a7a Binary files /dev/null and b/docs/_images/mygrad-nnet-activations-tanh-1.png differ diff --git a/docs/_images/mygrad-nnet-layers-conv_nd-1.png b/docs/_images/mygrad-nnet-layers-conv_nd-1.png new file mode 100644 index 00000000..3b35aad6 Binary files /dev/null and b/docs/_images/mygrad-nnet-layers-conv_nd-1.png differ diff --git a/docs/_images/mygrad-nnet-layers-conv_nd-2_00.png b/docs/_images/mygrad-nnet-layers-conv_nd-2_00.png new file mode 100644 index 00000000..5d23e6a8 Binary files /dev/null and b/docs/_images/mygrad-nnet-layers-conv_nd-2_00.png differ diff --git a/docs/_images/mygrad-nnet-layers-conv_nd-2_01.png b/docs/_images/mygrad-nnet-layers-conv_nd-2_01.png new file mode 100644 index 00000000..075471e5 Binary files /dev/null and b/docs/_images/mygrad-nnet-layers-conv_nd-2_01.png differ diff --git a/docs/_images/mygrad-sinh-1.png b/docs/_images/mygrad-sinh-1.png new file mode 100644 index 00000000..8d5bddc0 Binary files /dev/null and b/docs/_images/mygrad-sinh-1.png differ diff --git a/docs/_images/mygrad-tanh-1.png b/docs/_images/mygrad-tanh-1.png new file mode 100644 index 00000000..59569a7a Binary files /dev/null and b/docs/_images/mygrad-tanh-1.png differ diff --git a/docs/_modules/index.html b/docs/_modules/index.html new file mode 100644 index 00000000..78a95e58 --- /dev/null +++ b/docs/_modules/index.html @@ -0,0 +1,611 @@ + + + + + + + + + + Overview: module code — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

All modules for which code is available

+ + +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/_io.html b/docs/_modules/mygrad/_io.html new file mode 100644 index 00000000..38ee7d18 --- /dev/null +++ b/docs/_modules/mygrad/_io.html @@ -0,0 +1,696 @@ + + + + + + + + + + mygrad._io — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad._io

+from pathlib import Path
+from typing import BinaryIO, Union
+
+import numpy as np
+
+import mygrad.tensor_base as tb
+
+_FileLike = Union[str, Path, BinaryIO]
+
+
+
[docs]def save(file: _FileLike, tensor: tb.Tensor) -> None: + """Saves a tensor and its gradient information. + + This docstring was adapted from that of numpy.save() + + Parameters + ---------- + file : str | Path | BinaryIO + The file or file-path that where the tensor data and its gradient + will be saved. Note that the file will be saved as a .npz file. + + tensor : Tensor + The tensor to be saved. If it has an associated gradient, that will + be saved as well. + + Notes + ----- + This function uses ``numpy.savez(file, data=tensor.data, grad=tensor.grad)`` + to save the tensor's data and its gradient. No ``grad`` field is included + if the tensor does not have a gradient. + + See Also + -------- + mygrad.load + + Examples + -------- + >>> import mygrad as mg + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = mg.arange(10.0) + >>> mg.save(outfile, x) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> mg.load(outfile) + Tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + + An example of saving a tensor that has an associated gradient. + + >>> (x * x).backward() + >>> x.grad + array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]) + >>> outfile = TemporaryFile() + >>> x = mg.arange(10.0) + >>> mg.save(outfile, x) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> loaded = mg.load(outfile) + >>> loaded + Tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + >>> loaded.grad + array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]) + """ + if not isinstance(tensor, tb.Tensor): + raise TypeError( + f"mygrad.save requires a Tensor-type object, got type {type(tensor)}" + ) + + if tensor.grad is not None: + np.savez(file, data=tensor.data, grad=tensor.grad) + else: + np.savez(file, data=tensor.data)
+ + +
[docs]def load(file: _FileLike) -> tb.Tensor: + """Loads a saved Tensor and its gradient information (if applicable). + + This docstring was adapted from that of numpy.load() + + Parameters + ---------- + file : str | Path | BinaryIO + The name of the file that holds the tensor data to load. + + Returns + ------- + loaded : Tensor + The loaded tensor (whose gradient will be loaded if it was saved). + + See Also + -------- + mygrad.save + + Examples + -------- + >>> import mygrad as mg + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = mg.arange(10.0) + >>> mg.save(outfile, x) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> mg.load(outfile) + Tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + + An example of saving a tensor that has an associated gradient. + + >>> (x * x).backward() + >>> x.grad + array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]) + >>> outfile = TemporaryFile() + >>> x = mg.arange(10.0) + >>> mg.save(outfile, x) + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> loaded = mg.load(outfile) + >>> loaded + Tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) + >>> loaded.grad + array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18.]) + """ + loaded = np.load(file) + + loaded_tensor = tb.tensor(loaded["data"]) + + if "grad" in loaded: + loaded_tensor.backward(loaded["grad"]) + + return loaded_tensor
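The two functions above are meant to round-trip a tensor together with its gradient. A minimal sketch of that round trip, assembled from the docstring examples above (a TemporaryFile stands in for a real file on disk):

import mygrad as mg
from tempfile import TemporaryFile

x = mg.arange(5.0)
(x * x).backward()           # populates x.grad with 2*x

with TemporaryFile() as f:
    mg.save(f, x)            # writes an .npz archive with 'data' and 'grad' fields
    f.seek(0)                # simulate closing and reopening the file
    loaded = mg.load(f)      # the saved gradient is restored onto the loaded tensor

assert (loaded.data == x.data).all()
assert (loaded.grad == x.grad).all()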
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/_utils/lock_management.html b/docs/_modules/mygrad/_utils/lock_management.html new file mode 100644 index 00000000..d6fcd9ef --- /dev/null +++ b/docs/_modules/mygrad/_utils/lock_management.html @@ -0,0 +1,972 @@ + + + + + + + + + + mygrad._utils.lock_management — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad._utils.lock_management

+"""
+Provides utilities responsible for locking/releasing array writeability.
+"""
+
+import os
+from collections import Counter, defaultdict
+from typing import TYPE_CHECKING
+from typing import Counter as CounterType
+from typing import DefaultDict, Dict, Generator, Iterable, Set
+from weakref import finalize, ref
+
+import numpy as np
+
+from mygrad._utils import ContextTracker, WeakRef, WeakRefIterable
+
+if TYPE_CHECKING:  # pragma: no cover
+    from mygrad.tensor_base import Tensor as TensorType
+
+
+# arr-id -> num active ops involving arr
+_array_counter: CounterType[int] = Counter()
+
+# arr-id -> weak-ref of arr, for arrays participating in live ops
+_array_tracker: Dict[int, WeakRef[np.ndarray]] = dict()
+
+# maps a base-array's ID to the IDs of views that can't be unlocked
+# until the base is unlocked
+_views_waiting_for_unlock: DefaultDict[int, Set[int]] = defaultdict(set)
+
+__all__ = [
+    "lock_arr_writeability",
+    "release_writeability_lock_on_op",
+    "mem_guard_off",
+    "mem_guard_on",
+    "mem_guard_active",
+]
+
+
+def array_is_tracked(arr: np.ndarray) -> bool:
+    """Returns True if the provided array, or a view of it, is currently
+    involved in one or more mygrad operations."""
+    arr_id = id(arr)
+    return arr_id in _array_tracker and _array_tracker[arr_id]() is not None
+
+
+def lock_arr_writeability(arr: np.ndarray, force_lock: bool = False) -> np.ndarray:
+    """Increments the count of active ops that an array is involved in
+    and makes the array read-only.
+
+    Parameters
+    ----------
+    arr : numpy.ndarray
+
+    force_lock : bool, optional (default=False)
+        If True, an array that is already read-only will be tracked
+        for unlocking.
+
+    Returns
+    -------
+    numpy.ndarray
+        The locked array"""
+    arr_id = id(arr)
+    if not array_is_tracked(arr):
+        if (
+            not force_lock
+            and not arr.flags.writeable
+            and (arr.base is None or not array_is_tracked(arr.base))
+        ):
+            # array is natively read-only; don't do anything
+            return arr
+        # keeps track of array so we can clean up the array
+        # counter when tracked arrays fall out of scope
+        _array_tracker[arr_id] = ref(arr)
+        _array_counter[arr_id] = 1
+    else:
+        _array_counter[arr_id] += 1
+    if arr.flags.writeable is True:
+        arr.flags.writeable = False
+    return arr
+
+
+def unique_arrs_and_bases(
+    tensors: Iterable["TensorType"],
+) -> Generator[np.ndarray, None, None]:
+    """
+    Yields unique (by-ID) arrays from an iterable. If an array
+    has a base, the base is yielded first (assuming that base
+    object has not already been yielded).
+    """
+    seen = set()
+    for t in tensors:
+        arr = t.data
+        arr_id = id(arr)
+        if arr_id not in seen:
+            # important note!
+            # We must yield array bases first so that base's
+            # writeability is restored first.
+            # Then view's writeability can be restored
+            if arr.base is not None:
+                base_id = id(arr.base)
+                if base_id not in seen:
+                    seen.add(base_id)
+                    yield arr.base
+            seen.add(arr_id)
+            yield arr
+
+
+def _release_lock_on_arr_writeability(arr: np.ndarray):
+    """
+    Decrements the number of active ops the array participates in.
+    An array no longer participating in any ops will have its
+    writeability restored.
+    """
+    arr_id = id(arr)
+    num_active_ops = _array_counter[arr_id]
+
+    if num_active_ops == 1:
+        # final active op involving array is being de-referenced:
+        # okay to unlock array
+        del _array_counter[arr_id]
+
+        if arr.base is not None and arr.base.flags.writeable is False:
+            # Array is view and must wait until its base is released
+            # before it can be unlocked
+            # Thus we are still tracking this array
+            _views_waiting_for_unlock[id(arr.base)].add(arr_id)
+        else:
+            # we no longer need to track the array
+            arr.flags.writeable = True
+            _array_tracker.pop(arr_id, None)
+            if not _array_tracker and _views_waiting_for_unlock:
+                # If no arrays are being tracked, then there can't
+                # be any views waiting to be unlocked.
+                # Clean up!
+                _views_waiting_for_unlock.clear()
+    elif num_active_ops > 0:
+        _array_counter[arr_id] = num_active_ops - 1
+
+    if (
+        arr.base is None
+        and arr.flags.writeable
+        and (arr_id in _views_waiting_for_unlock)
+    ):
+        # array was the base of a view waiting to be unlocked.
+        #
+        # Either:
+        #    view no longer exists
+        #    or view is involved in new op
+        #    or view can now get unlocked
+        # under all conditions view will no longer be waiting to be unlocked
+        for view_arr_id in tuple(_views_waiting_for_unlock[arr_id]):
+            if _array_counter[view_arr_id] > 0:
+                # view involved in new op
+                continue
+
+            _views_waiting_for_unlock[arr_id].remove(view_arr_id)
+
+            try:
+                view_arr = _array_tracker.pop(view_arr_id)()
+                if view_arr is None:
+                    continue
+            except KeyError:
+                # view array is no longer available for unlocking
+                continue
+
+            try:
+                view_arr.flags.writeable = True
+            except ValueError:  # pragma: no cover
+                # sometimes this raises.. but it is not
+                # reproducible and is very rare
+                pass
+
+        if not _views_waiting_for_unlock[arr_id]:
+            _views_waiting_for_unlock.pop(arr_id)
+
+
+def release_writeability_lock_on_op(arr_refs: WeakRefIterable[np.ndarray]):
+    """Marks each array (and for a view, its base) to have its
+    writeability lock released.
+
+    An array is made writeable only once all of its locks
+    have been released.
+
+    Parameters
+    ----------
+    arr_refs : WeakRefIterable[np.ndarray]
+        The arrays to be unlocked. Only one lock is released
+        on each array, even if the same array occurs
+        multiple times in the iterable."""
+    for arr in arr_refs:
+        _release_lock_on_arr_writeability(arr)
+
+
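The locking utilities above are what MyGrad's memory guard uses to protect arrays participating in live computational graphs. A small sketch of the user-visible effect, patterned on the docstring examples further below (the read-only flag is exactly what lock_arr_writeability toggles):

import numpy as np
import mygrad as mg

x = np.arange(3.0)
y = mg.ones_like(x)
z = x * y                      # x now participates in a live op and gets locked
assert not x.flags.writeable   # lock_arr_writeability flipped the writeable flag

try:
    x[:] = 0                   # mutating a locked array raises instead of corrupting z
except ValueError:
    pass

z.backward()
y.grad                         # array([0., 1., 2.])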
+MEM_GUARD = os.environ.get("MYGRAD_MEM_GUARD", True)
+
+if MEM_GUARD in {"True", "true", "1", 1, True}:
+    MEM_GUARD = True
+elif MEM_GUARD in {"False", "false", "0", 0, False}:  # pragma: no cover
+    MEM_GUARD = False
+else:  # pragma: no cover
+    from warnings import warn
+
+    warn(
+        f"Environment variable MYGRAD_MEM_GUARD was set to an unknown value {MEM_GUARD}. "
+        f"Proceeding with `MEM_GUARD=True`"
+    )
+    MEM_GUARD = True
+
+
+class MemStateContext(ContextTracker):
+    @property
+    def state(self):
+        return MEM_GUARD
+
+    @state.setter
+    def state(self, value: bool):
+        if not isinstance(value, bool):  # pragma: no cover
+            raise TypeError(
+                f"MEM_GUARD must be set to a boolean value, got {value} (type={type(value)})"
+            )
+
+        global MEM_GUARD
+        MEM_GUARD = value
+
+
+class _NoMemGuard(MemStateContext):
+    """A context manager used to suspend memory-locking behavior
+
+    Examples
+    --------
+    >>> from mygrad import mem_guard_off
+    >>> with mem_guard_off:
+    ...     # array-memory locking is turned off
+    ...     pass
+    ... # previous memory-locking behavior is restored
+
+    This can also be used as a decorator
+
+    >>> @mem_guard_off
+    ... def f():
+    ...     # array-memory locking is turned off within function
+    ...     return
+
+    """
+
+    _enter_set_value = False
+
+
+class _WithMemGuard(MemStateContext):
+    """A context manager used to enable memory-locking behavior
+
+    Examples
+    --------
+    >>> from mygrad import mem_guard_on
+    >>> with mem_guard_on:
+    ...     # array-memory locking is turned on
+    ...     pass
+    ... # previous memory-locking behavior is restored
+
+    This can also be used as a decorator
+
+    >>> @mem_guard_on
+    ... def f():
+    ...     # array-memory locking is turned on within function
+    ...     return
+
+    """
+
+    _enter_set_value = True
+
+
+mem_guard_off = _NoMemGuard()
+mem_guard_on = _WithMemGuard()
+
+
+
[docs]def turn_memory_guarding_off(): + """Globally disables all memory-guarding mechanisms, except + for in contexts where they are explicitly enabled. + + Notes + ----- + With memory guarding disabled, arrays participating in active + computational graphs are not protected from being mutated by + the user. Mutating such an array will corrupt the derivatives + that are computed via back-propagation, and will produce + incorrect results. + + This can speed up computations involving many small tensors + substantially. + + If you want to disable memory guarding at the system level, you + can set the system environment variable MYGRAD_MEM_GUARD=False. + NOTE THAT THIS IS NOT RECOMMENDED. + + See Also + -------- + turn_memory_guarding_on : Globally enables all memory-guarding mechanisms + mem_guard_off : context manager & decorator for suspending memory guarding + mem_guard_on : context manager & decorator for enabling memory guarding + + Examples + -------- + The following demonstrates how one can unwittingly corrupt + backpropagation through a computational graph + + >>> import mygrad as mg + >>> import numpy as np + >>> mg.turn_memory_guarding_off() # speeds up calculations, but with risks involved.. + >>> x = np.arange(3.) + >>> y = mg.ones_like(x) + >>> z = x * y + >>> x[:] = 0 # mutates x, corrupting state associated with z + >>> z.backward() + >>> y.grad # would be array([0., 1., 2.]) if graph wasn't corrupted + array([0., 0., 0.]) + """ + global MEM_GUARD + MEM_GUARD = False
+ + +def turn_memory_guarding_on(): + """Globally enables all memory-guarding mechanisms, except + for in contexts where they are explicitly disabled. + + Notes + ----- + Memory guarding is enabled by default. It ensures that arrays + that are participating in computational graphs cannot be mutated + (at least unwittingly..), which provides important assurances that + the state of the computational graph is not corrupted for + back-propagation. + + Memory guarding can slow down computations involving many small tensors. + Realistic worst-case benchmarks suggest a ~50% slowdown. + + If performance is important, it is recommended that you test your code leaving + memory guarding enabled. Presuming the code runs without any errors regarding + writing to read-only arrays, you can proceed to disable memory guarding and + enjoy the concomitant speedups. + + Note also that running your code in a `no_autodiff` context will automatically + disable memory guarding. + + See Also + -------- + turn_memory_guarding_off : Globally enables all memory-guarding mechanisms + mem_guard_off : context manager & decorator for suspending memory guarding + mem_guard_on : context manager & decorator for enabling memory guarding + no_autodiff : context manager for disabling graph-tracking for back propagation + + Examples + -------- + The following demonstrates how memory guarding prevents one from + unwittingly corrupting an active computational graph + + >>> import mygrad as mg + >>> import numpy as np + >>> # (Note that memory guarding is on by default, so + >>> # this call isn't usually needed...) + >>> mg.turn_memory_guarding_on() + >>> x = np.arange(3.) + >>> y = mg.ones_like(x) + >>> z = x * y + >>> try: + ... x[:] = 0 # raises because `x` is made read-only + ... except ValueError: + ... pass + >>> z.backward() + >>> y.grad # correct gradient is computed + array([0., 1., 2.]) + """ + global MEM_GUARD + MEM_GUARD = True + + +def mem_guard_active() -> bool: + """Indicates whether or not memory guarding is active. + + See Also + -------- + turn_memory_guarding_on : Globally enables all memory-guarding mechanisms + turn_memory_guarding_off : Globally enables all memory-guarding mechanisms + mem_guard_off : context manager & decorator for suspending memory guarding + mem_guard_on : context manager & decorator for enabling memory guarding + """ + return MEM_GUARD + + +def force_lock_tensor_and_creators(tensor: "TensorType"): + unique_arrs = tuple( + lock_arr_writeability(arr) + for arr in unique_arrs_and_bases(tensor.creator.variables) + ) + lock_arr_writeability(tensor.data, force_lock=True) + tensor_refs = WeakRefIterable(unique_arrs) + tensor_refs.append(tensor.data) + finalize( + tensor.creator, + release_writeability_lock_on_op, + tensor_refs, + ) +
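Putting the toggles above together, a short sketch of how the global switch, the query function, and the context managers interact (this assumes, as the docstrings above do, that these names are re-exported at the top level of mygrad):

import mygrad as mg

assert mg.mem_guard_active()       # guarding is on by default

mg.turn_memory_guarding_off()      # global switch
assert not mg.mem_guard_active()
mg.turn_memory_guarding_on()

with mg.mem_guard_off:             # scoped suspension
    assert not mg.mem_guard_active()
assert mg.mem_guard_active()       # prior behavior restored on exit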
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/computational_graph.html b/docs/_modules/mygrad/computational_graph.html new file mode 100644 index 00000000..781b4bd8 --- /dev/null +++ b/docs/_modules/mygrad/computational_graph.html @@ -0,0 +1,727 @@ + + + + + + + + + + mygrad.computational_graph — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.computational_graph

+import numpy as np
+
+try:
+    from graphviz import Digraph
+except ImportError:
+    pass
+
+from mygrad.tensor_base import Tensor
+
+__all__ = ["build_graph"]
+
+
+
[docs]def build_graph( + fin, + names=None, + *, + render=True, + save=False, + dims=False, + dtypes=False, + sum_stats=False, +): + """Builds and renders a computational graph. + + Parameters + ---------- + fin : mygrad.Tensor + The tensor object that will be the final node in the + computational graph. + + names : Optional[Dict[str, Union[mygrad.Tensor, numpy.ndarray]]] + A dictionary that maps names of Tensors to Tensor objects. If + an argument is passed to names, the key name that maps to a Tensor + included in the computational graph will be used as a label for the + Tensor's node. If no argument is passed, the nodes on the + computational graph will display the full Tensor. + + To use the names assigned in the local environment, + pass ``names=locals()`` to the build_graph function. + + If different names are used from the local environment, + the key must map to the exact Tensor object. A new Tensor or copy + of the original Tensor should not be created as the value in the + dictionary. + + Only instances of mygrad.Tensor or numpy.ndarray can have labels + assigned to Nodes. If a list or tuple is used in an operation + with a Tensor, and names is not None, the Node label will be + set to *Constant*. If a list or tuple is used in multiple operations, + a unique Node will be created for each time it is used. + + A scalar will always be used as the label for a 0-dimensional + Tensor's Node. + + render : bool, optional (default=True) + If True, build_graph will return a graphviz Digraph object that, + when called, will render the computational graph in a Jupyter + notebook or the Jupyter Qt console. If False, nothing is returned. + + save : bool, optional (default=False) + If True, build_graph will save a rendered computational graph to + the current working directory as ``computational_graph.pdf``. + + dims : bool, optional (default=False) + If True, Tensor dimensions are added to Node labels. Dimensions + will not be displayed for scalar values. + + dtypes : bool, optional (default=False) + If True, Tensor data types are added to Node labels. + + sum_stats : bool, optional (default=False) + If True, Tensor minimums, maximums, medians, and means are + added to Node labels. These will not be displayed for scalar values. + + Returns + ------- + Union[graphviz.Digraph, None] + + Notes + ----- + build_graph requires that Graphviz is installed. + """ + assert isinstance(fin, Tensor), "fin must be a Tensor" + assert isinstance(names, (dict, type(None))) + assert isinstance(render, bool) + assert isinstance(save, bool) + assert isinstance(dims, bool) + assert isinstance(dtypes, bool) + assert isinstance(sum_stats, bool) + + graph = Digraph(strict=True) + graph.node_attr.update(fontsize="12") + + _add_node(fin, graph, names=names, dims=dims, dtypes=dtypes, sum_stats=sum_stats) + + if save: + graph.render(filename="computational_graph", cleanup=True) + + if render: + return graph
+ + +def _add_node(node, graph, op_id=None, **kwargs): + """Recursively traces computational graph and adds nodes to Digraph.""" + node_id = str(id(node)) + node_lab = repr(node) + if kwargs["names"] is not None: + for key in kwargs["names"]: + if id(kwargs["names"][key]) == id(node): + node_lab = key + break + elif id(kwargs["names"][key]) == id(node.data): + node_lab = key + "\n*Constant*" + node_id = str(id(node.data)) + break + if node_lab == repr(node): + if not node.ndim: + node_lab = str(node.data) + elif node._constant: + node_lab = "*Constant*" + else: + node_lab = "Intermediary Tensor" + + if node.ndim: + if kwargs["dims"]: + node_lab += f"\nDims: {node.shape}" + if kwargs["dtypes"]: + node_lab += f"\nDtype: {node.dtype}" + if kwargs["sum_stats"]: + node_lab += ( + f"\nMin: {np.amin(node.data)}" + f"\nMedian: {np.median(node.data)}" + f"\nMean: {np.mean(node.data)}" + f"\nMax: {np.amax(node.data)}" + ) + else: + if kwargs["dtypes"]: + node_lab += f"\nDtype: {node.dtype}" + + graph.node(name=node_id, label=node_lab) + + if node._creator is None: + if op_id is not None: + graph.edge(node_id, op_id) + return + else: + op_lab = repr(node._creator).rpartition(".")[-1].split(" ")[0] + if op_id is not None: + graph.edge(node_id, op_id) + op_id = str(id(node._creator)) + + graph.node(name=op_id, label=op_lab, style="filled", fillcolor="red") + graph.edge(op_id, node_id) + + for var in node._creator.variables: + _add_node(var, graph, op_id=op_id, **kwargs) +
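A small usage sketch for build_graph (Graphviz must be installed; passing names=locals() lets nodes pick up the local variable names, as described in the docstring). The variables below are chosen to mirror the example graph rendered elsewhere in these docs:

import mygrad as mg
from mygrad.computational_graph import build_graph

x = mg.tensor(2.0)
y = mg.tensor(3.0)
f = x * y
g = (f + x) - 2.0

# Label nodes with local variable names and annotate them with shapes/dtypes.
graph = build_graph(g, names=locals(), dims=True, dtypes=True)
# In a Jupyter notebook `graph` renders inline; save=True would also write
# computational_graph.pdf to the current working directory.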
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/indexing_routines/funcs.html b/docs/_modules/mygrad/indexing_routines/funcs.html new file mode 100644 index 00000000..66a06850 --- /dev/null +++ b/docs/_modules/mygrad/indexing_routines/funcs.html @@ -0,0 +1,675 @@ + + + + + + + + + + mygrad.indexing_routines.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.indexing_routines.funcs

+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import _NoValue
+from mygrad.tensor_base import Tensor, asarray, implements_numpy_override
+from mygrad.typing import ArrayLike
+
+from .ops import Where
+
+__all__ = ["where"]
+
+
+
[docs]@implements_numpy_override() +def where( + condition: ArrayLike, + x: ArrayLike = _NoValue, + y: ArrayLike = _NoValue, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + where(condition, [x, y]) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only ``condition`` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. The rest of this + documentation covers only the case where all three arguments are + provided. + + This docstring was adapted from that of ``numpy.where``. + + Parameters + ---------- + condition : ArrayLike, bool + Where True, yield `x`, otherwise yield ``y``. ``x``, ``y`` + and `condition` need to be broadcastable to some shape. + + x : ArrayLike + Values from which to chosen where ``condition`` is ``True``. + + y : ArrayLike + Values from which to chosen where ``condition`` is ``False``. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + out : mygrad.Tensor + A tensor with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.arange(10) + >>> a + Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> mg.where(a < 5, a, 10*a) + Tensor([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional tensors too: + + >>> mg.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + Tensor([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> mg.where(x < y, x, 10 + y) # both x and 10+y are broadcast + Tensor([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = mg.Tensor([[0, 1, 2], + ... [0, 2, 4], + ... [0, 3, 6]]) + >>> mg.where(a < 4, a, -1) # -1 is broadcast + Tensor([[ 0, 1, 2], + [ 0, 2, -1], + [ 0, 3, -1]]) + """ + if x is _NoValue and y is _NoValue: + return np.where(asarray(condition)) + + if x is _NoValue or y is _NoValue: + raise ValueError("either both or neither of x and y should be given") + + return Tensor._op( + Where, x, y, op_kwargs={"condition": condition}, constant=constant + )
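The docstring examples above only exercise the forward pass; a short sketch of how gradients flow through where (the selected branch receives a gradient of 1, the unselected branch receives 0):

import mygrad as mg

x = mg.tensor([ 1.0, -2.0,  3.0])
y = mg.tensor([10.0, 20.0, 30.0])

out = mg.where(x > 0, x, y)   # picks from x where positive, otherwise from y
out.backward()

x.grad   # array([1., 0., 1.]) -- gradient reaches x only where it was selected
y.grad   # array([0., 1., 0.])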
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/linalg/funcs.html b/docs/_modules/mygrad/linalg/funcs.html new file mode 100644 index 00000000..933ad9b1 --- /dev/null +++ b/docs/_modules/mygrad/linalg/funcs.html @@ -0,0 +1,1022 @@ + + + + + + + + + + mygrad.linalg.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.linalg.funcs

+from numbers import Real
+from typing import Optional, Sequence, Tuple, Union
+
+import numpy as np
+
+try:  # pragma: no cover
+    from numpy._core.einsumfunc import _parse_einsum_input
+except ImportError:  # pragma: no cover
+    from numpy.core.einsumfunc import _parse_einsum_input
+
+from mygrad.math.misc.funcs import absolute
+from mygrad.math.sequential.funcs import max as mg_max
+from mygrad.math.sequential.funcs import min as mg_min
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike
+
+from .ops import EinSum, Norm
+
+__all__ = ["einsum", "norm"]
+
+
+
[docs]@implements_numpy_override(np.linalg.norm) +def norm( + x: ArrayLike, + ord: Optional[Union[int, float]] = None, + axis: Optional[Union[int, Tuple[int]]] = None, + keepdims: bool = False, + *, + nan_to_num: bool = True, + constant: Optional[bool] = None, +) -> Tensor: + r"""Vector norm. + + This function is an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. + + In contrast to ``numpy.linalg.norm``, matrix norms are not supported. + + This docstring was adapted from that of ``numpy.linalg.norm`` [1]_. + + Parameters + ---------- + x : ArrayLike + Input tensor. If `axis` is None, then `x` must be 1-D unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``x.ravel`` will be returned. + + ord : Optional[Union[int, float]] + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. The default is None. + + axis : Optional[Union[int, Tuple[int]]] + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. The default is None. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + nan_to_num : bool, optional (default=True) + If `True` then gradients that would store nans due to the presence of + zeros in `x` will instead store zeros in those places. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + Norm(s) of the vector(s). + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ========================== + ord norm for vectors + ===== ========================== + inf max(abs(x)) + -inf min(abs(x)) + 0 sum(x != 0) + 1 as below + -1 as below + 2 as below + -2 as below + other sum(abs(x)**ord)**(1./ord) + ===== ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] Retrieved from: https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html + .. [2] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor([[1.0, 2.0, 3.0], + ... [1.0, 0.0, 0.0]]) + >>> l2_norms = mg.linalg.norm(x, axis=1, ord=2) + >>> l2_norms + Tensor([3.74165739, 1. ]) + + The presence of the elementwise absolute values in the norm operation means that zero-valued entries in any of + input vectors have an undefined derivative. When `nan_to_num=False` is specified these derivatives will be reported + as `nan`, otherwise they will be made to be 0.0. + + >>> l2_norms = mg.linalg.norm(x, axis=1, ord=2, nan_to_num=True) + >>> l2_norms.backward() + >>> x.grad + array([[0.26726124, 0.53452248, 0.80178373], + [1. 
, nan, nan]])
+
+    This is rigorously true, but is often not the desired behavior in autodiff applications.
+    Rather, it can be preferable to use `0.0` to fill these undefined derivatives.
+    This is the default behavior when `nan_to_num` is not specified.
+
+    >>> l2_norms = mg.linalg.norm(x, axis=1, ord=2)  # default setting: `nan_to_num=True`
+    >>> l2_norms.backward()
+    >>> x.grad
+    array([[0.26726124, 0.53452248, 0.80178373],
+           [1.        , 0.        , 0.        ]])
+
+    L1 norms along each of the three columns:
+
+    >>> mg.linalg.norm(x, axis=0, ord=1)
+    Tensor([2., 2., 3.])
+    """
+    if isinstance(ord, Real) and np.isinf(ord):
+        op = mg_max if ord > 0 else mg_min
+        abs_ = absolute(x, constant=constant)
+        out = op(abs_, axis=axis, keepdims=keepdims)
+
+        in_ndim = abs_.creator.variables[0].ndim
+
+        if (axis is None and ord is not None and in_ndim == 2) or (
+            hasattr(axis, "__len__") and len(axis) > 1
+        ):
+            raise NotImplementedError(
+                "mygrad.linalg.norm does not support matrix norms"
+            )
+        return out
+    return Tensor._op(
+        Norm,
+        x,
+        op_kwargs={
+            "axis": axis,
+            "keepdims": keepdims,
+            "ord": ord,
+            "nan_to_num": nan_to_num,
+        },
+        constant=constant,
+    )
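One more sketch, exercising the max/min branch in the body above: an infinity-norm reduces to max(abs(x)), so the gradient lands entirely on the largest-magnitude entry.

import numpy as np
import mygrad as mg

x = mg.tensor([[1.0, -4.0, 2.0]])
inf_norm = mg.linalg.norm(x, axis=1, ord=np.inf)   # max(abs(x)) along axis 1
inf_norm.backward()
x.grad   # array([[ 0., -1.,  0.]]) -- sign(x) at the max-|x| entry, zero elsewhere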
+ + +
[docs]@implements_numpy_override() +def einsum( + *operands: Union[ArrayLike, str, Sequence[int]], + optimize: bool = False, + out: Optional[Union[np.ndarray, Tensor]] = None, + constant: Optional[bool] = None, +) -> Tensor: + r""" + einsum(subscripts, *operands) + + Evaluates the Einstein summation convention on the operands. This implementation + exactly mirrors that of ``numpy.einsum`` and supports back-propagation through + all variety of tensor-products, sums, traces, and views that it can perform. + + The following docstring was adapted from the documentation for ``numpy.einsum`` + + Using the Einstein summation convention, many common multi-dimensional + array operations can be represented in a simple fashion. This function + provides a way to compute such summations. The best way to understand this + function is to try the examples below, which show how many common NumPy/MyGrad + functions can be implemented as calls to ``einsum``. + + Back-propagation via ``einsum`` is optimized such that any tensor that occurs + redundantly within the summation will only have its gradient computed once. + This optimization accommodates all number and combination of redundancies that can + be encountered. + + E.g. back-propping through ``einsum('...,...->', x, x)`` will only incur a single + computation/accumulation for ``x.grad`` rather than two. This permits users to + leverage the efficiency of sum-reduction, where ``(x ** 2).sum()`` is sub-optimal, + without being penalized during back-propagation. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation. + + operands : array_like + The tensors used in the summation. + + optimize : {False, True, 'greedy', 'optimal'}, optional (default=False) + Controls if intermediate optimization should occur; also enables + the use of BLAS where possible. This can produce significant speedups + for computations like matrix multiplication. + + No optimization will occur if False and True will default to the 'greedy' + algorithm. Also accepts an explicit contraction list from the + ``np.einsum_path`` function. See ``np.einsum_path`` for more details. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + output : mygrad.Tensor + The calculation based on the Einstein summation convention. + + Notes + ----- + The subscripts string is a comma-separated list of subscript labels, + where each label refers to a dimension of the corresponding operand. + Repeated subscripts labels in one operand take the diagonal. For example, + ``einsum('ii', a)`` is equivalent to ``np.trace(a)`` (however, the former + supports back-propagation). + + Whenever a label is repeated, it is summed, so ``einsum('i, i', a, b)`` + is equivalent to ``np.inner(a, b)``. If a label appears only once, + it is not summed, so ``einsum('i', a)`` produces a view of ``a`` + with no changes. + + The order of labels in the output is by default alphabetical. This + means that ``np.einsum('ij', a)`` doesn't affect a 2D tensor, while + ``einsum('ji', a)`` takes its transpose. + + The output can be controlled by specifying output subscript labels + as well. This specifies the label order, and allows summing to + be disallowed or forced when desired. 
The call ``einsum('i->', a)`` + is like ``np.sum(a, axis=-1)``, and ``einsum('ii->i', a)`` + is like ``np.diag(a)``. The difference is that `einsum` does not + allow broadcasting by default. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``einsum('...ii->...i', a)``. + To take the trace along the first and last axes, + you can do ``einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, you can do + ``einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new tensor. Thus, taking the diagonal as ``einsum('ii->i', a)`` + produces a view. + + An alternative way to provide the subscripts and operands is as + ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples + below have corresponding `einsum` calls with the two parameter methods. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.arange(25).reshape(5,5) + >>> b = mg.arange(5) + >>> c = mg.arange(6).reshape(2,3) + + Compute the trace of ``a``, :math:`\sum_{i}{A_{ii}} = f`: + + >>> einsum('ii', a) + Tensor(60) + >>> einsum(a, [0, 0]) + Tensor(60) + >>> np.trace(a.data) + array(60) + + Return a view along the diagonal of ``a``, :math:`A_{ii} = F_{i}`: + + >>> einsum('ii->i', a) + Tensor([ 0, 6, 12, 18, 24]) + >>> einsum(a, [0,0], [0]) + Tensor([ 0, 6, 12, 18, 24]) + >>> np.diag(a.data) + array([ 0, 6, 12, 18, 24]) + + Compute the matrix-vector product of ``a`` with ``b``, :math:`\sum_{j}{A_{ij} B_{j}} = F_{i}`: + + >>> einsum('ij,j', a, b) + Tensor([ 30, 80, 130, 180, 230]) + >>> einsum(a, [0,1], b, [1]) + Tensor([ 30, 80, 130, 180, 230]) + >>> mg.matmul(a, b) + Tensor([ 30, 80, 130, 180, 230]) + >>> einsum('...j,j', a, b) + Tensor([ 30, 80, 130, 180, 230]) + + Take the transpose of ``c``, :math:`C_{ji} = F_{ij}`: + + >>> einsum('ji', c) + Tensor([[0, 3], + [1, 4], + [2, 5]]) + >>> einsum(c, [1, 0]) + Tensor([[0, 3], + [1, 4], + [2, 5]]) + >>> c.T + Tensor([[0, 3], + [1, 4], + [2, 5]]) + + Compute ``3 * c``: + + >>> einsum('..., ...', 3, c) + Tensor([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> einsum(',ij', 3, c) + Tensor([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> einsum(3, [Ellipsis], c, [Ellipsis]) + Tensor([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> 3 * c + Tensor([[ 0, 3, 6], + [ 9, 12, 15]]) + + Compute the inner product of ``b`` with itself, :math:`\sum_{i}{B_{i} B_{i}} = f`: + + >>> einsum('i,i', b, b) + Tensor(30) + >>> einsum(b, [0], b, [0]) + Tensor(30) + >>> np.inner(b.data, b.data) + 30 + + Compute the outer product of ``array([1, 2])`` with ``b``, :math:`A_{i}B_{j} = F_{ij}`: + + >>> einsum('i,j', np.arange(2)+1, b) + Tensor([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> einsum(np.arange(2)+1, [0], b, [1]) + Tensor([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> einsum('i...->...', a) + Tensor([50, 55, 60, 65, 70]) + >>> einsum(a, [0,Ellipsis], [Ellipsis]) + Tensor([50, 55, 60, 65, 70]) + >>> np.sum(a, axis=0) + array([50, 55, 60, 65, 70]) + + Compute the tensor product :math:`\sum_{ij}{A_{ijk} B_{jil}} = F_{kl}` + + >>> a = mg.arange(60.).reshape(3,4,5) + >>> b = mg.arange(24.).reshape(4,3,2) + >>> einsum('ijk,jil->kl', a, b) + Tensor([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> 
einsum(a, [0,1,2], b, [1,0,3], [2,3]) + Tensor([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Matrix multiply ``a.T`` with ``b.T``, :math:`\sum_{k}{A_{ki} B_{jk}} = F_{ij}` + + >>> a = mg.arange(6).reshape((3,2)) + >>> b = mg.arange(12).reshape((4,3)) + >>> einsum('ki,jk->ij', a, b) + Tensor([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> einsum('ki,...k->i...', a, b) + Tensor([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> einsum('k...,jk', a, b) + Tensor([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Make an assignment to a view along the diagonal of ``a``: + + >>> a = mg.zeros((3, 3)) + >>> einsum('ii->i', a).data[:] = 1 + >>> a + Tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + + # TODO: normalize error handling for invalid inputs + operands = list(operands) + if isinstance(operands[0], str): + # operands form: "ijk, ijk", x, y + variables = operands[1:] + if any(isinstance(i, Tensor) for i in operands): + operands[1:] = ( + var.data if isinstance(var, Tensor) else var for var in operands[1:] + ) + else: + # operands form: op0, sublist0, op1, sublist1, ..., [sublistout] + end = -1 if len(operands) % 2 else None # -1 if sublistout is included + variables = operands[:end:2] + if any(isinstance(i, Tensor) for i in operands): + operands[:end:2] = ( + var.data if isinstance(var, Tensor) else var for var in operands[:end:2] + ) + + in_lbls, out_lbls, _ = _parse_einsum_input(operands) + + # einsum doesn't handle out=None properly in numpy 1.17 + + return Tensor._op( + EinSum, + *variables, + op_kwargs=dict(in_lbls=in_lbls, out_lbls=out_lbls, optimize=optimize), + constant=constant, + out=out, + )
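As noted above, einsum's backward pass accumulates the gradient of a redundantly-occurring tensor only once. A minimal sketch, back-propagating through a sum of squares written with einsum:

import mygrad as mg

x = mg.tensor([1.0, 2.0, 3.0])
out = mg.einsum("i,i->", x, x)   # sum(x * x); x occurs twice in the summation
out.backward()
x.grad   # array([2., 4., 6.]) -- i.e. 2*x, accumulated in a single pass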
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/math/arithmetic/funcs.html b/docs/_modules/mygrad/math/arithmetic/funcs.html new file mode 100644 index 00000000..d5324ebd --- /dev/null +++ b/docs/_modules/mygrad/math/arithmetic/funcs.html @@ -0,0 +1,1402 @@ + + + + + + + + + + mygrad.math.arithmetic.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.math.arithmetic.funcs

+from typing import Optional, Union
+
+from numpy import ndarray
+
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike, DTypeLikeReals, Mask
+from mygrad.ufuncs._ufunc_creators import ufunc_creator
+
+from .ops import (
+    Add,
+    AddSequence,
+    Divide,
+    Multiply,
+    MultiplySequence,
+    Negative,
+    Positive,
+    Power,
+    Reciprocal,
+    Square,
+    Subtract,
+)
+
+__all__ = [
+    "add",
+    "add_sequence",
+    "divide",
+    "multiply",
+    "multiply_sequence",
+    "negative",
+    "positive",
+    "power",
+    "reciprocal",
+    "square",
+    "subtract",
+    "true_divide",
+]
+
+
+@ufunc_creator(Add)
+def add(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Add the arguments element-wise.
+
+    This docstring was adapted from that of numpy.add [1]_
+
+    Parameters
+    ----------
+    x1, x2 : ArrayLike
+        The arrays to be added.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output). Non-tensor array-likes are
+        treated as constants.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    add : Tensor
+        The sum of `x1` and `x2`, element-wise.
+
+    Notes
+    -----
+    Equivalent to `x1` + `x2` in terms of tensor broadcasting.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.add.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.add(1.0, 4.0)
+    Tensor(5.0)
+    >>> x1 = mg.tensor([[0., 1., 2.],
+    ...                 [3., 4., 5.],
+    ...                 [6., 7., 8.]])
+    >>> x2 = mg.tensor([0., 1., 2.])
+    >>> mg.add(x1, x2)
+    Tensor([[  0.,   2.,   4.],
+            [  3.,   5.,   7.],
+            [  6.,   8.,  10.]])
+    """
+    ...
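The example above covers only the forward pass. Because these ufuncs back-propagate through broadcasting, a broadcast operand receives a gradient summed over the broadcast axes; a small sketch:

import mygrad as mg

x1 = mg.tensor([[0., 1., 2.],
                [3., 4., 5.]])
x2 = mg.tensor([10., 20., 30.])   # broadcast across both rows of x1

out = mg.add(x1, x2)
out.backward()

x1.grad   # ones of shape (2, 3)
x2.grad   # array([2., 2., 2.]) -- one contribution per row it was broadcast into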
+
+
+@ufunc_creator(Subtract)
+def subtract(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Subtract the arguments element-wise.
+
+    This docstring was adapted from that of numpy.subtract [1]_
+
+    Parameters
+    ----------
+    x1, x2 : ArrayLike
+        The arrays to be subtracted from each other.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output). Non-tensor array-likes are
+        treated as constants.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    subtract : Tensor
+        The difference of `x1` and `x2`, element-wise.
+
+    Notes
+    -----
+    Equivalent to ``x1 - x2`` in terms of tensor broadcasting.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.subtract.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.subtract(1.0, 4.0)
+    Tensor(-3.0)
+
+    >>> x1 = mg.tensor([[0., 1., 2.],
+    ...                 [3., 4., 5.],
+    ...                 [6., 7., 8.]])
+    >>> x2 = mg.tensor([0., 1., 2.])
+    >>> mg.subtract(x1, x2)
+    Tensor([[ 0.,  0.,  0.],
+            [ 3.,  3.,  3.],
+            [ 6.,  6.,  6.]])
+    """
+    ...
+
+
+@ufunc_creator(Multiply)
+def multiply(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Multiply the arguments element-wise.
+
+    This docstring was adapted from that of numpy.multiply [1]_
+
+    Parameters
+    ----------
+    x1, x2 : ArrayLike
+        Input arrays to be multiplied.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output). Non-tensor array-likes
+        are treated as constants.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    multiply : Tensor
+        The product of `x1` and `x2`, element-wise.
+
+    Notes
+    -----
+    Equivalent to `x1` * `x2` in terms of tensor broadcasting.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.multiply.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.multiply(2.0, 4.0)
+    Tensor(8.0)
+
+    >>> x1 = mg.tensor([[0., 1., 2.],
+    ...                 [3., 4., 5.],
+    ...                 [6., 7., 8.]])
+    >>> x2 = mg.tensor([0., 1., 2.])
+    >>> mg.multiply(x1, x2)
+    Tensor([[  0.,   1.,   4.],
+            [  0.,   4.,  10.],
+            [  0.,   7.,  16.]])
+    """
+    ...
+
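The where and out parameters described above are meant to compose the way they do for NumPy ufuncs. A sketch of masked multiplication into a pre-allocated tensor, illustrating the documented semantics (not a performance recipe):

import numpy as np
import mygrad as mg

x = mg.tensor([1.0, 2.0, 3.0])
result = mg.zeros(3)

# Positions where the mask is False keep their original value in `result`.
mg.multiply(x, 10.0, out=result, where=np.array([True, False, True]))
result   # Tensor([10.,  0., 30.])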
+
+@ufunc_creator(Divide)
+def true_divide(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Divide the arguments element-wise.
+
+    This docstring was adapted from that of numpy.true_divide [1]_
+
+    Parameters
+    ----------
+    x1 : ArrayLike
+        Dividend array.
+
+    x2 : ArrayLike
+        Divisor array.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output). Non-tensor array-likes
+        are treated as constants.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    true_divide : Tensor
+        The quotient of `x1` with `x2`, element-wise.
+
+    Notes
+    -----
+    In Python, ``//`` is the floor division operator and ``/`` the
+    true division operator.  The ``true_divide(x1, x2)`` function is
+    equivalent to true division in Python.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.true_divide.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> x = mg.arange(5)
+    >>> mg.true_divide(x, 4)
+    Tensor([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])
+
+    >>> x/4
+    Tensor([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ])
+
+    >>> x // 4
+    Tensor([0, 0, 0, 0, 1], dtype=int32)
+
+    Floor division with a tensor always produces a constant
+
+    >>> (x // 4).constant
+    True
+    """
+    ...
+
+
+divide = true_divide
+
+
+@ufunc_creator(Power)
+def power(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """First tensor elements raised to powers from second tensor, element-wise.
+
+    Raise each base in `x1` to the positionally-corresponding power in
+    `x2`.  `x1` and `x2` must be broadcastable to the same shape. Note that an
+    integer type raised to a negative integer power will raise a ValueError.
+
+    This docstring was adapted from that of numpy.power [1]_
+
+    Parameters
+    ----------
+    x1 : ArrayLike
+        The bases.
+
+    x2 : ArrayLike
+        The exponents.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output). Non-tensor array-likes
+        are treated as constants.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[ndarray, Tensor]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    power : Tensor
+        The bases in `x1` raised to the exponents in `x2`, element-wise.
+
+    See Also
+    --------
+    float_power : power function that promotes integers to float
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.power.html
+
+    Examples
+    --------
+    Cube each element in a list.
+
+    >>> import mygrad as mg
+    >>> x1 = list(range(6))
+    >>> x1
+    [0, 1, 2, 3, 4, 5]
+    >>> mg.power(x1, 3)
+    Tensor([  0,   1,   8,  27,  64, 125])
+
+    Raise the bases to different exponents.
+
+    >>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
+    >>> mg.power(x1, x2)
+    Tensor([  0.,   1.,   8.,  27.,  16.,   5.])
+
+    The effect of broadcasting.
+
+    >>> x2 = mg.tensor([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
+    >>> x2
+    Tensor([[1, 2, 3, 3, 2, 1],
+            [1, 2, 3, 3, 2, 1]])
+    >>> mg.power(x1, x2)
+    Tensor([[ 0,  1,  8, 27, 16,  5],
+            [ 0,  1,  8, 27, 16,  5]])
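+
+    A minimal back-propagation sketch (assuming a float-type tensor input):
+
+    >>> x = mg.tensor(3.0)
+    >>> mg.power(x, 2).backward()  # d(x**2)/dx = 2*x
+    >>> x.grad
+    array(6.)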
+    """
+    ...
+
+
+@ufunc_creator(Negative)
+def negative(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Negates the tensor element-wise.
+
+    This docstring was adapted from that of numpy.negative [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike or scalar
+        Input tensor.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    negative : Tensor
+        The negated values of `x`, element-wise (i.e. `-x`).
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.negative.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.negative([1.,-1.])
+    Tensor([-1.,  1.])
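+
+    A minimal back-propagation sketch (assuming a float-type tensor input):
+
+    >>> x = mg.tensor(2.0)
+    >>> mg.negative(x).backward()  # d(-x)/dx = -1
+    >>> x.grad
+    array(-1.)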
+    """
+    ...
+
+
+@ufunc_creator(Positive)
+def positive(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Returns a copy of the tensor.
+
+    This docstring was adapted from that of numpy.positive [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input array.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    positive : Tensor
+
+    Notes
+    -----
+    Equivalent to `x.copy()`, but only defined for types that support
+    arithmetic.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.positive.html
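+
+    Examples
+    --------
+    A minimal usage sketch (values are illustrative):
+
+    >>> import mygrad as mg
+    >>> mg.positive([-1., 0., 1.])
+    Tensor([-1.,  0.,  1.])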
+    """
+    ...
+
+
+@ufunc_creator(Reciprocal)
+def reciprocal(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Return the reciprocal of the argument element-wise.
+
+    This docstring was adapted from that of numpy.reciprocal [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input array.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    reciprocal : Tensor
+
+    Notes
+    -----
+    .. note::
+        This function is not designed to work with integers.
+
+    For integer arguments with absolute value larger than 1 the result is
+    always zero because of the way Python handles integer division.  For
+    integer zero the result is an overflow.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.reciprocal.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.reciprocal(2.)
+    Tensor(0.5)
+    >>> mg.reciprocal([1, 2., 3.33])
+    Tensor([ 1.       ,  0.5      ,  0.3003003])
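+
+    A minimal back-propagation sketch (assuming a float-type tensor input):
+
+    >>> x = mg.tensor(2.0)
+    >>> mg.reciprocal(x).backward()  # d(1/x)/dx = -1/x**2
+    >>> x.grad
+    array(-0.25)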
+    """
+    ...
+
+
+@ufunc_creator(Square)
+def square(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Return the square of the argument element-wise.
+
+    This docstring was adapted from that of numpy.square [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input data.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    Returns
+    -------
+    square : Tensor
+
+    See Also
+    --------
+    sqrt
+    power
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.square.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.square([100., 1000.])
+    Tensor([  10000., 1000000.])
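+
+    A minimal back-propagation sketch (assuming a float-type tensor input):
+
+    >>> x = mg.tensor(3.0)
+    >>> mg.square(x).backward()  # d(x**2)/dx = 2*x
+    >>> x.grad
+    array(6.)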
+    """
+    ...
+
+
+
[docs]def multiply_sequence(*variables: ArrayLike, constant: Optional[bool] = None) -> Tensor: + """``f(a, b, ...) -> a * b * ...`` + + Multiply a sequence of tensors. + + Parameters + ---------- + variables : ArrayLike + A sequence of broadcast-compatible tensors. Non-tensor array-likes are + treated as constants. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + + Notes + ----- + It is more efficient to back-propagate through this + function than it is through a computational graph + with N-1 corresponding multiplication operations. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor([1. , 2.]) + >>> y = mg.tensor([-1.]) + >>> z = mg.tensor([[1.]]) + >>> out = mg.multiply_sequence(x, y, z); out + Tensor([[-1., -2.]]) + + >>> out.backward() + >>> x.grad + array([-1., -1.]) + >>> y.grad + array([3.]) + >>> z.grad + array([[-3.]]) + """ + if len(variables) < 2: + raise ValueError( + f"`multiply_sequence` requires at least two inputs, got {len(variables)} inputs" + ) + return Tensor._op(MultiplySequence, *variables, constant=constant)
+ + +
[docs]def add_sequence(*variables: ArrayLike, constant: Optional[bool] = None) -> Tensor: + """``f(a, b, ...) -> a + b + ...`` + + Add a sequence of tensors. + + Parameters + ---------- + variables : ArrayLike + A sequence of broadcast-compatible tensors. Non-tensor array-likes are + treated as constants. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + + Notes + ----- + It is more efficient to back-propagate through this + function than it is through a computational graph + with N-1 corresponding addition operations. + + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor([1. , 2.]) + >>> y = mg.tensor([-1.]) + >>> z = mg.tensor([[1.]]) + >>> out = mg.add_sequence(x, y, z); out + Tensor([[1., 2.]]) + + >>> out.backward() + >>> x.grad + array([1., 1.]) + >>> y.grad + array([2.]) + >>> z.grad + array([[2.]]) + """ + if len(variables) < 2: + raise ValueError( + f"`add_sequence` requires at least two inputs, got {len(variables)} inputs" + ) + return Tensor._op(AddSequence, *variables, constant=constant)
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/math/misc/funcs.html b/docs/_modules/mygrad/math/misc/funcs.html new file mode 100644 index 00000000..2a002130 --- /dev/null +++ b/docs/_modules/mygrad/math/misc/funcs.html @@ -0,0 +1,1421 @@ + + + + + + + + + + mygrad.math.misc.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for mygrad.math.misc.funcs

+from typing import Optional, Union
+
+import numpy as np
+from numpy import ndarray
+
+import mygrad as mg
+from mygrad.math.misc.ops import MatMul
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike, DTypeLikeReals, Mask
+from mygrad.ufuncs import ufunc_creator
+
+from .ops import Abs, Cbrt, Maximum, Minimum, Sqrt
+
+__all__ = [
+    "abs",
+    "absolute",
+    "cbrt",
+    "clip",
+    "sqrt",
+    "maximum",
+    "minimum",
+    "matmul",
+    "multi_matmul",
+]
+
+
+@ufunc_creator(Abs)
+def absolute(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+    nan_to_num: bool = True,
+) -> Tensor:  # pragma: no cover
+    """The absolute value, computed elementwise.
+
+    This docstring was adapted from that of numpy.absolute [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input array.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    nan_to_num : bool, optional (default=True)
+        If `True` then gradients that would store nans due to the presence of
+        zeros in `x` will instead store zeros in those places.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    absolute : Tensor
+        A tensor containing the absolute value of
+        each element in `x`.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.absolute.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> x = mg.tensor([-1.2, 1.2])
+    >>> mg.absolute(x)
+    Tensor([ 1.2,  1.2])
+
+    The absolute-value function is not differentiable at `x=0.0`.
+    By default the derivative at this point is treated as 0.
+
+    >>> x = mg.tensor([-2.0, 0.0, 2.0])
+    >>> mg.absolute(x).backward()
+    >>> x.grad
+    array([-1.,  0.,  1.])
+
+    However, a more rigorous behavior can be enabled such that the
+    undefined derivative is returned as `nan`.
+
+    >>> x = mg.tensor([-2.0, 0.0, 2.0])
+    >>> mg.absolute(x, nan_to_num=False).backward()
+    >>> x.grad
+    array([-1., nan,  1.])
+
+    Plot the function and its derivative over ``[-5, 5]``:
+
+    .. plot::
+
+       >>> import mygrad as mg
+       >>> import matplotlib.pyplot as plt
+       >>> x = mg.linspace(-5, 5, 100)
+       >>> y = mg.absolute(x)
+       >>> plt.title("absolute(x)")
+       >>> y.backward()
+       >>> plt.plot(x, x.grad, label="df/dx")
+       >>> plt.plot(x, y, label="f(x)")
+       >>> plt.legend()
+       >>> plt.grid()
+       >>> plt.show()
+    """
+    ...
+
+
+abs = absolute
+
+
+@ufunc_creator(Sqrt)
+def sqrt(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """The square root, elementwise.
+
+    This docstring was adapted from that of numpy.sqrt [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        The values whose square-roots are required.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        A tensor of the same shape as `x`, containing the positive
+        square-root of each element in `x`. Negative-valued inputs
+        produce nans.
+
+
+    Notes
+    -----
+    *sqrt* has--consistent with common convention--as its branch cut the
+    real "interval" [`-inf`, 0), and is continuous from above on it.
+    A branch cut is a curve in the complex plane across which a given
+    complex function fails to be continuous.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.sqrt([1, 4, 9])
+    Tensor([ 1.,  2.,  3.])
+
+    >>> mg.sqrt([4, -1, mg.inf])
+    Tensor([ 2., nan, inf])
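+
+    A minimal back-propagation sketch (assuming a float-type tensor input):
+
+    >>> x = mg.tensor(4.0)
+    >>> mg.sqrt(x).backward()  # d(sqrt(x))/dx = 1/(2*sqrt(x))
+    >>> x.grad
+    array(0.25)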
+    """
+    ...
+
+
+@ufunc_creator(Cbrt)
+def cbrt(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """The cube root elementwise.
+
+    This docstring was adapted from that of numpy.cbrt [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        The values whose cube-roots are computed.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        A tensor of the same shape as `x`, containing the
+        cube-root of each element in `x`.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.cbrt.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.cbrt([1, 8, 27])
+    Tensor([ 1.,  2.,  3.])
+    """
+    ...
+
+
+@ufunc_creator(Maximum)
+def maximum(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Pair-wise maximum of tensor elements.
+
+    This docstring was adapted from that of numpy.maximum [1]_
+
+    Parameters
+    ----------
+    x1, x2 : ArrayLike
+        The tensors holding the elements to be compared.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output).
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        The maximum of `x1` and `x2`, element-wise.
+
+    See Also
+    --------
+    minimum :
+        Element-wise minimum of two arrays, propagates NaNs.
+
+    Notes
+    -----
+    The maximum is equivalent to ``mg.where(x1 >= x2, x1, x2)`` when
+    neither x1 nor x2 are nans, but it is faster and does proper
+    broadcasting.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.maximum.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.maximum([2, 3, 4], [1, 5, 2])
+    Tensor([2, 5, 4])
+
+    >>> mg.maximum(mg.eye(2), [0.5, 2]) # broadcasting
+    Tensor([[ 1. ,  2. ],
+           [ 0.5,  2. ]])
+
+    >>> mg.maximum([mg.nan, 0, mg.nan], [0, mg.nan, mg.nan])
+    Tensor([nan, nan, nan])
+    >>> mg.maximum(mg.Inf, 1)
+    Tensor(inf)
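+
+    As an illustrative sketch, the gradient flows only to the larger of the
+    two elements at each position (values assumed for demonstration):
+
+    >>> x = mg.tensor([1., 5.])
+    >>> y = mg.tensor([4., 2.])
+    >>> mg.maximum(x, y).backward()
+    >>> x.grad
+    array([0., 1.])
+    >>> y.grad
+    array([1., 0.])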
+    """
+    ...
+
+
+@ufunc_creator(Minimum)
+def minimum(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Pair-wise minimum of tensor elements.
+
+    This docstring was adapted from that of numpy.minimum [1]_
+
+    Parameters
+    ----------
+    x1, x2 : ArrayLike
+        The tensors holding the elements to be compared.
+        If ``x1.shape != x2.shape``, they must be broadcastable to a common
+        shape (which becomes the shape of the output).
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        The minimum of `x1` and `x2`, element-wise.
+
+    See Also
+    --------
+    maximum :
+        Element-wise maximum of two arrays, propagates NaNs.
+
+    Notes
+    -----
+    The minimum is equivalent to ``mg.where(x1 <= x2, x1, x2)`` when
+    neither x1 nor x2 are NaNs, but it is faster and does proper
+    broadcasting.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.minimum.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.minimum([2, 3, 4], [1, 5, 2])
+    Tensor([1, 3, 2])
+
+    >>> mg.minimum(mg.eye(2), [0.5, 2]) # broadcasting
+    Tensor([[ 0.5,  0. ],
+           [ 0. ,  1. ]])
+
+    >>> mg.minimum([mg.nan, 0, mg.nan],[0, mg.nan, mg.nan])
+    Tensor([nan, nan, nan])
+    >>> mg.minimum(-mg.Inf, 1)
+    Tensor(-inf)
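+
+    As an illustrative sketch, the gradient flows only to the smaller of the
+    two elements at each position (values assumed for demonstration):
+
+    >>> x = mg.tensor([1., 5.])
+    >>> y = mg.tensor([4., 2.])
+    >>> mg.minimum(x, y).backward()
+    >>> x.grad
+    array([1., 0.])
+    >>> y.grad
+    array([0., 1.])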
+    """
+    ...
+
+
+
[docs]@implements_numpy_override() +def clip( + a: ArrayLike, + a_min: Union[ArrayLike, None], + a_max: Union[ArrayLike, None], + out: Optional[Union[np.ndarray, Tensor]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to `mg.minimum(a_max, mg.maximum(a, a_min))``. + + No check is performed to ensure ``a_min < a_max``. + + This docstring was adapted from that of `numpy.clip` + + Parameters + ---------- + a : ArrayLike + Array containing elements to clip. + + a_min : Optional[float, ArrayLike] + Minimum value. If `None`, clipping is not performed on lower + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. + + a_max : Optional[float, ArrayLike] + Maximum value. If `None`, clipping is not performed on upper + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. If `a_min` or `a_max` are ArrayLike, then the three + arrays will be broadcasted to match their shapes. + + out : Optional[Union[ndarray, Tensor]] + A location into which the result is stored. If provided, it must have + a shape that the inputs broadcast to. If not provided or None, a + freshly-allocated tensor is returned. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not backpropagate a gradient) + + Returns + ------- + Tensor + A tensor with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.arange(10) + >>> mg.clip(a, 1, 8) + Tensor([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> a + Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> mg.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + Tensor([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])""" + if np.__version__ < "2.1.0" and a_min is None and a_max is None: # pragma: no cover + raise ValueError("`a_min` and `a_max` cannot both be set to `None`") + + if a_min is not None: + a = maximum(a_min, a, out=out, constant=constant) + + if a_max is not None: + a = minimum(a_max, a, out=out, constant=constant) + return mg.astensor(a)
+ + +@ufunc_creator(MatMul) +def matmul( + x1: ArrayLike, + x2: ArrayLike, + out: Optional[Union[np.ndarray, Tensor]] = None, + *, + dtype: DTypeLikeReals = None, + constant: Optional[bool] = None, +) -> Tensor: # pragma: no cover + r""" + Matrix product of two tensors: + + ``matmul(x, y)`` is equivalent to ``x @ y``. + + This documentation was adapted from ``numpy.matmul`` + + The behavior depends on the arguments in the following way. + + - If both arguments are 2-D they are multiplied like conventional + matrices. + - If either argument is N-D, N > 2, it is treated as a stack of + matrices residing in the last two indexes and broadcast accordingly. + - If the first argument is 1-D, it is promoted to a matrix by + prepending a 1 to its dimensions. After matrix multiplication + the prepended 1 is removed. + - If the second argument is 1-D, it is promoted to a matrix by + appending a 1 to its dimensions. After matrix multiplication + the appended 1 is removed. + + Multiplication by a scalar is not allowed, use ``*`` instead. Note that + multiplying a stack of matrices with a vector will result in a stack of + vectors, but matmul will not recognize it as such. + + ``matmul`` differs from ``numpy.dot`` in two important ways. + + - Multiplication by scalars is not allowed. + - Stacks of matrices are broadcast together as if the matrices + were elements. + + + Parameters + ---------- + x1 : ArrayLike + + x2 : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + dtype : Optional[DTypeLikeReals] + The dtype of the resulting tensor. + + out : Optional[Union[ndarray, Tensor]] + A location into which the result is stored. If provided, it must have + a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated tensor is returned. + + Returns + ------- + output : mygrad.Tensor + Returns the matrix product of ``x1`` and `x2``. + + Raises + ------ + ValueError + If : + - The last dimension of ``x1`` is not the same size as + the second-to-last dimension of ``x2``. + - If scalar value is passed. + + See Also + -------- + einsum : Einstein summation convention. + + Notes + ----- + The matmul function implements the semantics of the `@` operator introduced + in Python 3.5 following PEP465. + + Examples + -------- + For two 2D tensors, ``matmul(a, b)`` is the matrix product :math:`\sum_{j}{A_{ij} B_{jk}} = F_{ik}`: + + >>> import mygrad as mg + >>> a = [[1, 0], [0, 1]] + >>> b = [[4, 1], [2, 2]] + >>> mg.matmul(a, b) + Tensor([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the matrix-vector product, :math:`\sum_{j}{A_{ij} B_{j}} = F_{i}`: + + >>> a = [[1, 0], [0, 1]] + >>> b = [1, 2] + >>> mg.matmul(a, b) + Tensor([1, 2]) + + Broadcasting is conventional for stacks of arrays. Here ``a`` is treated + like a stack of three 5x6 matrices, and the 6x4 matrix ``b`` is broadcast + matrix-multiplied against each one. This produces a shape-(3, 5, 4) tensor + as a result. + + >>> a = mg.arange(3*5*6).reshape((3,5,6)) + >>> b = mg.arange(6*4).reshape((6,4)) + >>> mg.matmul(a,b).shape + (3, 5, 4) + + Scalar multiplication raises an error. + + >>> mg.matmul(a, 3) + Traceback (most recent call last): + ... + ValueError: Scalar operands are not allowed, use '*' instead""" + ... + + +
[docs]def multi_matmul(tensors: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """ + Matrix product of two or more tensors calculated in the optimal ordering + + Parameters + ---------- + tensors: Sequence[array_like] + The sequence of tensors to be matrix-multiplied. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + Returns the matrix product of the tensors provided + + + Extended Summary + ---------------- + This documentation was adapted from ``numpy.linalg.multi_dot`` + + Compute the matrix multiplication of two or more arrays in a single function + call, while automatically selecting the fastest evaluation order. + ``multi_matmul`` chains ``matmul`` and uses optimal parenthesization [1]_ [2]_. + Depending on the shapes of the matrices, this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + + If the last argument is 1-D it is treated as a column vector. + + The other arguments must be 2-D or greater. + + Think of `multi_dot` as an optimized version of:: + + def multi_dot(tensors): return functools.reduce(mg.matmul, tensors) + + Raises + ------ + ValueError + If ``tensors`` contains less than two array_like items. + + ValueError + If ``tensor`` other than the first or last is less than two dimensional + + See Also + -------- + matmul : matrix multiplication with two arguments. + + References + ---------- + + .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 + .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication + + Notes + ----- + The cost for a matrix multiplication can be calculated with the + following function:: + + def cost(A, B): + return A.shape[0] * A.shape[1] * B.shape[1] + + Let's assume we have three matrices :math:`A_{10x100}, B_{100x5}, C_{5x50}`. + + The costs for the two different parenthesizations are as follows:: + + cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 + cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 + + Examples + -------- + ``multi_matmul`` allows you to write: + + >>> from mygrad.math.misc.funcs import matmul >>> from mygrad import multi_matmul, Tensor + >>> import numpy as np + >>> # Prepare some random tensors + >>> A = Tensor(np.random.random((10000, 100))) + >>> B = Tensor(np.random.random((100, 1000))) + >>> C = Tensor(np.random.random((1000, 5))) + >>> D = Tensor(np.random.random((5, 333))) + >>> # the actual matrix multiplication + >>> multi_matmul([A, B, C, D]) # computes (A @ (B @ C)) @ D + + instead of: + + >>> matmul(matmul(matmul(A, B), C), D) + >>> # or + >>> A @ B @ C @ D + """ + + for a in tensors: + if not (1 <= a.ndim <= 2): + raise ValueError( + "%d-dimensional tensor given. 
Tensor must be one or two-dimensional" + % (a.ndim,) + ) + + n = len(tensors) + if n < 2: + raise ValueError("Expecting at least two arrays.") + elif n == 2: + return matmul(tensors[0], tensors[1], constant=constant) + + tensors = [a if isinstance(a, Tensor) else np.asarray(a) for a in tensors] + + # save original ndim to reshape the result array into the proper form later + ndim_first, ndim_last = tensors[0].ndim, tensors[-1].ndim + + # Explicitly convert vectors to 2D arrays to keep the logic of this function simpler + if tensors[0].ndim == 1: + tensors[0] = mg.expand_dims( + tensors[0], + axis=0, + constant=tensors[0].constant if isinstance(tensors[0], Tensor) else True, + ) + if tensors[-1].ndim == 1: + tensors[-1] = mg.expand_dims( + tensors[-1], + axis=1, + constant=tensors[-1].constant if isinstance(tensors[-1], Tensor) else True, + ) + + if n == 3: + result = _multi_matmul_three( + tensors[0], tensors[1], tensors[2], constant=constant + ) + else: + order = _multi_matmul_chain_order(tensors) + result = _multi_matmul(tensors, order, 0, n - 1, constant=constant) + + # return proper shape since we possibly added dimensions to the first + # and last arrays + if ndim_first == 1 and ndim_last == 1: + result = result[0, 0] + return result + elif ndim_first == 1 or ndim_last == 1: + result = result.reshape(-1) + return result + else: + return result
+ + +def _multi_matmul_three(A, B, C, *, constant=None) -> Tensor: + """ + Find the best order for three arrays and do the multiplication. + + """ + a0, a1b0 = A.shape[-2:] + b1c0, c1 = C.shape[-2:] + cost1 = a0 * b1c0 * (a1b0 + c1) + cost2 = a1b0 * c1 * (a0 + b1c0) + + if cost1 < cost2: + return matmul(matmul(A, B, constant=constant), C, constant=constant) + else: + return matmul(A, matmul(B, C, constant=constant), constant=constant) + + +def _multi_matmul_chain_order(arrays): + """ + Return a np.array that encodes the optimal order of multiplications. + The optimal order array is then used by `_multi_matmul()` to do the + multiplication. + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + # Using -2 to generalize for shapes that are more than 2 dimensions + p = [a.shape[-2] for a in arrays] + [arrays[-1].shape[-1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = np.zeros((n, n), dtype=np.double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = np.empty((n, n), dtype=np.intp) + + for ind in range(1, n): + for i in range(n - ind): + j = i + ind + m[i, j] = np.inf + for k in range(i, j): + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + return s + + +def _multi_matmul(arrays, order, i, j, *, constant=None) -> Tensor: + """Actually do the multiplication with the given order.""" + if i == j: + return arrays[i] + else: + return matmul( + _multi_matmul(arrays, order, i, order[i, j], constant=constant), + _multi_matmul(arrays, order, order[i, j] + 1, j, constant=constant), + constant=constant, + ) +
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/math/sequential/funcs.html b/docs/_modules/mygrad/math/sequential/funcs.html new file mode 100644 index 00000000..5302b315 --- /dev/null +++ b/docs/_modules/mygrad/math/sequential/funcs.html @@ -0,0 +1,1279 @@ + + + + + + + + + + mygrad.math.sequential.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for mygrad.math.sequential.funcs

+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+from mygrad.operation_base import _NoValue
+from mygrad.tensor_base import (
+    _REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS,
+    Tensor,
+    implements_numpy_override,
+)
+from mygrad.typing import ArrayLike
+
+from .ops import *
+
+Axis = Union[None, int, Tuple[int, ...]]
+
+
+__all__ = [
+    "sum",
+    "mean",
+    "var",
+    "std",
+    "amax",
+    "amin",
+    "max",
+    "min",
+    "prod",
+    "cumprod",
+    "cumsum",
+]
+
+
+
[docs]@implements_numpy_override() +def sum( + x: ArrayLike, + axis: Axis = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Sum of tensor elements over a given axis. + + Parameters + ---------- + x : ArrayLike + + axis : Optional[int, Tuple[ints, ...]] + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input tensor. If + axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + sum_along_axis : mygrad.Tensor + A Tensor with the same shape as `self`, with the specified + axis/axes removed. If `self` is a 0-d tensor, or if `axis` is None, + a 0-dim Tensor is returned. + + See Also + -------- + mygrad.Tensor.sum : Equivalent method. + + cumsum : Cumulative sum of array elements. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty tensor is the neutral element 0: + + >>> mygrad.sum([]) + Tensor(0.0) + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> mg.sum([0.5, 1.5]) + Tensor(2.0) + >>> mg.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + Tensor(1) + >>> mg.sum([[0, 1], [0, 5]]) + Tensor(6) + >>> mg.sum([[0, 1], [0, 5]], axis=0) + Tensor([0, 6]) + >>> mg.sum([[0, 1], [0, 5]], axis=1) + Tensor([1, 5]) + + If the accumulator is too small, overflow occurs: + + >>> mg.ones(128, dtype=mg.int8).sum(dtype=np.int8) + Tensor(-128) + """ + return Tensor._op( + Sum, x, op_kwargs={"axis": axis, "keepdims": keepdims}, constant=constant + )
+ + +
[docs]@implements_numpy_override() +def mean( + x: ArrayLike, + axis: Axis = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Mean of tensor elements over a given axis. + + Parameters + ---------- + x : ArrayLike + + axis : Optional[int, Tuple[ints, ...] + Axis or axes along which a mean is performed. The default, + axis=None, will mean all of the elements of the input tensor. If + axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, a mean is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mean_along_axis : Tensor + A Tensor with the same shape as `self`, with the specified + axis/axes removed. If `self` is a 0-d tensor, or if `axis` is None, + a 0-dim Tensor is returned. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.Tensor([[1, 2], + ... [3, 4]]) + >>> mg.mean(a) + Tensor(2.5) + >>> mg.mean(a, axis=0) + Tensor([ 2., 3.]) + >>> mg.mean(a, axis=1) + Tensor([ 1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = mg.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> mg.mean(a) + Tensor(0.54999924) + + Computing the mean in float64 is more accurate: + + >>> mg.mean(a, dtype=np.float64) + Tensor(0.55000000074505806) + """ + return Tensor._op( + Mean, x, op_kwargs={"axis": axis, "keepdims": keepdims}, constant=constant + )
+ + +
[docs]@implements_numpy_override() +def var( + x: ArrayLike, + axis: Axis = None, + ddof: int = 0, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + x : ArrayLike + Array containing numbers whose variance is desired. + + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + ddof : int, optional (default=0) + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array.. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + Returns + ------- + variance : mygrad.Tensor + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.Tensor([[1, 2], + ... [3, 4]]) + >>> mg.var(a) + Tensor(1.25) + >>> mg.var(a, axis=0) + Tensor([ 1., 1.]) + >>> mg.var(a, axis=1) + Tensor([ 0.25, 0.25]) + + In single precision, ``var()`` can be inaccurate: + + >>> a = mg.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> mg.var(a) + Tensor(0.20250003) + + Computing the variance in float64 is more accurate: + + >>> mg.var(a, dtype=np.float64) + Tensor(0.20249999932944759) + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + Tensor(0.2025) + """ + return Tensor._op( + Variance, + x, + op_kwargs={"axis": axis, "keepdims": keepdims, "ddof": ddof}, + constant=constant, + )
+ + +
[docs]@implements_numpy_override() +def std( + x: ArrayLike, + axis: Axis = None, + ddof: int = 0, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Compute the standard deviation along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + x : ArrayLike + Array containing numbers whose standard deviation is desired. + + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + ddof : int, optional (default=0) + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + std : mygrad.Tensor + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.Tensor([[1, 2], + ... [3, 4]]) + >>> mg.std(a) + Tensor(1.1180339887498949) + >>> mg.std(a, axis=0) + Tensor([ 1., 1.]) + >>> mg.std(a, axis=1) + Tensor([ 0.5, 0.5]) + + In single precision, ``var()`` can be inaccurate: + + >>> a = mg.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> mg.std(a) + Tensor(0.45000005) + + Computing the variance in float64 is more accurate: + + >>> mg.std(a, dtype=np.float64) + Tensor(0.44999999925494177) + """ + return Tensor._op( + StdDev, + x, + op_kwargs={"axis": axis, "keepdims": keepdims, "ddof": ddof}, + constant=constant, + )
+ + +
[docs]@implements_numpy_override() +def max( + x: ArrayLike, + axis: Axis = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return the maximum of a tensor or maximum along its axes. + + Parameters + ---------- + x : ArrayLike + + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + max : mygrad.Tensor + Maximum of `a`. If `axis` is None, the result is a 0-D tensor. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.arange(4).reshape((2,2)) + >>> a + Tensor([[0, 1], + [2, 3]]) + >>> mg.amax(a) # Maximum of the flattened array + Tensor(3) + >>> mg.amax(a, axis=0) # Maxima along the first axis + Tensor([2, 3]) + >>> mg.amax(a, axis=1) # Maxima along the second axis + Tensor([1, 3]) + >>> b = mg.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> mg.amax(b) + Tensor(nan) + """ + return Tensor._op( + Max, + x, + op_kwargs={"axis": axis, "keepdims": keepdims, "dtype": _NoValue}, + constant=constant, + )
+ + +
[docs]@implements_numpy_override() +def min( + x: ArrayLike, + axis: Axis = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return the minimum of a tensor or minimum along its axes. + + Parameters + ---------- + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + min : mygrad.Tensor + Minimum of `a`. If `axis` is None, the result is a 0-D tensor. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.arange(4).reshape((2,2)) + >>> a + Tensor([[0, 1], + [2, 3]]) + >>> mg.amin(a) # Minimum of the flattened array + Tensor(0) + >>> mg.amin(a, axis=0) # Minima along the first axis + Tensor([0, 1]) + >>> mg.amin(a, axis=1) # Minima along the second axis + Tensor([0, 2]) + >>> b = mg.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> mg.amin(b) + Tensor(nan) + """ + return Tensor._op( + Min, + x, + op_kwargs={"axis": axis, "keepdims": keepdims, "dtype": _NoValue}, + constant=constant, + )
+ + +# aliases +amin = min +amax = max +_REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS[np.amin] = amin +_REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS[np.amax] = amax + + +
[docs]@implements_numpy_override() +def prod( + a: ArrayLike, + axis: Axis = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return the product of array elements over given axes. + + Parameters + ---------- + a : ArrayLike + Input data. + + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + product_along_axis : mygrad.Tensor + A tensor shaped as `a` but with the specified axis removed. + + Notes + ----- + The product of an empty tensor is the neutral element 1: + + >>> import mygrad + >>> mygrad.prod([]) + Tensor(1.0) + + Examples + -------- + By default, calculate the product of all elements: + + >>> import mygrad as mg + >>> mg.prod([1.,2.]) + Tensor(2.0) + + Even when the input array is two-dimensional: + + >>> mg.prod([[1.,2.], + ... [3.,4.]]) + Tensor(24.0) + + But we can also specify the axis over which to multiply: + + >>> mg.prod([[1.,2.], + ... [3.,4.]], axis=1) + Tensor([ 2., 12.])""" + return Tensor._op( + Prod, a, op_kwargs={"axis": axis, "keepdims": keepdims}, constant=constant + )
+ + +
[docs]@implements_numpy_override()
+def cumprod(
+    a: ArrayLike, axis: Axis = None, *, constant: Optional[bool] = None
+) -> Tensor:
+    """
+    Return the cumulative product of elements along a given axis.
+
+    This docstring was adapted from the official numpy documentation
+
+    Parameters
+    ----------
+    a : ArrayLike
+        Input array.
+
+    axis : Optional[int]
+        Axis along which the cumulative product is computed. By default
+        the input is flattened.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    Returns
+    -------
+    mygrad.Tensor
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    Examples
+    --------
+    >>> from mygrad import cumprod, Tensor
+    >>> a = Tensor([[1, 2, 3],
+    ...             [4, 5, 6]])
+
+    >>> cumprod(a)
+    Tensor([ 1 2 6 24 120 720])
+
+    The cumulative product for each column (i.e., over the rows) of `a`:
+
+    >>> cumprod(a, axis=0)
+    Tensor([[ 1,  2,  3],
+            [ 4, 10, 18]])
+
+    The cumulative product for each row (i.e. over the columns) of `a`:
+
+    >>> cumprod(a, axis=1)
+    Tensor([[  1,   2,   6],
+            [  4,  20, 120]])"""
+
+    return Tensor._op(CumProd, a, op_kwargs={"axis": axis}, constant=constant)
+ + +
[docs]@implements_numpy_override() +def cumsum( + a: ArrayLike, axis: Axis = None, *, constant: Optional[bool] = None +) -> Tensor: + """ + Return the cumulative sum of the elements along a given axis. + + This docstring was adapted from the official numpy documentation + + Parameters + ---------- + a : ArrayLike + Input array. + + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + + Examples + -------- + >>> from mygrad import cumsum, Tensor + >>> a = Tensor([[1, 2, 3], + ... [4, 5, 6]]) + >>> cumsum(a) + Tensor([ 1, 3, 6, 10, 15, 21]) + + >>> cumsum(a, axis=0) # sum over rows for each of the 3 columns + Tensor([[1, 2, 3], + [5, 7, 9]]) + >>> cumsum(a, axis=1) # sum over columns for each of the 2 rows + Tensor([[ 1, 3, 6], + [ 4, 9, 15]]) + """ + + return Tensor._op(CumSum, a, op_kwargs={"axis": axis}, constant=constant)
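A short back-propagation sketch for ``cumsum``: since element ``x[i]`` feeds every partial sum from position ``i`` onward, the gradient of the summed cumulative sum counts down (the expected values below follow from that observation):

import mygrad as mg

x = mg.tensor([1.0, 2.0, 3.0, 4.0])
y = mg.cumsum(x)        # Tensor([ 1.,  3.,  6., 10.])
mg.sum(y).backward()
# x[i] appears in (len(x) - i) of the partial sums,
# so x.grad should be [4., 3., 2., 1.]
print(x.grad)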
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/math/trigonometric/funcs.html b/docs/_modules/mygrad/math/trigonometric/funcs.html new file mode 100644 index 00000000..d5df89b1 --- /dev/null +++ b/docs/_modules/mygrad/math/trigonometric/funcs.html @@ -0,0 +1,1357 @@ + + + + + + + + + + mygrad.math.trigonometric.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.math.trigonometric.funcs

+from typing import Optional, Union
+
+from numpy import ndarray
+
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike, DTypeLikeReals, Mask
+from mygrad.ufuncs import ufunc_creator
+
+from .ops import *
+
+__all__ = [
+    "sin",
+    "sinc",
+    "cos",
+    "tan",
+    "cot",
+    "csc",
+    "sec",
+    "arccos",
+    "arccsc",
+    "arcsin",
+    "arctan",
+    "arcsec",
+    "arccot",
+    "arctan2",
+]
+
+
+@ufunc_creator(Sin)
+def sin(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    r"""Trigonometric sine, element-wise.
+
+    This docstring was adapted from that of numpy.sin [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        The sine of each element of x.
+
+    See Also
+    --------
+    arcsin, sinh, cos
+
+    Notes
+    -----
+    The sine is one of the fundamental functions of trigonometry (the
+    mathematical study of triangles).  Consider a circle of radius 1
+    centered on the origin.  A ray comes in from the :math:`+x` axis, makes
+    an angle at the origin (measured counter-clockwise from that axis), and
+    departs from the origin.  The :math:`y` coordinate of the outgoing
+    ray's intersection with the unit circle is the sine of that angle.  It
+    ranges from -1 for :math:`x=3\pi / 2` to +1 for :math:`\pi / 2.`  The
+    function has zeroes where the angle is a multiple of :math:`\pi`.
+    Sines of angles between :math:`\pi` and :math:`2\pi` are negative.
+    The numerous properties of the sine and related functions are included
+    in any standard trigonometry text.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.sin.html
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.sin(mg.pi/2.)
+    Tensor(1.0)
+
+    Print sines of an array of angles given in degrees:
+
+    >>> mg.sin(mg.tensor((0., 30., 45., 60., 90.)) * mg.pi / 180. )
+    Tensor([ 0.        ,  0.5       ,  0.70710678,  0.8660254 ,  1.        ])
+    """
+    ...
+
+
+@ufunc_creator(Cos)
+def cos(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Trigonometric cosine, element-wise.
+
+    This docstring was adapted from that of numpy.cos [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input array in radians.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        The corresponding cosine values.
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`.  (See Examples)
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.cos.html
+
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972.
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.cos([0, mg.pi/2, mg.pi])
+    Tensor([  1.00000000e+00,   6.12303177e-17,  -1.00000000e+00])
+    """
+    ...
+
+
+@ufunc_creator(Tan)
+def tan(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Trigonometric tangent, element-wise.
+
+    This docstring was adapted from that of numpy.tan [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input array.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    y : Tensor
+        The corresponding tangent values.
+
+    Notes
+    -----
+    If `out` is provided, the function writes the result into it,
+    and returns a reference to `out`.  (See Examples)
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.tan.html
+
+    M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+    New York, NY: Dover, 1972.
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from math import pi
+    >>> mg.tan([-pi, pi / 2, pi])
+    Tensor([  1.22460635e-16,   1.63317787e+16,  -1.22460635e-16])
+    """
+    ...
+
+
+@ufunc_creator(Arcsin)
+def arcsin(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Inverse sine, element-wise.
+
+    This docstring was adapted from that of numpy.arcsin [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        `y`-coordinate on the unit circle.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    angle : Tensor
+        The inverse sine of each element in `x`, in radians and in the
+        closed interval ``[-pi/2, pi/2]``.
+
+    See Also
+    --------
+    sin, cos, arccos, tan, arctan, arctan2
+
+    Notes
+    -----
+    `arcsin` is a multivalued function: for each `x` there are infinitely
+    many numbers `z` such that :math:`sin(z) = x`.  The convention is to
+    return the angle `z` whose real part lies in [-pi/2, pi/2].
+
+    For real-valued input data types, *arcsin* always returns real output.
+    For each value that cannot be expressed as a real number or infinity,
+    it yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `arcsin` is a complex analytic function that
+    has, by convention, the branch cuts [-inf, -1] and [1, inf]  and is
+    continuous from above on the former and from below on the latter.
+
+    The inverse sine is also known as `asin` or sin^{-1}.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.arcsin.html
+
+    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
+    10th printing, New York: Dover, 1964, pp. 79ff.
+    http://www.math.sfu.ca/~cbm/aands/
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> mg.arcsin(1)     # pi/2
+    Tensor(1.5707963267948966)
+    >>> mg.arcsin(-1)    # -pi/2
+    Tensor(-1.5707963267948966)
+    >>> mg.arcsin(0)
+    Tensor(0.0)
+    """
+    ...
+
+
+@ufunc_creator(Arccos)
+def arccos(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Inverse cosine, element-wise.
+
+    This docstring was adapted from that of numpy.arccos [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+        `x`-coordinate on the unit circle.
+        For real arguments, the domain is [-1, 1].
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    angle : Tensor
+        The angle of the ray intersecting the unit circle at the given
+        `x`-coordinate in radians [0, pi].
+
+    See Also
+    --------
+    cos, arctan, arcsin
+
+    Notes
+    -----
+    `arccos` is a multivalued function: for each `x` there are infinitely
+    many numbers `z` such that `cos(z) = x`. The convention is to return
+    the angle `z` whose real part lies in `[0, pi]`.
+
+    For real-valued input data types, `arccos` always returns real output.
+    For each value that cannot be expressed as a real number or infinity,
+    it yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `arccos` is a complex analytic function that
+    has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
+    above on the former and from below on the latter.
+
+    The inverse `cos` is also known as `acos` or cos^-1.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.arccos.html
+
+    M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
+    10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
+
+    Examples
+    --------
+    We expect the arccos of 1 to be 0, and of -1 to be pi:
+
+    >>> import mygrad as mg
+    >>> mg.arccos([1, -1])
+    Tensor([ 0.        ,  3.14159265])
+    """
+    ...
+
+
+@ufunc_creator(Arctan)
+def arctan(
+    x: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Inverse tangent, element-wise.
+
+    This docstring was adapted from that of numpy.arctan [1]_
+
+    Parameters
+    ----------
+    x : ArrayLike
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    out : Tensor
+
+    See Also
+    --------
+    arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
+        and the positive `x`-axis.
+
+    Notes
+    -----
+    `arctan` is a multi-valued function: for each `x` there are infinitely
+    many numbers `z` such that tan(`z`) = `x`.  The convention is to return
+    the angle `z` whose real part lies in [-pi/2, pi/2].
+
+    For real-valued input data types, `arctan` always returns real output.
+    For each value that cannot be expressed as a real number or infinity,
+    it yields ``nan`` and sets the `invalid` floating point error flag.
+
+    For complex-valued input, `arctan` is a complex analytic function that
+    has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
+    from the left on the former and from the right on the latter.
+
+    The inverse tangent is also known as `atan` or tan^{-1}.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.arctan.html
+
+    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
+    10th printing, New York: Dover, 1964, pp. 79.
+    http://www.math.sfu.ca/~cbm/aands/
+
+    Examples
+    --------
+    We expect the arctan of 0 to be 0, and of 1 to be pi/4:
+
+    >>> import mygrad as mg
+    >>> mg.arctan([0, 1])
+    Tensor([ 0.        ,  0.78539816])
+
+    >>> mg.pi / 4
+    0.78539816339744828
+    """
+    ...
+
+
+@ufunc_creator(Arctan2)
+def arctan2(
+    x1: ArrayLike,
+    x2: ArrayLike,
+    out: Optional[Union[ndarray, Tensor]] = None,
+    *,
+    where: Mask = True,
+    dtype: DTypeLikeReals = None,
+    constant: Optional[bool] = None,
+) -> Tensor:  # pragma: no cover
+    """Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
+
+    This docstring was adapted from that of numpy.arctan2 [1]_
+
+    Parameters
+    ----------
+    x1 : ArrayLike
+        ``y``-coordinates.
+
+    x2 : ArrayLike
+        ``x``-coordinates.
+
+    out : Optional[Union[Tensor, ndarray]]
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not provided or None,
+        a freshly-allocated tensor is returned.
+
+    constant : Optional[bool]
+        If ``True``, this tensor is treated as a constant, and thus does not
+        facilitate back propagation (i.e. ``constant.grad`` will always return
+        ``None``).
+
+        Defaults to ``False`` for float-type data.
+        Defaults to ``True`` for integer-type data.
+
+        Integer-type tensors must be constant.
+
+    where : Mask
+        This condition is broadcast over the input. At locations where the
+        condition is True, the ``out`` tensor will be set to the ufunc result.
+        Elsewhere, the ``out`` tensor will retain its original value.
+        Note that if an uninitialized `out` tensor is created via the default
+        ``out=None``, locations within it where the condition is False will
+        remain uninitialized.
+
+    dtype : Optional[DTypeLikeReals]
+        The dtype of the resulting tensor.
+
+    Returns
+    -------
+    angle : Tensor
+        Tensor of angles in radians, in the range ``[-pi, pi]``.
+
+    See Also
+    --------
+    arctan, tan
+
+    Notes
+    -----
+    *arctan2* is identical to the `atan2` function of the underlying
+    C library.  The following special values are defined in the C
+    standard: [2]_
+
+    ====== ====== ================
+    `x1`   `x2`   `arctan2(x1,x2)`
+    ====== ====== ================
+    +/- 0  +0     +/- 0
+    +/- 0  -0     +/- pi
+     > 0   +/-inf +0 / +pi
+     < 0   +/-inf -0 / -pi
+    +/-inf +inf   +/- (pi/4)
+    +/-inf -inf   +/- (3*pi/4)
+    ====== ====== ================
+
+    Note that +0 and -0 are distinct floating point numbers, as are +inf
+    and -inf.
+
+    References
+    ----------
+    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.arctan2.html
+    .. [2] ISO/IEC standard 9899:1999, "Programming language C."
+
+    Examples
+    --------
+    Consider four points in different quadrants:
+
+    >>> import mygrad as mg
+    >>> x = mg.tensor([-1.0, +1.0, +1.0, -1.0])
+    >>> y = mg.tensor([-1.0, -1.0, +1.0, +1.0])
+    >>> mg.arctan2(y, x) * 180 / mg.pi
+    Tensor([-135.,  -45.,   45.,  135.])
+
+    Note the order of the parameters. `arctan2` is defined also when `x2` = 0
+    and at several other special points, obtaining values in
+    the range ``[-pi, pi]``:
+
+    >>> mg.arctan2([1., -1.], [0., 0.])
+    Tensor([ 1.57079633, -1.57079633])
+    >>> mg.arctan2([0., 0., mg.inf], [+0., -0., mg.inf])
+    Tensor([ 0.        ,  3.14159265,  0.78539816])"""
+    ...
+
+
+
[docs]def sinc(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> sin(a) / a`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Sinc, a, constant=constant)
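Note that, per the docstring above, MyGrad's ``sinc`` is the unnormalized ``sin(a) / a`` (unlike NumPy's normalized ``sinc``). A hedged sketch comparing its back-propagated gradient against the analytic derivative (the closed form in the comment is ordinary calculus, not something stated in this module):

import numpy as np
import mygrad as mg

x = mg.tensor([0.5, 1.0, 2.0])
y = mg.sinc(x)          # sin(x) / x
mg.sum(y).backward()
# analytic derivative of sin(x)/x is (x*cos(x) - sin(x)) / x**2
expected = (x.data * np.cos(x.data) - np.sin(x.data)) / x.data ** 2
print(np.allclose(x.grad, expected))   # expected: True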
+ + +def cot(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> cot(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Cot, a, constant=constant) + + +def csc(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> csc(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Csc, a, constant=constant) + + +def sec(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> sec(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Sec, a, constant=constant) + + +def arccsc(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> arccsc(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Arccsc, a, constant=constant) + + +def arccot(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> arccot(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Arccot, a, constant=constant) + + +def arcsec(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """``f(a) -> arcsec(a)`` + + Parameters + ---------- + a : ArrayLike + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor""" + return Tensor._op(Arcsec, a, constant=constant) +
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/elu.html b/docs/_modules/mygrad/nnet/activations/elu.html new file mode 100644 index 00000000..e08d130e --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/elu.html @@ -0,0 +1,679 @@ + + + + + + + + + + mygrad.nnet.activations.elu — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.elu

+from numbers import Real
+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["elu"]
+
+
+class ELU(Operation):
+    """Returns the exponential linear activation (ELU) elementwise along x.
+
+    The ELU is given by `ɑ(exp(x) - 1) for x < 0 and x for x ≥ 0`.
+    """
+
+    def __call__(self, x, alpha):
+        """
+        Parameters
+        ----------
+        x : mygrad.Tensor
+            Input data.
+
+        alpha : Real
+            The multiplicative factor on the negative activation.
+
+        Returns
+        -------
+        numpy.ndarray
+            The ELU function applied to `x` elementwise.
+        """
+        self.variables = (x,)
+
+        x = x.data
+        self.exp = alpha * (np.exp(x) - 1)
+        self.alpha = alpha
+        return np.where(x < 0, self.exp, x)
+
+    def backward_var(self, grad, index, **kwargs):
+        x = self.variables[index]
+        return grad * np.where(x.data < 0, self.exp + self.alpha, 1)
+
+
+
[docs]def elu(x: ArrayLike, alpha: Real, *, constant: Optional[bool] = None) -> Tensor: + """Returns the exponential linear activation (ELU) elementwise along x. + + The ELU is given by `ɑ(exp(x) - 1) for x < 0 and x for x ≥ 0`. + + Parameters + ---------- + x : ArrayLike + Input data. + + alpha : Real + The multiplicative factor on the negative activation. + + constant : Optional[bool] + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + The ELU function applied to `x` elementwise. + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet.activations import elu + >>> x = mg.arange(-5, 6) + >>> x + Tensor([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) + >>> y = elu(x, alpha=0.1); y + Tensor([-0.09932621, -0.09816844, -0.09502129, -0.08646647, -0.06321206, + 0. , 1. , 2. , 3. , 4. , + 5. ]) + >>> y.backward() + >>> x.grad + array([6.73794700e-04, 1.83156389e-03, 4.97870684e-03, 1.35335283e-02, + 3.67879441e-02, 1.00000000e+00, 1.00000000e+00, 1.00000000e+00, + 1.00000000e+00, 1.00000000e+00, 1.00000000e+00]) + + .. plot:: + + >>> import mygrad as mg + >>> from mygrad.nnet.activations import elu + >>> import matplotlib.pyplot as plt + >>> x = mg.linspace(-2, 2, 100) + >>> y = elu(x, alpha=0.1) + >>> plt.title("elu(x, alpha=0.1)") + >>> y.backward() + >>> plt.plot(x, x.grad, label="df/dx") + >>> plt.plot(x, y, label="f(x)") + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + """ + if isinstance(alpha, (np.ndarray, Tensor)): + alpha = alpha.item() + + if not isinstance(alpha, Real): + raise TypeError( + f"`alpha` must be a real-valued scalar, got {alpha} (type {type(alpha)})" + ) + + return Tensor._op(ELU, x, op_args=(alpha,), constant=constant)
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/glu.html b/docs/_modules/mygrad/nnet/activations/glu.html new file mode 100644 index 00000000..c3021205 --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/glu.html @@ -0,0 +1,648 @@ + + + + + + + + + + mygrad.nnet.activations.glu — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.glu

+from typing import Optional
+
+from numpy import ndarray
+
+from mygrad.math.arithmetic.funcs import multiply
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+from .sigmoid import sigmoid
+
+
+
[docs]def glu(x: ArrayLike, axis: int = -1, *, constant: Optional[bool] = None) -> Tensor:
+    """Returns the Gated Linear Unit A * σ(B), where A and B are split from `x`.
+
+    Parameters
+    ----------
+    x : ArrayLike
+        The input.
+
+    axis : int, optional (default=-1)
+        The axis along which to split the input in half and apply the GLU.
+
+    constant : Optional[bool]
+        If ``True``, the returned tensor is a constant (it
+        does not back-propagate a gradient).
+
+    Returns
+    -------
+    mygrad.Tensor
+        The result of applying the Gated Linear Unit elementwise to the input.
+
+    Notes
+    -----
+    The Gated Linear Unit was proposed in the paper
+        "Language Modeling with Gated Convolutional Networks"
+        Yann Dauphin, Angela Fan, Michael Auli, David Grangier
+    available at https://arxiv.org/abs/1612.08083
+
+    The GLU operation splits the input `x` in half along `axis`, storing the first half in A and the
+    second in B. The return value is then A ⊙ σ(B), where ⊙ is elementwise multiplication and σ is
+    the sigmoid function.
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from mygrad.nnet.activations import glu
+    >>> x = mg.arange(-5., 5.)
+    >>> x
+    Tensor([-5., -4., -3., -2., -1.,  0.,  1.,  2.,  3.,  4.])
+    >>> y = glu(x); y
+    Tensor([-2.5       , -2.92423431, -2.64239123, -1.90514825, -0.98201379])
+    >>> y.backward()
+    >>> x.grad
+    array([ 0,  0,  0,  0,  0, -1,  0,  0,  0,  0])
+    """
+    if isinstance(axis, (ndarray, Tensor)):
+        axis = axis.item()
+
+    if not isinstance(axis, int):
+        raise TypeError(
+            f"`axis` must be an integer-valued scalar, got {axis} (type {type(axis)})"
+        )
+
+    first_idx = list(slice(None) for _ in x.shape)
+    second_idx = list(slice(None) for _ in x.shape)
+    first_idx[axis] = slice(0, x.shape[axis] // 2)
+    second_idx[axis] = slice(x.shape[axis] // 2, None)
+
+    first_half = x[tuple(first_idx)]
+    second_half = x[tuple(second_idx)]
+
+    if first_half.shape != second_half.shape:
+        raise ValueError(
+            f"The shapes after splitting must be the same but got {first_half.shape} "
+            f"and {second_half.shape}"
+        )
+    return multiply(first_half, sigmoid(second_half), constant=constant)
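Because the split happens along ``axis``, the GLU halves that axis of the output. A minimal sketch on a 2-D input (the shape expectation follows directly from the slicing logic shown above):

import mygrad as mg
from mygrad.nnet.activations import glu

x = mg.tensor([[1.0, 2.0, 3.0, 4.0],
               [5.0, 6.0, 7.0, 8.0]])
y = glu(x, axis=-1)   # A = x[:, :2], B = x[:, 2:];  y = A * sigmoid(B)
print(y.shape)        # expected: (2, 2) -- the split axis is halved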
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/hard_tanh.html b/docs/_modules/mygrad/nnet/activations/hard_tanh.html new file mode 100644 index 00000000..175c721f --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/hard_tanh.html @@ -0,0 +1,662 @@ + + + + + + + + + + mygrad.nnet.activations.hard_tanh — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.hard_tanh

+from numbers import Real
+from typing import Optional
+
+from numpy import ndarray
+
+from mygrad.math.misc.funcs import maximum, minimum
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["hard_tanh"]
+
+
+
[docs]def hard_tanh( + x: ArrayLike, + *, + lower_bound: Real = -1, + upper_bound: Real = 1, + constant: Optional[bool] = None, +) -> Tensor: + """Returns the hard hyperbolic tangent function. + + The hard_tanh function is `lower_bound` where `x` <= `lower_bound`, `upper_bound` where + `x` >= `upper_bound`, and `x` where `lower_bound` < `x` < `upper_bound`. + + Parameters + ---------- + x : ArrayLike + The input, to which to apply the hard tanh function. + + lower_bound : Real, optional (default=-1) + The lower bound on the hard tanh. + + upper_bound : Real, optional (default=1) + The upper bound on the hard tanh. + + constant : Optional[bool] + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor + The result of applying the "hard-tanh" function elementwise to `x`. + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet.activations import hard_tanh + >>> x = mg.arange(-5, 6) + >>> x + Tensor([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) + >>> y = hard_tanh(x, lower_bound=-3, upper_bound=3); y + Tensor([-3, -3, -3, -2, -1, 0, 1, 2, 3, 3, 3]) + >>> y.backward() + >>> x.grad + array([0., 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.]) + + .. plot:: + + >>> import mygrad as mg + >>> from mygrad.nnet.activations import hard_tanh + >>> import matplotlib.pyplot as plt + >>> x = mg.linspace(-6, 6, 100) + >>> y = hard_tanh(x, lower_bound=-3, upper_bound=3) + >>> plt.title("hard_tanh(x, lower_bound=-3, upper_bound=3)") + >>> y.backward() + >>> plt.plot(x, x.grad, label="df/dx") + >>> plt.plot(x, y, label="f(x)") + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + """ + if isinstance(lower_bound, (ndarray, Tensor)): + lower_bound = lower_bound.item() + + if isinstance(upper_bound, (ndarray, Tensor)): + upper_bound = upper_bound.item() + + if not isinstance(lower_bound, Real): + raise TypeError( + f"`lower_bound` must be a real-valued scalar, got {lower_bound} (type { type(lower_bound)})" + ) + + if not isinstance(upper_bound, Real): + raise TypeError( + f"`upper_bound` must be a real-valued scalar, got {upper_bound} (type {type(upper_bound)})" + ) + + return maximum( + lower_bound, minimum(x, upper_bound, constant=constant), constant=constant + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/leaky_relu.html b/docs/_modules/mygrad/nnet/activations/leaky_relu.html new file mode 100644 index 00000000..6bf4b7df --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/leaky_relu.html @@ -0,0 +1,644 @@ + + + + + + + + + + mygrad.nnet.activations.leaky_relu — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.leaky_relu

+from numbers import Real
+from typing import Optional
+
+from numpy import ndarray
+
+from mygrad import maximum, minimum
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["leaky_relu"]
+
+
+
[docs]def leaky_relu(
+    x: ArrayLike, slope: float, *, constant: Optional[bool] = None
+) -> Tensor:
+    """Returns the leaky rectified linear activation elementwise along x.
+
+    The leaky ReLU is given by `max(x, 0) + slope*min(x, 0)`.
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input data.
+
+    slope : Union[Real, mygrad.Tensor]
+        The slope of the negative activation.
+
+    constant : Optional[bool]
+        If ``True``, the returned tensor is a constant (it
+        does not back-propagate a gradient).
+
+    Returns
+    -------
+    mygrad.Tensor
+        The result of applying the "leaky relu" function elementwise to `x`.
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from mygrad.nnet.activations import leaky_relu
+    >>> x = mg.arange(-5, 6)
+    >>> x
+    Tensor([-5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5])
+    >>> y = leaky_relu(x, slope=0.1); y
+    Tensor([-0.5, -0.4, -0.3, -0.2, -0.1,  0. ,  1. ,  2. ,  3. ,  4. ,  5. ])
+    >>> y.backward()
+    >>> x.grad
+    array([0.1, 0.1, 0.1, 0.1, 0.1, 0. , 1. , 1. , 1. , 1. , 1. ])
+
+    .. plot::
+
+       >>> import mygrad as mg
+       >>> from mygrad.nnet.activations import leaky_relu
+       >>> import matplotlib.pyplot as plt
+       >>> x = mg.linspace(-2, 2, 100)
+       >>> y = leaky_relu(x, slope=0.1)
+       >>> plt.title("leaky_relu(x, slope=0.1)")
+       >>> y.backward()
+       >>> plt.plot(x, x.grad, label="df/dx")
+       >>> plt.plot(x, y, label="f(x)")
+       >>> plt.legend()
+       >>> plt.grid()
+       >>> plt.show()
+    """
+    if isinstance(slope, (ndarray, Tensor)):
+        slope = slope.item()
+
+    if not isinstance(slope, Real):
+        raise TypeError(
+            f"`slope` must be a real-valued scalar, got {slope} (type {type(slope)})"
+        )
+
+    return maximum(x, 0, constant=constant) + slope * minimum(x, 0, constant=constant)
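As the ``isinstance`` check above suggests, ``slope`` may also be supplied as a 0-d Tensor or ndarray, which is reduced to a Python scalar via ``.item()``. A small sketch (expected output computed by hand from `max(x, 0) + slope*min(x, 0)`):

import mygrad as mg
from mygrad.nnet.activations import leaky_relu

x = mg.tensor([-2.0, 0.0, 3.0])
y = leaky_relu(x, slope=mg.tensor(0.1))   # 0-d Tensor slope is accepted
print(y.data)                             # expected: [-0.2,  0. ,  3. ]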
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/relu.html b/docs/_modules/mygrad/nnet/activations/relu.html new file mode 100644 index 00000000..a3dc5cb1 --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/relu.html @@ -0,0 +1,639 @@ + + + + + + + + + + mygrad.nnet.activations.relu — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.relu

+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+
+class ReLu(Operation):
+    def __call__(self, a):
+        self.variables = (a,)
+        self.back = np.asarray(a > 0, dtype=a.dtype)
+        return a.data * self.back
+
+    def backward_var(self, grad, index, **kwargs):
+        return grad * self.back
+
+
+
[docs]def relu(x: ArrayLike, *, constant: Optional[bool] = None) -> Tensor:
+    """
+    Applies the rectified linear unit activation function::
+
+        f(x) = {x, x > 0
+                0, x <= 0 }
+
+    Parameters
+    ----------
+    x : ArrayLike
+        relu is applied element-wise on ``x``.
+
+    constant : Optional[bool]
+        If ``True``, the returned tensor is a constant (it
+        does not back-propagate a gradient)
+
+    Returns
+    -------
+    mygrad.Tensor
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from mygrad.nnet import relu
+    >>> x = mg.linspace(-5, 5, 5)
+    >>> x
+    Tensor([-5. , -2.5,  0. ,  2.5,  5. ])
+    >>> relu(x)
+    Tensor([-0. , -0. ,  0. ,  2.5,  5. ])
+    >>> relu(x).backward()
+    >>> x.grad  # d(relu(x))/dx
+    array([0., 0., 0., 1., 1.])
+
+    .. plot::
+
+       >>> import mygrad as mg
+       >>> from mygrad.nnet.activations import relu
+       >>> import matplotlib.pyplot as plt
+       >>> x = mg.linspace(-2, 2, 100)
+       >>> y = relu(x)
+       >>> plt.title("relu(x)")
+       >>> y.backward()
+       >>> plt.plot(x, x.grad, label="df/dx")
+       >>> plt.plot(x, y, label="f(x)")
+       >>> plt.legend()
+       >>> plt.grid()
+       >>> plt.show()
+    """
+    return Tensor._op(ReLu, x, constant=constant)
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/selu.html b/docs/_modules/mygrad/nnet/activations/selu.html new file mode 100644 index 00000000..f43f3a2e --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/selu.html @@ -0,0 +1,674 @@ + + + + + + + + + + mygrad.nnet.activations.selu — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.selu

+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["selu"]
+
+
+_ALPHA = 1.6732632423543772848170429916717
+_SCALE = 1.0507009873554804934193349852946
+
+
+class SELU(Operation):
+    """Returns the scaled exponential linear activation (SELU) elementwise along x. The SELU is
+    given by  λɑ(exp(x) - 1) for x < 0 and λx for x ≥ 0.
+
+    Notes
+    -----
+    The SELU activation was proposed in the paper
+        Self-Normalizing Neural Networks
+        Günter Klambauer, Thomas Unterthiner, Andreas Mayr, Sepp Hochreiter
+    at https://arxiv.org/abs/1706.02515
+    """
+
+    def __call__(self, x):
+        """
+        Parameters
+        ----------
+        x : mygrad.Tensor
+            Input data.
+
+        Returns
+        -------
+        numpy.ndarray
+            The SELU function applied to `x` elementwise.
+        """
+        self.variables = (x,)
+
+        x = x.data
+        self.exp = _ALPHA * (np.exp(x) - 1)
+        return _SCALE * np.where(x < 0, self.exp, x)
+
+    def backward_var(self, grad, index, **kwargs):
+        x = self.variables[index]
+        return grad * _SCALE * np.where(x.data < 0, self.exp + _ALPHA, 1)
+
+
+
[docs]def selu(x: ArrayLike, *, constant: Optional[bool] = None) -> Tensor:
+    """Returns the scaled exponential linear activation (SELU) elementwise along x.
+
+    The SELU is given by λɑ(exp(x) - 1) for x < 0 and λx for x ≥ 0.
+
+    Parameters
+    ----------
+    x : ArrayLike
+        Input data.
+
+    constant : Optional[bool]
+        If ``True``, the returned tensor is a constant (it
+        does not back-propagate a gradient)
+
+    Returns
+    -------
+    mygrad.Tensor
+        The SELU function applied to `x` elementwise.
+
+    References
+    ----------
+    .. [1] Günter Klambauer, Thomas Unterthiner, Andreas Mayr, Sepp Hochreiter
+           Self-Normalizing Neural Networks
+           https://arxiv.org/abs/1706.02515
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from mygrad.nnet.activations import selu
+    >>> x = mg.arange(-5, 6)
+    >>> x
+    Tensor([-5, -4, -3, -2, -1,  0,  1,  2,  3,  4,  5])
+    >>> y = selu(x); y
+    Tensor([-1.74625336, -1.72589863, -1.67056873, -1.52016647, -1.11133074,
+             0.        ,  1.05070099,  2.10140197,  3.15210296,  4.20280395,
+             5.25350494])
+
+    .. plot::
+
+       >>> import mygrad as mg
+       >>> from mygrad.nnet.activations import selu
+       >>> import matplotlib.pyplot as plt
+       >>> x = mg.linspace(-2, 2, 100)
+       >>> y = selu(x)
+       >>> plt.title("selu(x)")
+       >>> y.backward()
+       >>> plt.plot(x, x.grad, label="df/dx")
+       >>> plt.plot(x, y, label="f(x)")
+       >>> plt.legend()
+       >>> plt.grid()
+       >>> plt.show()
+    """
+    return Tensor._op(SELU, x, constant=constant)
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/sigmoid.html b/docs/_modules/mygrad/nnet/activations/sigmoid.html new file mode 100644 index 00000000..0ec991b5 --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/sigmoid.html @@ -0,0 +1,636 @@ + + + + + + + + + + mygrad.nnet.activations.sigmoid — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.sigmoid

+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+
+class Sigmoid(Operation):
+    def __call__(self, a):
+        self.variables = (a,)
+        x = np.asarray(-1.0 * a.data)
+        np.exp(x, out=x)
+        x += 1
+        np.reciprocal(x, out=x)
+        self.sigmoid = x
+        return self.sigmoid
+
+    def backward_var(self, grad, index, **kwargs):
+        return grad * self.sigmoid * (1.0 - self.sigmoid)
+
+
+
[docs]def sigmoid(x: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """Applies the sigmoid activation function:: + + f(x) = 1 / (1 + exp(-x)) + + Parameters + ---------- + x : ArrayLike + sigmoid is applied element-wise on ``x``. + + constant : Optional[bool] + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet import sigmoid + >>> x = mg.linspace(-5, 5, 10) + >>> sigmoid(x) + Tensor([0.00669285, 0.02005754, 0.0585369 , 0.1588691 , 0.36457644, + 0.63542356, 0.8411309 , 0.9414631 , 0.97994246, 0.99330715]) + + .. plot:: + + >>> import mygrad as mg + >>> from mygrad.nnet.activations import sigmoid + >>> import matplotlib.pyplot as plt + >>> x = mg.linspace(-10, 10, 100) + >>> y = sigmoid(x) + >>> plt.title("sigmoid(x)") + >>> y.backward() + >>> plt.plot(x, x.grad, label="df/dx") + >>> plt.plot(x, y, label="f(x)") + >>> plt.legend() + >>> plt.grid() + >>> plt.show()""" + return Tensor._op(Sigmoid, x, constant=constant)
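A quick sketch of the gradient implied by the ``Sigmoid.backward_var`` method above, which propagates ``grad * s * (1 - s)`` (the check below simply re-derives that quantity from the forward output):

import numpy as np
import mygrad as mg
from mygrad.nnet import sigmoid

x = mg.tensor([-2.0, 0.0, 2.0])
s = sigmoid(x)
mg.sum(s).backward()
# with a seed gradient of ones, x.grad should equal s * (1 - s)
print(np.allclose(x.grad, s.data * (1 - s.data)))   # expected: True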
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/soft_sign.html b/docs/_modules/mygrad/nnet/activations/soft_sign.html new file mode 100644 index 00000000..cd311993 --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/soft_sign.html @@ -0,0 +1,625 @@ + + + + + + + + + + mygrad.nnet.activations.soft_sign — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.soft_sign

+from typing import Optional
+
+from mygrad import abs, divide
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["soft_sign"]
+
+
+
[docs]def soft_sign(x: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """Returns the soft sign function x / (1 + |x|). + + Parameters + ---------- + x : ArrayLike + Input data. + + constant : boolean, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor + The soft sign function applied to `x` elementwise. + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet.activations import soft_sign + >>> x = mg.arange(-5, 6) + >>> x + Tensor([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]) + >>> y = soft_sign(x); y + Tensor([-0.83333333, -0.8 , -0.75 , -0.66666667, -0.5 , + 0. , 0.5 , 0.66666667, 0.75 , 0.8 , + 0.83333333]) + + .. plot:: + + >>> import mygrad as mg + >>> from mygrad.nnet.activations import soft_sign + >>> import matplotlib.pyplot as plt + >>> x = mg.linspace(-10, 10, 100) + >>> y = soft_sign(x) + >>> plt.title("soft_sign(x)") + >>> y.backward() + >>> plt.plot(x, x.grad, label="df/dx") + >>> plt.plot(x, y, label="f(x)") + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + """ + return divide(x, 1 + abs(x), constant=constant)
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/activations/softmax.html b/docs/_modules/mygrad/nnet/activations/softmax.html new file mode 100644 index 00000000..125e45dc --- /dev/null +++ b/docs/_modules/mygrad/nnet/activations/softmax.html @@ -0,0 +1,741 @@ + + + + + + + + + + mygrad.nnet.activations.softmax — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.activations.softmax

+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+from mygrad.math._special import logsumexp as _logsumexp
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+
+def _softmax(x, kwargs):
+    if x.ndim > 0 and x.size > 0:
+        x = x - x.max(**kwargs)
+        target = x.astype(float) if issubclass(x.dtype.type, np.integer) else x
+
+        target = np.exp(x, out=target)
+        target /= target.sum(**kwargs)
+    else:
+        target = x.astype(float) if issubclass(x.dtype.type, np.integer) else x
+        target = np.ones_like(target)
+    return target
+
+
+class Softmax(Operation):
+    def __call__(self, a, axis=-1):
+        self.variables = (a,)
+        x = a.data
+
+        self._kw = dict(axis=axis, keepdims=True)
+        self._cached_output = _softmax(x, self._kw)
+        return self._cached_output
+
+    def backward_var(self, grad, index, **kwargs):
+        _ = self.variables[index]  # check index error
+        soft = self._cached_output
+        sg = soft * grad
+        return sg - soft * np.sum(sg, **self._kw)
+
+
+
[docs]def softmax( + x: ArrayLike, + axis: Union[None, int, Tuple[int, ...]] = -1, + *, + constant: Optional[bool] = None, +) -> Tensor: + r""" + Applies the softmax activation function:: + + f(x) = exp(x) / sum( exp(x) ) + + Computes the softmax over one or more axes of an ND-tensor. + + Parameters + ---------- + x : array_like + + axis : Union[None, int, Tuple[int, ...]], optional (default=-1) + The axis/axes over which to compute the softmax. + By default, the softmax is computed over the trailing axis. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Notes + ----- + - :math:`N` is the number of samples in the batch. + - :math:`C` is the number of possible classes for which scores are provided. + + This implements a numerically-stable version of softmax, however + log-softmax is still the more numerically stable activation function. + + Given the shape-:math:`(N, C)` tensor of scores, ``x``, the softmax classification + probabilities are computed. That is, the score for class-:math:`k` of a given datum + (:math:`s_{k}`) is normalized using the 'softmax' transformation: + + .. math:: + p_{k} = \frac{e^{s_k}}{\sum_{i=1}^{C}{e^{s_i}}} + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet import softmax + >>> x = mg.Tensor([[ 2., 2., 2.], + ... [2E50, 2E50, 1E50]]) + >>> softmax(x) + Tensor([[0.33333333, 0.33333333, 0.33333333], + [0.5 , 0.5 , 0. ]]) + """ + return Tensor._op(Softmax, x, op_kwargs=dict(axis=axis), constant=constant)
+ + +class LogSoftmax(Operation): + scalar_only = True + + def __call__(self, a, axis=-1): + self.variables = (a,) + x = a.data + + self._kw = dict(axis=axis, keepdims=True) + return x - _logsumexp(x, **self._kw) + + def backward_var(self, grad, index, **kwargs): + a = self.variables[index] + x = a.data + soft = _softmax(x, self._kw) + return grad - soft * np.sum(grad, **self._kw) + + +
[docs]def logsoftmax(
+    x: ArrayLike,
+    axis: Union[None, int, Tuple[int, ...]] = -1,
+    *,
+    constant: Optional[bool] = None,
+) -> Tensor:
+    r"""
+    Applies the log-softmax activation function::
+
+        f(x) = log ( exp(x) / sum( exp(x) ) )
+
+    Computes the log-softmax over one or more axes of an ND-tensor.
+
+    Parameters
+    ----------
+    x : ArrayLike
+
+    axis : Union[None, int, Tuple[int, ...]], optional (default=-1)
+        The axis/axes over which to compute the log-softmax.
+        By default, the log-softmax is computed over the trailing axis.
+
+    constant : Optional[bool]
+        If ``True``, the returned tensor is a constant (it
+        does not back-propagate a gradient)
+
+    Returns
+    -------
+    log_softmax : mygrad.Tensor
+        Tensor with same shape as ``x``
+
+    Notes
+    -----
+    - :math:`N` is the number of samples in the batch.
+    - :math:`C` is the number of possible classes for which scores are provided.
+
+    This implements a numerically-stable version of log-softmax, compared
+    to the naive implementation using ``mygrad.log``, ``mygrad.exp``, and
+    ``mygrad.sum``.
+
+    Given the shape-:math:`(N, C)` tensor of scores, ``x``, the softmax classification
+    probabilities are computed. That is, the score for class-:math:`k` of a given datum
+    (:math:`s_{k}`) is normalized using the 'softmax' transformation:
+
+    .. math::
+        p_{k} = \log{\frac{e^{s_k}}{\sum_{i=1}^{C}{e^{s_i}}}}
+
+    Examples
+    --------
+    >>> import mygrad as mg
+    >>> from mygrad.nnet import logsoftmax
+    >>> x = mg.Tensor([[  2.,   2.,    2.],
+    ...                [2E50, 2E50,  1E50]])
+    >>> logsoftmax(x)
+    Tensor([[-1.09861229e+00, -1.09861229e+00, -1.09861229e+00],
+            [ 0.00000000e+00,  0.00000000e+00, -1.00000000e+50]])
+    """
+    return Tensor._op(LogSoftmax, x, op_kwargs=dict(axis=axis), constant=constant)
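A minimal sketch relating the two activations above: softmax rows normalize to one, and for well-scaled inputs ``logsoftmax`` should agree with the naive ``log(softmax)`` (the agreement check is an assumption about well-conditioned inputs, since the whole point of ``logsoftmax`` is that the naive form breaks down for extreme scores):

import numpy as np
import mygrad as mg
from mygrad.nnet import softmax, logsoftmax

scores = mg.tensor([[1.0, 2.0, 3.0],
                    [0.0, 0.0, 0.0]])
p = softmax(scores, axis=-1)
print(p.data.sum(axis=-1))   # expected: [1., 1.] -- each row is a normalized distribution
print(np.allclose(logsoftmax(scores, axis=-1).data, np.log(p.data)))   # expected: True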
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/dirac.html b/docs/_modules/mygrad/nnet/initializers/dirac.html new file mode 100644 index 00000000..aee4c807 --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/dirac.html @@ -0,0 +1,632 @@ + + + + + + + + + + mygrad.nnet.initializers.dirac — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.dirac

+from typing import Optional
+
+import numpy as np
+
+from mygrad.tensor_base import Tensor
+
+
+
[docs]def dirac(*shape: int, dtype=np.float32, constant: Optional[bool] = None) -> Tensor: + """Initialize a :class:`mygrad.Tensor` according to the Dirac initialization procedure described + by Zagoruyko and Komodakis. + + Parameters + ---------- + shape : Sequence[int] + The shape of the output Tensor. Note that ``shape`` must be at least two-dimensional. + + dtype : data-type, optional (default=float32) + The data type of the output tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values initialized according to the Dirac initialization. + + Extended Description + -------------------- + Zagoruyko and Komodakis put forward the Dirac initialization in the paper + "DiracNets: Training Very Deep Neural Networks without Skip Connections" + https://arxiv.org/abs/1706.00388 + + A Tensor I initialized via this should satisfy: + I ⋆ x = x + + for compatible tensors ``x``, where ``⋆`` indicates convolution. Note that this does not + guarantee that the convolution will produce ``x``, but it will preserve as many channels of + the input as possible. + """ + if len(shape) == 1: + shape = shape[0] + + if len(shape) < 2: + raise ValueError("Dirac initialization requires at least two dimensions") + + tensor = np.zeros(shape, dtype=dtype) + minimum_depth = np.minimum(shape[0], shape[1]) # out dim, in dim + depths = range(minimum_depth) + trailing_indices = ([i // 2] * len(depths) for i in tensor.shape[2:]) + # tensor[i, i, k1//2, k2//2, ..., kn//2] for each i in min(shape[0], shape[1] + # where the k values are the spatial dimensions of `tensor` + tensor[(depths, depths, *trailing_indices)] = 1 + return Tensor(tensor, constant=constant)
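A small sketch of the resulting tensor's structure, following the indexing logic above (the ``from mygrad.nnet.initializers import dirac`` path is assumed from this module's location; one nonzero is placed at the spatial center for each of the ``min(shape[0], shape[1])`` matched channels):

import numpy as np
from mygrad.nnet.initializers import dirac

w = dirac(2, 3, 5, 5)              # also accepts a single shape tuple: dirac((2, 3, 5, 5))
print(w.shape)                     # expected: (2, 3, 5, 5)
print(np.count_nonzero(w.data))    # expected: 2 -- min(2, 3) ones at the spatial center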
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/glorot_normal.html b/docs/_modules/mygrad/nnet/initializers/glorot_normal.html new file mode 100644 index 00000000..42f3026d --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/glorot_normal.html @@ -0,0 +1,633 @@ + + + + + + + + + + mygrad.nnet.initializers.glorot_normal — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.glorot_normal

+import numpy as np
+
+from mygrad.tensor_base import Tensor
+
+
+
[docs]def glorot_normal(*shape, gain=1, dtype=np.float32, constant=None): + r"""Initialize a :class:`mygrad.Tensor` according to the normal initialization procedure + described by Glorot and Bengio. + + Parameters + ---------- + shape : Sequence[int] + The shape of the output Tensor. Note that ``shape`` must be at least two-dimensional. + + gain : Real, optional (default=1) + The gain (scaling factor) to apply. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values initialized according to the glorot normal initialization. + + Notes + ----- + Glorot and Bengio put forward this initialization in the paper + "Understanding the Difficulty of Training Deep Feedforward Neural Networks" + http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf + + A Tensor :math:`W` initialized in this way should be drawn from a distribution about + + .. math:: + \mathcal{N}(0, \frac{\sqrt{2}}{\sqrt{n_j+n_{j+1}}}) + """ + if not np.issubdtype(dtype, np.floating): + raise ValueError("Glorot Normal initialization requires a floating-point dtype") + + if len(shape) == 1: + shape = shape[0] + if len(shape) < 2: + raise ValueError( + "Glorot Normal initialization requires at least two dimensions" + ) + + if isinstance(gain, Tensor): + gain = gain.item() + + fan_in = shape[1] * (np.prod(shape[2:]) if len(shape) > 2 else 1) + fan_out = shape[0] * (np.prod(shape[2:]) if len(shape) > 2 else 1) + std = gain * np.sqrt(2 / (fan_in + fan_out)) + return Tensor( + np.random.normal(0, std, shape), + dtype=dtype, + constant=constant, + copy=False, + )
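The draw can be checked against the documented spread (a sketch; with ``gain=1`` and a shape-(fan_out, fan_in) weight matrix, the standard deviation above reduces to sqrt(2 / (fan_in + fan_out))):

>>> import numpy as np
>>> from mygrad.nnet.initializers import glorot_normal
>>> w = glorot_normal(500, 300)   # fan_out=500, fan_in=300
>>> np.isclose(w.data.std(), np.sqrt(2 / (300 + 500)), rtol=0.1)
True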
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/glorot_uniform.html b/docs/_modules/mygrad/nnet/initializers/glorot_uniform.html new file mode 100644 index 00000000..35d178c8 --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/glorot_uniform.html @@ -0,0 +1,624 @@ + + + + + + + + + + mygrad.nnet.initializers.glorot_uniform — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.glorot_uniform

+import numpy as np
+
+from mygrad.nnet.initializers.uniform import uniform
+
+
+
[docs]def glorot_uniform(*shape, gain=1, dtype=np.float32, constant=None): + r"""Initialize a :class:`mygrad.Tensor` according to the uniform initialization procedure + described by Glorot and Bengio. + + Parameters + ---------- + shape : Sequence[int] + The shape of the output Tensor. Note that ``shape`` must be at least two-dimensional. + + gain : Real, optional (default=1) + The gain (scaling factor) to apply. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values initialized according to the glorot uniform initialization. + + Notes + ----- + Glorot and Bengio put forward this initialization in the paper + "Understanding the Difficulty of Training Deep Feedforward Neural Networks" + http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf + + A Tensor :math:`W` initialized in this way should be drawn from a distribution about + + .. math:: + U[-\frac{\sqrt{6}}{\sqrt{n_j+n_{j+1}}}, \frac{\sqrt{6}}{\sqrt{n_j+n_{j+1}}}] + """ + if len(shape) == 1: + shape = shape[0] + if len(shape) < 2: + raise ValueError( + "Glorot Uniform initialization requires at least two dimensions" + ) + + fan_in = shape[1] * (np.prod(shape[2:]) if len(shape) > 2 else 1) + fan_out = shape[0] * (np.prod(shape[2:]) if len(shape) > 2 else 1) + bound = gain * np.sqrt(6 / (fan_in + fan_out)) + return uniform( + shape, lower_bound=-bound, upper_bound=bound, dtype=dtype, constant=constant + )
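Similarly, every drawn value should fall within the documented bound (a sketch for a small shape-(4, 6) weight matrix, where the bound with ``gain=1`` is sqrt(6 / (4 + 6))):

>>> import numpy as np
>>> from mygrad.nnet.initializers import glorot_uniform
>>> w = glorot_uniform(4, 6)      # fan_out=4, fan_in=6
>>> (np.abs(w.data) <= np.sqrt(6 / (4 + 6))).all()
True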
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/he_normal.html b/docs/_modules/mygrad/nnet/initializers/he_normal.html new file mode 100644 index 00000000..813a4f38 --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/he_normal.html @@ -0,0 +1,643 @@ + + + + + + + + + + mygrad.nnet.initializers.he_normal — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.he_normal

+import numpy as np
+
+from mygrad.nnet.initializers.normal import normal
+
+
+
[docs]def he_normal(*shape, gain=1, dtype=np.float32, constant=None): + r"""Initialize a :class:`mygrad.Tensor` according to the normal initialization procedure + described by He et al. + + Parameters + ---------- + shape : Sequence[int] + The shape of the output Tensor. Note that ``shape`` must be at least two-dimensional. + + gain : Real, optional (default=1) + The gain (scaling factor) to apply. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values initialized according to the He normal initialization. + + Notes + ----- + He, Zhang, Ren, and Sun put forward this initialization in the paper + "Delving Deep into Rectifiers: Surpassing Human-Level Performance + on ImageNet Classification" + https://arxiv.org/abs/1502.01852 + + A Tensor :math:`W` initialized in this way should be drawn from a distribution about + + .. math:: + \mathcal{N}(0, \sqrt{\frac{2}{(1+a^2)n_l}}) + + where :math:`a` is the slope of the rectifier following this layer, which is incorporated + using the `gain` variable above. + + The guidance put forward in that paper is that this initialization procedure should be preferred + over the ``mygrad.nnet.initializers.glorot_*`` functions especially when rectifiers (e.g. ReLU, + PReLU, leaky_relu) are used in very deep (> 1-20 or so layer) networks. + + Examples + -------- + >>> from mygrad.nnet.initializers import he_normal + >>> he_normal(2, 3) + Tensor([[-2.3194842 , 0.45956254, -0.28709933], + [-0.15776408, 0.6777564 , -0.05587448]], dtype=float32) + + >>> he_normal(4, 2, gain=5/3, dtype="float64", constant=True) + Tensor([[ 0.25962918, 1.1503933 ], + [-0.13638746, 0.10581096], + [ 1.44805926, 0.51367645], + [-0.32018705, -0.80306442]]) + + >>> he_normal(2, 1, 2, dtype="float16") + Tensor([[[ 0.8057 , -0.2922 ]], + [[ 0.12213, -0.715 ]]], dtype=float16) + """ + if len(shape) == 1: + shape = shape[0] + if len(shape) < 2: + raise ValueError("He Normal initialization requires at least two dimensions") + + std = gain / np.sqrt(shape[1] * (np.prod(shape[2:]) if len(shape) > 2 else 1)) + return normal(shape, mean=0, std=std, dtype=dtype, constant=constant)
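With the gain appropriate for a ReLU rectifier (slope :math:`a = 0`, i.e. ``gain=sqrt(2)``), the draw should have a standard deviation near sqrt(2 / fan_in); a small sketch:

>>> import numpy as np
>>> from mygrad.nnet.initializers import he_normal
>>> w = he_normal(400, 250, gain=np.sqrt(2))   # fan_in=250
>>> np.isclose(w.data.std(), np.sqrt(2 / 250), rtol=0.1)
True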
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/he_uniform.html b/docs/_modules/mygrad/nnet/initializers/he_uniform.html new file mode 100644 index 00000000..42245c87 --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/he_uniform.html @@ -0,0 +1,645 @@ + + + + + + + + + + mygrad.nnet.initializers.he_uniform — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.he_uniform

+import numpy as np
+
+from mygrad.nnet.initializers.uniform import uniform
+
+
+
[docs]def he_uniform(*shape, gain=1, dtype=np.float32, constant=None): + r"""Initialize a :class:`mygrad.Tensor` according to the uniform initialization procedure + described by He et al. + + Parameters + ---------- + shape : Sequence[int] + The shape of the output Tensor. Note that ``shape`` must be at least two-dimensional. + + gain : Real, optional (default=1) + The gain (scaling factor) to apply. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values initialized according to the He uniform initialization. + + Notes + ----- + He, Zhang, Ren, and Sun put forward this initialization in the paper + "Delving Deep into Rectifiers: Surpassing Human-Level Performance + on ImageNet Classification" + https://arxiv.org/abs/1502.01852 + + A Tensor :math:`W` initialized in this way should be drawn from a distribution about + + .. math:: + U[-\sqrt{\frac{6}{(1+a^2)n_l}}, \sqrt{\frac{6}{(1+a^2)n_l}}] + + where :math:`a` is the slope of the rectifier following this layer, which is incorporated + using the `gain` variable above. + + The guidance put forward in that paper is that this initialization procedure should be preferred + over the ``mygrad.nnet.initializers.glorot_*`` functions especially when rectifiers (e.g. ReLU, + PReLU, leaky_relu) are used in very deep (> 1-20 or so layer) networks. + + Examples + -------- + >>> from mygrad.nnet.initializers import he_uniform + >>> he_uniform(2, 3) + Tensor([[-0.97671795, 0.85518736, -0.8187388 ], + [ 0.7599437 , 0.94951814, -0.96755147]], dtype=float32) + + >>> he_uniform(4, 2, gain=5/3, dtype="float64", constant=True) + Tensor([[-1.10372799, -0.16472136], + [-1.32614867, 1.14142637], + [ 0.78044471, 0.20562334], + [-1.23968259, 1.0057054 ]]) + + >>> he_uniform(2, 1, 2, dtype="float16") + Tensor([[[-0.1233, 0.1023]], + [[ 0.3845, 0.1003]]], dtype=float16) + """ + if len(shape) == 1: + shape = shape[0] + if len(shape) < 2: + raise ValueError("He Uniform initialization requires at least two dimensions") + + bound = gain * np.sqrt(3 / (shape[1] * (np.prod(shape[2:]) if len(shape) > 2 else 1))) + return uniform( + shape, lower_bound=-bound, upper_bound=bound, dtype=dtype, constant=constant + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/normal.html b/docs/_modules/mygrad/nnet/initializers/normal.html new file mode 100644 index 00000000..11443dcf --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/normal.html @@ -0,0 +1,635 @@ + + + + + + + + + + mygrad.nnet.initializers.normal — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.normal

+import numpy as np
+
+from mygrad.tensor_base import Tensor
+
+
+
[docs]def normal(*shape, mean=0, std=1, dtype=np.float32, constant=None): + """Initialize a :class:`mygrad.Tensor` by drawing from a normal (Gaussian) distribution. + + Parameters + ---------- + shape : Sequence[int] + The output shape. + + mean : Real, optional (default=0) + The mean of the distribution from which to draw. + + std : Real, optional (default=1) + The standard deviation of the distribution from which to draw. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values drawn from Ɲ(μ, σ²), where μ=``mean`` and σ=``std``. + + Examples + -------- + >>> from mygrad.nnet.initializers import normal + >>> normal(1, 2, 3) + Tensor([[[-0.06481607, -0.550582 , 0.04689528], + [ 0.82973075, 2.83742 , 1.0964519 ]]], dtype=float32) + + >>> normal(2, 2, dtype="float16", constant=True) + Tensor([[-1.335 , 0.9297], + [ 1.746 , -0.1222]], dtype=float16) + + >>> normal(5, dtype="float64") + Tensor([-0.03875407, 0.65368466, -0.72636993, 1.57404148, -1.17444345]) + """ + if not np.issubdtype(dtype, np.floating): + raise ValueError("Normal initialization requires a floating-point dtype") + if std < 0: + raise ValueError("Standard deviation must be non-negative") + + if len(shape) == 1: + shape = shape[0] + + if isinstance(mean, Tensor): + mean = mean.item() + if isinstance(std, Tensor): + std = std.item() + + return Tensor( + np.random.normal(mean, std, shape), + dtype=dtype, + constant=constant, + copy=False, + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/initializers/uniform.html b/docs/_modules/mygrad/nnet/initializers/uniform.html new file mode 100644 index 00000000..121fa012 --- /dev/null +++ b/docs/_modules/mygrad/nnet/initializers/uniform.html @@ -0,0 +1,635 @@ + + + + + + + + + + mygrad.nnet.initializers.uniform — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.initializers.uniform

+import numpy as np
+
+from mygrad.tensor_base import Tensor
+
+
+
[docs]def uniform(*shape, lower_bound=0, upper_bound=1, dtype=np.float32, constant=None): + """Initialize a :class:`mygrad.Tensor` by drawing from a uniform distribution. + + Parameters + ---------- + shape : Sequence[int] + The output shape. + + lower_bound : Real, optional (default=0) + Lower bound on the output interval, inclusive. + + upper_bound : Real, optional (default=1) + Upper bound on the output interval, exclusive. + + dtype : data-type, optional (default=float32) + The data type of the output tensor; must be a floating-point type. + + constant : bool, optional (default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient). + + Returns + ------- + mygrad.Tensor, shape=``shape`` + A Tensor, with values drawn uniformly from [lower_bound, upper_bound). + + Examples + -------- + >>> from mygrad.nnet.initializers import uniform + >>> uniform(2, 3) + Tensor([[0.8731087 , 0.30872548, 0.75528544], + [0.55404514, 0.7652222 , 0.4955769 ]], dtype=float32) + + >>> uniform(2, 2, lower_bound=-1, upper_bound=3) + Tensor([[ 1.9151938 , -0.28968155], + [-0.01240687, -0.24448799]], dtype=float32) + + >>> uniform(5, dtype="float16", constant=True) + Tensor([0.5186, 0.1481, 0.3745, 0.941 , 0.331 ], dtype=float16) + """ + if lower_bound >= upper_bound: + raise ValueError("Uniform lower bound cannot be greater than upper bound") + if not np.issubdtype(dtype, np.floating): + raise ValueError("Uniform initialization requires a floating-point dtype") + + if len(shape) == 1: + shape = shape[0] + + if isinstance(lower_bound, Tensor): + lower_bound = lower_bound.item() + if isinstance(upper_bound, Tensor): + upper_bound = upper_bound.item() + + return Tensor( + np.random.uniform(lower_bound, upper_bound, shape), + dtype=dtype, + constant=constant, + copy=False, + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/layers/batchnorm.html b/docs/_modules/mygrad/nnet/layers/batchnorm.html new file mode 100644 index 00000000..0fff8fce --- /dev/null +++ b/docs/_modules/mygrad/nnet/layers/batchnorm.html @@ -0,0 +1,741 @@ + + + + + + + + + + mygrad.nnet.layers.batchnorm — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.layers.batchnorm

+from typing import Optional
+
+import numpy as np
+
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["batchnorm"]
+
+
+# TODO: Remove affine parameters from Operation
+class BatchNorm(Operation):
+    """
+    Attributes
+    ----------
+    mean : numpy.ndarray
+    var : numpy.ndarray
+
+    Notes
+    -----
+    `mean` and `var` are bound as instance-attributes upon
+    calling the batch-norm instance.
+    """
+
+    def __call__(self, x, gamma, beta, *, eps):
+        """
+        y(x) = (x - E[x]) / sqrt(Var[x] + eps)
+        batchnorm(x) = gamma * y(x) + beta
+
+        Parameters
+        ----------
+        x : mygrad.Tensor
+        gamma : Optional[mygrad.Tensor]
+        beta : Optional[mygrad.Tensor]
+        eps : Real
+           A small non-negative number.
+
+        Returns
+        -------
+        numpy.ndarray
+        """
+        normed_dims = tuple(i for i in range(x.ndim) if i != 1)
+        keepdims_shape = tuple(1 if n != 1 else d for n, d in enumerate(x.shape))
+
+        self.variables = tuple(i for i in (x, gamma, beta))
+
+        if gamma.size == 0:
+            gamma = None
+        if beta.size == 0:
+            beta = None
+
+        self.gamma = gamma
+        self.beta = beta
+
+        x = x.data
+        self.x_norm = None  # required for backprop through gamma
+        self.mean = x.mean(axis=normed_dims)
+        self.var = x.var(axis=normed_dims)
+
+        y = x - self.mean.reshape(keepdims_shape)
+        self._std = np.sqrt(self.var + eps).reshape(keepdims_shape)  # sqrt(var + eps)
+        y /= self._std
+        self.x_norm = y
+        # optional affine transformation
+        if gamma is not None:
+            gamma = gamma.data
+            # must copy `y` to prevent mutation of `self.x_norm`
+            y = y * gamma.reshape(keepdims_shape)
+
+        if beta is not None:
+            beta = beta.data
+            y = y + beta.reshape(keepdims_shape)
+        return y
+
+    def backward_var(self, grad, index, **kwargs):
+        x = self.variables[0].data
+        if index == 0:  # backprop through x
+            normed_dims = tuple(i for i in range(x.ndim) if i != 1)
+            keepdims_shape = tuple(1 if n != 1 else d for n, d in enumerate(x.shape))
+            N = x.size / x.shape[1]
+
+            # all sums carried over non-channel dims
+            # (1/sqrt(var + eps)) * [dL - dL.mean() - (1/N)*x_norm*(x_norm @ dL)]
+            grad_ = grad - np.mean(grad, axis=normed_dims, keepdims=True)
+
+            rterm = self.x_norm * np.reshape(
+                np.einsum(grad, range(x.ndim), self.x_norm, range(x.ndim), [1]),
+                keepdims_shape,
+            )
+            rterm /= N
+            grad_ -= rterm
+            grad_ /= self._std
+            if (
+                self.gamma is not None
+            ):  # backprop through optional affine transformation
+                gamma = self.gamma.data
+                grad_ *= gamma.reshape(keepdims_shape)
+            return grad_
+
+        elif index == 1 and self.gamma is not None:  # backprop through gamma
+            return np.einsum(grad, range(x.ndim), self.x_norm, range(x.ndim), [1])
+
+        elif (index == 1 and self.gamma is None) or index == 2:
+            normed_dims = tuple(i for i in range(x.ndim) if i != 1)
+            return grad.sum(axis=normed_dims)
+        else:  # pragma: no cover
+            raise IndexError
+
+
+
[docs]def batchnorm( + x: ArrayLike, + *, + gamma: Optional[ArrayLike] = None, + beta: Optional[ArrayLike] = None, + eps: float, + constant: Optional[bool] = None, +) -> Tensor: + """ + Performs batch normalization on ``x``:: + + y(x) = (x - E[x]) / sqrt(Var[x] + eps) + batchnorm(x) = gamma * y(x) + beta + + Where :math:`E[x]` and :math:`Var[x]` represent the mean and variance, respectively, + over axis-1 of ``x``. The subsequent affine transformation on ``y`` + is optional. + + Parameters + ---------- + x : array_like, shape=(N, C, ...) + The batch to be normalized within each entry of C + + gamma : Optional[array_like], shape=(C,) + Optional per-channel scaling factors to be applied after the + normalization step. + + beta : Optional[array_like], shape=(C,) + Optional per-channel scaling bias factors to be applied after the + normalization step. + + eps : Real + A small non-negative number. + + constant : bool, optional (default=False) + If True, the resulting Tensor is a constant. + + Returns + ------- + mygrad.Tensor + The batch-normalized data. + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet import batchnorm + >>> x = mg.Tensor([1., 4., 1.]).reshape(3, 1) + >>> batchnorm(x, eps=0) + Tensor([[-0.70710678], + [ 1.41421356], + [-0.70710678]]) + """ + # pass gamma and beta as empty arrays if they are not supplied + if gamma is None: + gamma = np.array([]) + if beta is None: + beta = np.array([]) + return Tensor._op( + BatchNorm, x, gamma, beta, op_kwargs=dict(eps=eps), constant=constant + )
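The per-channel statistics of the output can be checked directly (a sketch using a hypothetical random shape-(N=4, C=3, H=5, W=5) batch): the normalized result should have approximately zero mean and unit variance over all non-channel axes.

>>> import numpy as np
>>> import mygrad as mg
>>> from mygrad.nnet import batchnorm
>>> x = mg.Tensor(np.random.rand(4, 3, 5, 5))
>>> out = batchnorm(x, eps=1e-8)
>>> np.allclose(out.data.mean(axis=(0, 2, 3)), 0.0, atol=1e-6)
True
>>> np.allclose(out.data.std(axis=(0, 2, 3)), 1.0, atol=1e-3)
True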
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/layers/conv.html b/docs/_modules/mygrad/nnet/layers/conv.html new file mode 100644 index 00000000..ca7901f8 --- /dev/null +++ b/docs/_modules/mygrad/nnet/layers/conv.html @@ -0,0 +1,966 @@ + + + + + + + + + + mygrad.nnet.layers.conv — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.layers.conv

+from numbers import Integral
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+from mygrad.nnet.layers.utils import sliding_window_view
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+__all__ = ["conv_nd"]
+
+
+class ConvND(Operation):
+    def __call__(self, x, w, *, stride, padding=0, dilation=1):
+        self.variables = (x, w)
+        # x ... data:    (N, C, X0, X1, ...)
+        # w ... filters: (F, C, W0, W1, ...)
+
+        x = x.data
+        w = w.data
+
+        assert x.ndim > 2
+        assert x.ndim == w.ndim
+        assert (
+            w.shape[1] == x.shape[1]
+        ), "The channel-depth of the batch and filters must agree"
+
+        num_conv_channels = w.ndim - 2
+        x_shape = np.array(
+            x.shape[2:]
+        )  # (X0, ...): shape of the channels being convolved over
+        w_shape = np.array(w.shape[2:])  # (W0, ...): shape of each conv filter
+
+        dilation = (
+            np.array((dilation,) * num_conv_channels)
+            if isinstance(dilation, Integral)
+            else np.array(dilation, dtype=int)
+        )
+
+        assert len(dilation) == num_conv_channels and all(
+            d >= 1 and isinstance(d, Integral) for d in dilation
+        )
+
+        padding = (
+            np.array((padding,) * num_conv_channels)
+            if isinstance(padding, Integral)
+            else np.array(padding, dtype=int)
+        )
+        assert len(padding) == num_conv_channels and all(
+            p >= 0 and isinstance(p, Integral) for p in padding
+        )
+
+        stride = (
+            np.array((stride,) * num_conv_channels)
+            if isinstance(stride, Integral)
+            else np.asarray(stride, dtype=int)
+        )
+        assert len(stride) == num_conv_channels and all(
+            s >= 1 and isinstance(s, Integral) for s in stride
+        )
+
+        out_shape = (
+            x_shape + 2 * padding - ((w_shape - 1) * dilation + 1)
+        ) / stride + 1
+
+        if not all(i.is_integer() and i > 0 for i in out_shape):
+            msg = "Stride and kernel dimensions are incompatible: \n"
+            msg += f"Input dimensions: {tuple(x_shape)}\n"
+            msg += f"Stride dimensions: {tuple(stride)}\n"
+            msg += f"Kernel dimensions: {tuple(w_shape)}\n"
+            msg += f"Padding dimensions: {tuple(padding)}\n"
+            msg += f"Dilation dimensions: {tuple(dilation)}\n"
+            raise ValueError(msg)
+
+        self.padding = padding
+        self.stride = stride
+        self.dilation = dilation
+
+        # symmetric 0-padding for X0, X1, ... dimensions
+        axis_pad = tuple((i, i) for i in (0, 0, *padding))
+        x = np.pad(x, axis_pad, mode="constant") if sum(padding) else x
+
+        # (G0, ...) is the tuple of grid-positions for placing each window (not including stride)
+        # (N, C, X0, ...) -> (G0, ..., N, C, W0, ...)
+        windowed_data = sliding_window_view(
+            x, window_shape=w_shape, step=self.stride, dilation=self.dilation
+        )
+
+        w_conv_channels = list(range(1, num_conv_channels + 2))  # C, W0, ...
+        window_conv_channels = [
+            i + 1 + num_conv_channels  # C, W0, ...
+            for i in range(num_conv_channels + 1)
+        ]
+
+        # (F, C, W0, ...) ⋆ (G0, ..., N, C, W0, ...) -> (F, G0, ..., N)
+        conv_out = np.tensordot(
+            w, windowed_data, axes=[w_conv_channels, window_conv_channels]
+        )
+
+        # (F, G0, ..., N) -> (N, F, G0, ...)
+        out = np.moveaxis(conv_out, source=-1, destination=0)
+        return out if out.flags["C_CONTIGUOUS"] else np.ascontiguousarray(out)
+
+    def backward_var(self, grad, index, **kwargs):
+        """Computes dX, where X is the data batch
+
+        Parameters
+        ----------
+        grad : numpy.ndarray, shape=(N, F, G0, ...)"""
+        x, w = (i.data for i in self.variables)
+        num_conv_channels = grad.ndim - 2
+
+        if index == 0:  # backprop through x
+            x_shape = x.shape[:2] + tuple(
+                i + 2 * p for i, p in zip(x.shape[-num_conv_channels:], self.padding)
+            )
+            dx = np.zeros(x_shape, dtype=x.dtype)  # (N, C, X0, ...)
+
+            # `gp` stores all of the various broadcast multiplications of each grad
+            # element against the conv filter.
+            # (N, F, G0, ...) -tdot- (F, C, W0, ...) --> (N, G0, ..., C, W0, ...)
+            gp = np.tensordot(grad, w, axes=[[1], [0]])
+            for ind in np.ndindex(grad.shape[-num_conv_channels:]):
+                # ind: (g0, ...) - grid-position of filter placement
+                slices = tuple(
+                    slice(i * s, i * s + w * d, d)
+                    for i, w, s, d in zip(ind, w.shape[2:], self.stride, self.dilation)
+                )
+                # Add (grad-element * filter) to each appropriate window position in `dx`
+                # dx[N, C, g0*s0 : g0*s0 + w0*d0 : d0, (...)] += gp[N, g0, (...), C, W0, (...)]
+                dx[(..., *slices)] += gp[(slice(None), *ind, ...)]
+
+            # remove padding from dx
+            if sum(self.padding):
+                no_pads = tuple(slice(p, -p if p else None) for p in self.padding)
+                dx = dx[(..., *no_pads)]
+            return dx
+
+        else:  # backprop through w
+            # backprop into f
+            # symmetric 0-padding for H, W dimensions
+            axis_pad = tuple((i, i) for i in (0, 0, *self.padding))
+            x = np.pad(x, axis_pad, mode="constant") if sum(self.padding) else x
+
+            # (G0, ...) is the tuple of grid-indices for placing each window (not including stride)
+            # (N, C, X0, ...) -> (G0, ..., N, C, W0, ...)
+            windowed_data = sliding_window_view(
+                x, window_shape=w.shape[2:], step=self.stride, dilation=self.dilation
+            )
+
+            # (N, F, G0, ...) -tdot- (G0, ..., N, C, W0, ...) --> (F, C, W0, ...)
+            grad_axes = list(range(2, num_conv_channels + 2)) + [0]  # (G0, ..., N)
+            window_axes = list(range(num_conv_channels + 1))  # (G0, ..., N)
+            return np.tensordot(grad, windowed_data, axes=[grad_axes, window_axes])
+
+
+
[docs]def conv_nd( + x: ArrayLike, + filter_bank: ArrayLike, + *, + stride: Union[int, Tuple[int, ...]], + padding: Union[int, Tuple[int, ...]] = 0, + dilation: Union[int, Tuple[int, ...]] = 1, + constant: Optional[bool] = None, +) -> Tensor: + """Use ``filter_bank`` (``w``) to perform strided N-dimensional neural network-style + convolutions (see Notes) over ``x``.:: + + f(x, w) -> x ⋆ w + + shapes: + (N, C, X0, ...) ⋆ (F, C, W0, ...) -> (N, F, G0, ...) + + ``x`` represents a batch of data over which the filters + are convolved. Specifically, it must be a tensor of shape + :math:`(N, C, X_0, ...)`, where :math:`N` is the number of samples in the batch, + C is the channel-depth of each datum, and :math:`(X_0, ...)` are the + dimensions over which the filters are convolved. Accordingly, + each filter must have a channel depth of :math:`C`. + + Thus convolving :math:`F` filters, each with a shape :math:`(C, W_0, ...)`, + over the data batch will produce a tensor of shape + :math:`(N, F, G_0, ...)`, where :math:`(G_0, ...)` is the shape of the grid + commensurate with the filter placements. + + Parameters + ---------- + x : ArrayLike, shape=(N, C, Xo, ...) + The data batch to be convolved over. + + filter_bank : Union[Tensor, array_like], shape=(F, C, Wo, ...) + The filters used to perform the convolutions. + + stride : Union[int, Tuple[int, ...]] + (keyword-only argument) The step-size with which each + filter is placed along each convolved axis during the + convolution. The tuple indicates (stride-0, ...). If a + single integer is provided, this stride is used for all + convolved dimensions + + padding : Union[int, Tuple[int, ...]] + (keyword-only argument) The number of zeros to be padded + to both ends of each convolved dimension, respectively. + If a single integer is provided, this padding is used for + all of the convolved axes + + dilation : Union[int, Tuple[int, ...]], optional (default=1) + (keyword-only argument) The spacing used when placing kernel + elements along the data. E.g. for a 1D convolution the ith + placement of the kernel multiplied against the dilated-window: + ``x[:, :, i*s:(i*s + w*d):d]``, where ``s`` is + the stride, ``w`` is the kernel-size, and ``d`` is the dilation factor. + + If a single integer is provided, that dilation value is used for all + of the convolved axes + + constant : Optional[bool] + If True, the resulting Tensor is a constant. + + Returns + ------- + Tensor, shape=(N, F, G0, ...) + The result of each filter being convolved over each datum in + the batch. + + Notes + ----- + - The filters are *not* flipped by this operation, meaning that + an auto-correlation is being performed rather than a true convolution. + + - Only 'valid' filter placements – where the filters overlap + completely with the (padded) data – are permitted. + + Examples + -------- + Here we perform a 1D convolution of a constant-valued kernel, ``k``, with a + 'square-wave' signal, ``x``, using stride-1. Note that because we are constrained + to doing deep learning-style convolutions, we prepend the dimensions + :math:`(N=1, C=1)` to ``x``, and :math:`(F=1, C=1)` to ``k``. That is, + we are performing a convolution on one, single-channeled signal using + one kernel. + + See that this convolution produces the expected triangle-shaped + response. The shape of the resulting tensor is :math:`(N=1, F=1, G_0=12)`. + That is, the length-5 kernel can be placed in 12 valid positions, using a + stride of 1.
+ + >>> import mygrad as mg + >>> from mygrad.nnet import conv_nd + >>> x = mg.zeros((1, 1, 16)) # a square-wave signal + >>> x[..., 5:11] = 1 + >>> k = mg.ones((1, 1, 5)) # a constant-valued kernel + >>> conv_nd(x, k, stride=1) # performing a stride-1, 1D convolution + Tensor([[[0., 1., 2., 3., 4., 5., 5., 4., 3., 2., 1., 0.]]], dtype=float32) + + Back-propagating through the (summed) convolution: + + >>> conv_nd(x, k, stride=1).sum().backward() # sum to a scalar to perform back-prop + >>> x.grad # d(summed_conv)/dx + array([[[1., 2., 3., 4., 5., 5., 5., 5., 5., 5., 5., 5., 4., 3., 2., 1.]]], + dtype=float32) + >>> k.grad # d(summed_conv)/dk + array([[[6., 6., 6., 6., 6.]]]) + + .. plot:: + + >>> import mygrad as mg + >>> from mygrad.nnet import conv_nd + >>> import matplotlib.pyplot as plt + >>> kernel = mg.ones(5) # a square-wave signal + >>> x = mg.zeros((1, 1, 16)) # a square-wave signal + >>> x[..., 5:11] = 1 + >>> k = mg.ones((1, 1, 5)) # a constant-valued kernel + >>> y = conv_nd(x, k, stride=1) # performing a stride-1, 1D convolution + >>> plt.title("conv(f, g); stride: 1") + >>> y.backward() + >>> plt.plot(x.data[0,0], label="f", ls="--", lw=3, drawstyle='steps-pre') + >>> plt.plot(kernel, label="g", ls="--", lw=3, drawstyle='steps-pre') + >>> plt.plot(y.data[0,0], label="f * g") + >>> plt.plot(mg.arange(16.), x.grad[0, 0], label="d[sum(f * g)]/df") + >>> kernel = mg.ones(5) # a square-wave signal + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + + Let's apply a edge-detection kernel to each color channel of an RGB image. + + >>> import matplotlib.pyplot as plt + >>> import matplotlib.image as mpimg + >>> from mygrad.nnet.layers import conv_nd + >>> # A shape-(H, W, 3) RGB image + >>> img = mpimg.imread('../_static/meerkat.png') + >>> # We'll treat this like a batch of three greyscale images + >>> # where each "image" is actually a color channel + >>> # shape-(H, W, 3) -> shape-(3, 1, H, W) + >>> x = img.transpose(2, 0, 1)[:, None, :, :] + + >>> # edge detection kernel + >>> kernel = np.array([[-1, -1, -1], + ... [-1, 8, -1], + ... [-1, -1, -1]]) + >>> # (Hf, Wf) --> (1, 1, Hf, Wf) + >>> kernel = kernel.reshape(1, 1, *kernel.shape) + + >>> # conv: (3, 1, H, W) w/ (1, 1, Hf, Wf) --> (3, 1, H', W') + >>> # squeeze + transpose: (3, 1, H', W') --> (H', W', 3) + >>> processed = conv_nd(x, kernel, stride=(1, 1)) + >>> processed = processed.data.squeeze().transpose(1, 2, 0) + + >>> fig, ax = plt.subplots() + >>> ax.imshow(img) + + >>> fig, ax = plt.subplots() + >>> ax.imshow(processed) + + .. plot:: + + >>> import matplotlib.pyplot as plt + >>> import matplotlib.image as mpimg + >>> from mygrad.nnet.layers import conv_nd + >>> img = mpimg.imread('../_static/meerkat.png') + + >>> # edge detection + >>> kernel = np.array([[-1, -1, -1], + ... [-1, 8, -1], + ... [-1, -1, -1]]) + >>> x = img.transpose(2,0,1)[:, None, :, :] + + >>> # (Hf, Wf) --> (1, 1, Hf, Wf) + >>> kernel = kernel.reshape(1, 1, *kernel.shape) + + >>> # conv: (C, 1, H, W) w/ (1, 1, Hf, Wf) --> (C, 1, H', W') + >>> # squeeze + transpose: (C, 1, H', W') --> (H', W', C) + >>> processed = conv_nd(x, kernel, stride=(1, 1)).data.squeeze().transpose(1, 2, 0) + + >>> fig, ax = plt.subplots() + >>> ax.imshow(img) + + >>> fig, ax = plt.subplots() + >>> ax.imshow(processed) + + Now, let's demonstrate a more typical usage for ``conv_nd`` in the context of + neural networks. ``x`` will represent 10, 32x32 RGB images, and we will use + 5 distinct 2x2 kernels to convolve over each of these images . 
Note that + each kernel must possess 3 channels - one for each RGB channel. + + That is, we will be performing NxF channel-wise 2D convolutions. Supposing + that we don't want the kernel placements to overlap, we can use a stride of 2. In + total, this will produce a shape-:math:`(N=10, F=5, G_0=16, G_1=16)` tensor as a + result. + + >>> import mygrad as mg + >>> x = mg.random.rand(10, 3, 32, 32) # creating 10 random 32x32 RGB images + >>> k = mg.random.rand(5, 3, 2, 2) # creating 5 random 3-channel 2x2 kernels + + Given the shapes of ``x`` and ``k``, ``conv_nd`` automatically executes a 2D convolution: + + >>> conv_nd(x, k, stride=2).shape + (10, 5, 16, 16) + + Extrapolating further, ``conv_nd`` is capable of performing ND convolutions! + + Performing a convolution over a batch of single-channel, "spatial-3D" tensor data: + + >>> # shape-(N=1, C=1, X=10, Y=12, Z=10) + >>> x = mg.random.rand(1, 1, 10, 12, 10) + >>> # shape-(F=2, C=1, Wx=3, Wy=1, Wz=2) + >>> k = mg.random.rand(2, 1, 3, 1, 2) + >>> conv_nd(x, k, stride=1).shape + (1, 2, 8, 12, 9) + """ + if x.ndim < 3: + raise ValueError( + f"`x` must possess at least three " f"dimensions, got {x.ndim} dimensions" + ) + + if x.ndim != filter_bank.ndim: + raise ValueError( + f"`x` ({x.ndim}-dimensions) must have the same dimensionality as " + f"`filter_bank` ({filter_bank.ndim}-dimensions)" + ) + + if filter_bank.shape[1] != x.shape[1]: + raise ValueError( + f"`x.shape[1]` ({x.shape[1]}) must match `filter_bank.shape[1]` ({filter_bank.shape[1]})" + ) + + return Tensor._op( + ConvND, + x, + filter_bank, + op_kwargs={"stride": stride, "padding": padding, "dilation": dilation}, + constant=constant, + )
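For reference, each output grid dimension follows the usual relation used in ``ConvND.__call__`` above, ``G = (X + 2*padding - ((W - 1)*dilation + 1)) / stride + 1``, which must come out to a positive integer. A small sketch with hypothetical sizes:

>>> import numpy as np
>>> import mygrad as mg
>>> from mygrad.nnet import conv_nd
>>> x = mg.Tensor(np.random.rand(1, 1, 21))    # length-21 signal
>>> k = mg.Tensor(np.random.rand(1, 1, 3))     # length-3 kernel
>>> (21 + 2 * 1 - ((3 - 1) * 2 + 1)) // 2 + 1  # padding=1, dilation=2, stride=2
10
>>> conv_nd(x, k, stride=2, padding=1, dilation=2).shape
(1, 1, 10)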
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/layers/gru.html b/docs/_modules/mygrad/nnet/layers/gru.html new file mode 100644 index 00000000..ece467d4 --- /dev/null +++ b/docs/_modules/mygrad/nnet/layers/gru.html @@ -0,0 +1,1143 @@ + + + + + + + + + + mygrad.nnet.layers.gru — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.nnet.layers.gru

+import weakref
+from numbers import Integral
+
+import numpy as np
+
+from mygrad._utils import SkipGradient
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+
+try:
+    from numba import njit, vectorize
+except ImportError:  # pragma: no cover
+    raise ImportError(
+        "The package `numba` must be installed in order to access the gru."
+    )
+
+
+@vectorize(
+    ["float32(float32)", "float64(float64)"],
+    nopython=True,
+)
+def sig(f):  # pragma: no cover
+    """
+    Calculates a sigmoid function
+    """
+    return 1 / (1 + np.exp(-f))
+
+
+@vectorize(
+    ["float32(float32)", "float64(float64)"],
+    nopython=True,
+)
+def d_sig(f):  # pragma: no cover
+    """
+    Calculates the derivative of a sigmoid function
+    """
+    return f * (1 - f)
+
+
+@vectorize(
+    ["float32(float32)", "float64(float64)"],
+    nopython=True,
+)
+def d_tanh(f):  # pragma: no cover
+    """
+    Calculates the derivative of a tanh function
+    """
+    return 1 - f**2
+
+
+@njit
+def dot(a, b):
+    """
+    Calculates the dot product between 2 arrays
+    of shapes (W,X,Y) and (Y,Z), respectively
+    """
+    return np.dot(a.reshape(-1, a.shape[-1]), b).reshape(*a.shape[:-1], b.shape[-1])
+
+
+@njit
+def _gru_layer(s, z, r, h, Wz, Wr, Wh):
+    """Given:
+        S(t=0)
+        z = X(t) Uz + bz
+        r = X(t) Ur + br
+        h = X(t) Uh + bh
+
+    Compute Z(t), R(t), H(t), S(t) for all 1 <= t <= T
+
+    Parameters
+    ----------
+    s : numpy.ndarray, shape=(T+1, N, D)
+        Modified in-place
+    z : numpy.ndarray, shape=(T, N, D)
+        Modified in-place
+    r : numpy.ndarray, shape=(T, N, D)
+        Modified in-place
+    h : numpy.ndarray, shape=(T, N, D)
+        Modified in-place
+    Wz : numpy.ndarray, shape=(D, D)
+    Wr : numpy.ndarray, shape=(D, D)
+    Wh : numpy.ndarray, shape=(D, D)"""
+    for n in range(len(s) - 1):
+        z[n] += np.dot(s[n], Wz)
+        z[n] = sig(z[n])
+
+        r[n] += np.dot(s[n], Wr)
+        r[n] = sig(r[n])
+
+        h[n] += np.dot(r[n] * s[n], Wh)
+        h[n] = np.tanh(h[n])
+
+        s[n + 1] = (1 - z[n]) * h[n] + z[n] * s[n]
+
+
+@njit
+def _gru_dLds(s, z, r, dLds, Wz, Wh, Wr, dz, dh, dr, s_h, one_z):
+    """
+                        Z_{t} = sigmoid(Uz X_{t} + Wz S_{t-1} + bz)
+                        R_{t} = sigmoid(Ur X_{t} + Wr S_{t-1} + br)
+                        H_{t} = tanh(Uh X_{t} + Wh (R{t} * S_{t-1}) + bh)
+                        S_{t} = (1 - Z{t}) * H{t} + Z{t} * S_{t-1}
+
+    Returns
+    --------
+
+        dL / ds(t) =   partial dL / ds(t+1) * ds(t+1) / ds(t)
+                     + partial dL / ds(t+1) * ds(t+1) / dz(t) * dz(t) / ds(t)
+                     + partial dL / ds(t+1) * ds(t+1) / dh(t) * dh(t) / ds(t)
+                     + partial dL / ds(t+1) * ds(t+1) / dh(t) * dh(t) / dr(t) * dr(t) / ds(t)
+    """
+    dLdh = dot(dLds * one_z * dh, Wh)
+
+    out = z * dLds
+    out += dot(dLds * s_h * dz, Wz)
+    out += dLdh * r
+    out += dot(dLdh * s * dr, Wr)
+
+    return out
+
+
+@njit
+def _gru_bptt(
+    X, dLds, s, z, r, Wz, Wh, Wr, dz, dh, dr, s_h, one_z, bp_lim, old_dLds=None
+):
+    Wz, Wh, Wr = Wz.T, Wh.T, Wr.T
+    bptt = bp_lim < len(X) - 1
+    if bptt:  # pragma: no cover
+        old_dLds = np.zeros_like(dLds)
+
+    for i in range(bp_lim):
+        #  dL(t) / ds(t) + dL(t+1) / ds(t)
+        if bptt:  # pragma: no cover
+            source_index = slice(1, len(dLds) - i)
+            target_index = slice(None, len(dLds) - (i + 1))
+            dt = dLds[source_index] - old_dLds[source_index]
+            old_dLds = np.copy(dLds)
+        else:  # no backprop truncation
+            source_index = slice(len(dLds) - (i + 1), len(dLds) - i)
+            target_index = slice(len(dLds) - (i + 2), len(dLds) - (i + 1))
+            dt = dLds[source_index]
+
+        dLds[target_index] += _gru_dLds(
+            s[source_index],
+            z[source_index],
+            r[source_index],
+            dt,
+            Wz,
+            Wh,
+            Wr,
+            dz[source_index],
+            dh[source_index],
+            dr[source_index],
+            s_h[source_index],
+            one_z[source_index],
+        )
+
+
+def _backprop(var, grad):  # pragma: no cover
+    if not var.constant:
+        if var._grad is None:
+            var._grad = np.asarray(grad)
+        else:
+            var._grad += grad
+
+
+class GRUnit(Operation):
+    def __call__(
+        self, X, Uz, Wz, bz, Ur, Wr, br, Uh, Wh, bh, s0=None, bp_lim=None, dropout=0.0
+    ):
+        if bp_lim is not None:
+            assert isinstance(bp_lim, Integral) and 0 <= bp_lim < len(X)
+        assert 0.0 <= dropout < 1.0
+        self._dropout = dropout
+        self.bp_lim = bp_lim if bp_lim is not None else len(X) - 1
+
+        self.X = X  # type: Tensor  # shape=(T, N, C)
+
+        self.Uz = Uz  # type: Tensor  # shape=(C, D)
+        self.Wz = Wz  # type: Tensor  # shape=(D, D)
+        self.bz = bz  # type: Tensor  # shape=(D,)
+
+        self.Ur = Ur  # type: Tensor  # shape=(C, D)
+        self.Wr = Wr  # type: Tensor  # shape=(D, D)
+        self.br = br  # type: Tensor  # shape=(D,)
+
+        self.Uh = Uh  # type: Tensor  # shape=(C, D)
+        self.Wh = Wh  # type: Tensor  # shape=(D, D)
+        self.bh = bh  # type: Tensor  # shape=(D,)
+
+        self.variables = (
+            self.X,
+            self.Uz,
+            self.Wz,
+            self.bz,
+            self.Ur,
+            self.Wr,
+            self.br,
+            self.Uh,
+            self.Wh,
+            self.bh,
+        )
+
+        self.type = max(t.dtype for t in self.variables)
+
+        T, N, C = X.shape
+        (D,) = bz.shape
+
+        seq = self.X.data
+
+        # t starts at 0 for S; all other sequences begin at t = 1
+        out = np.zeros((T + 1, N, D), dtype=self.type)
+
+        if s0 is not None:
+            out[0] = s0.data if isinstance(s0, Tensor) else s0
+
+        # compute all contributions to Z, R, H from the input sequence
+        # shape: T, N, D
+        z = np.tensordot(seq, self.Uz.data, [[-1], [0]]).astype(self.type, copy=False)
+        r = np.tensordot(seq, self.Ur.data, [[-1], [0]]).astype(self.type, copy=False)
+        h = np.tensordot(seq, self.Uh.data, [[-1], [0]]).astype(self.type, copy=False)
+
+        if dropout:
+            p = 1 - dropout
+            # For Uz/Ur/Uh: a dropout mask is generated for each datum and is applied uniformly across T
+            self._dropUz, self._dropUr, self._dropUh = (
+                np.random.binomial(1, p, size=(3, 1, N, D)) / p
+            )
+            self._dropWz, self._dropWr, self._dropWh = (
+                np.random.binomial(1, p, size=(3, D, D)) / p
+            )
+
+            z *= self._dropUz
+            r *= self._dropUr
+            h *= self._dropUh
+
+            Wz = (self._dropWz * self.Wz.data).astype(self.type, copy=False)
+            Wr = (self._dropWr * self.Wr.data).astype(self.type, copy=False)
+            Wh = (self._dropWh * self.Wh.data).astype(self.type, copy=False)
+
+        else:
+            self._dropUz, self._dropUr, self._dropUh = None, None, None
+            self._dropWz, self._dropWr, self._dropWh = None, None, None
+            Wz = self.Wz.data.astype(self.type, copy=False)
+            Wr = self.Wr.data.astype(self.type, copy=False)
+            Wh = self.Wh.data.astype(self.type, copy=False)
+
+        z += bz.data.astype(self.type, copy=False)  # X Uz + bz
+        r += br.data.astype(self.type, copy=False)  # X Ur + br
+        h += bh.data.astype(self.type, copy=False)  # X Uh + bh
+
+        _gru_layer(out, z, r, h, Wz, Wr, Wh)
+
+        self._z = z
+        self._r = r
+        self._h = h
+
+        return out
+
+    def backward_var(self, grad, index, **kwargs):
+        raise SkipGradient("Gradient computed in GRU.backward()")
+
+    def backward(self, grad, **kwargs):
+        hidden_seq = self._hidden_seq()
+        if hidden_seq is None:  # pragma: no cover
+            assert False, "should be unreachable"
+
+        s = hidden_seq.data[:-1]
+        z = self._z
+        r = self._r
+        h = self._h
+
+        dLds = grad[1:].astype(self.type, copy=False)
+
+        const = {"1 - h**2": d_tanh(h), "z*(1 - z)": d_sig(z), "r*(1 - r)": d_sig(r)}
+
+        if self._dropout:
+            Wz = (self._dropWz * self.Wz.data).astype(self.type, copy=False)
+            Wr = (self._dropWr * self.Wr.data).astype(self.type, copy=False)
+            Wh = (self._dropWh * self.Wh.data).astype(self.type, copy=False)
+        else:
+            Wz = self.Wz.data.astype(self.type, copy=False)
+            Wr = self.Wr.data.astype(self.type, copy=False)
+            Wh = self.Wh.data.astype(self.type, copy=False)
+
+        const["s - h"] = s - h
+        const["1 - z"] = 1 - z
+
+        _gru_bptt(
+            self.X.data,
+            dLds,
+            s,
+            z,
+            r,
+            Wz,
+            Wh,
+            Wr,
+            const["z*(1 - z)"],
+            const["1 - h**2"],
+            const["r*(1 - r)"],
+            const["s - h"],
+            const["1 - z"],
+            self.bp_lim,
+        )
+
+        zgrad = dLds * const["s - h"]  # dL / dz
+        hgrad = dLds * const["1 - z"]  # dL / dh
+        rgrad = dot(const["1 - h**2"] * hgrad, Wh.T) * s  # dL / dr
+
+        hidden_seq._grad = dLds
+
+        if not (self.Uz.constant and self.Wz.constant and self.bz.constant):
+            dz = zgrad * const["z*(1 - z)"]
+        # backprop through Wz
+        if not self.Wz.constant:
+            dWz = np.tensordot(s, dz, ([0, 1], [0, 1]))
+            if self._dropout:
+                dWz *= self._dropWz
+            _backprop(
+                self.Wz, dWz.astype(self.Wz.dtype, copy=False)
+            )  # self.Wz.backward(dWz, **kwargs)
+        # backprop through bz
+        if not self.bz.constant:
+            _backprop(self.bz, dz.sum(axis=(0, 1), dtype=self.bz.dtype))
+        # backprop through bz
+        if not self.Uz.constant:
+            if self._dropout:
+                dz *= (
+                    self._dropUz
+                )  # IMPORTANT augmented update: this must come after Wz and bz backprop
+            _backprop(
+                self.Uz,
+                np.tensordot(self.X.data, dz, ([0, 1], [0, 1])).astype(
+                    self.Uz.dtype, copy=False
+                ),
+            )
+
+        if not (self.Ur.constant and self.Wr.constant and self.br.constant):
+            dr = rgrad * const["r*(1 - r)"]
+        # backprop through Wr
+        if not self.Wr.constant:
+            dWr = np.tensordot(s, dr, ([0, 1], [0, 1]))
+            if self._dropout:
+                dWr *= self._dropWr
+            _backprop(self.Wr, dWr.astype(self.Wr.dtype, copy=False))
+        # backprop through br
+        if not self.br.constant:
+            _backprop(
+                self.br, dr.sum(axis=(0, 1), dtype=self.br.dtype)
+            )  # self.br.backward(dr.sum(axis=(0, 1)), **kwargs)
+        # backprop through Ur
+        if not self.Ur.constant:
+            if self._dropout:
+                dr *= (
+                    self._dropUr
+                )  # IMPORTANT augmented update: this must come after Wr and br backprop
+            _backprop(
+                self.Ur,
+                np.tensordot(self.X.data, dr, ([0, 1], [0, 1])).astype(
+                    self.Ur.dtype, copy=False
+                ),
+            )
+
+        if not (self.Uh.constant and self.Wh.constant and self.bh.constant):
+            dh = hgrad * const["1 - h**2"]
+        # backprop through Wh
+        if not self.Wh.constant:
+            dWh = np.tensordot((s * r), dh, ([0, 1], [0, 1]))
+            if self._dropout:
+                dWh *= self._dropWh
+            _backprop(
+                self.Wh, dWh.astype(self.Wh.dtype, copy=False)
+            )  # self.Wh.backward(dWh, **kwargs)
+        # backprop through bh
+        if not self.bh.constant:
+            _backprop(
+                self.bh, dh.sum(axis=(0, 1), dtype=self.bh.dtype)
+            )  # self.bh.backward(dh.sum(axis=(0, 1)), **kwargs)
+        # backprop through Uh
+        if not self.Uh.constant:
+            if self._dropout:
+                dh *= (
+                    self._dropUh
+                )  # IMPORTANT augmented update: this must come after Wh and bh backprop
+            _backprop(
+                self.Uh,
+                np.tensordot(self.X.data, dh, ([0, 1], [0, 1])).astype(
+                    self.Uh.dtype, copy=False
+                ),
+            )
+
+        # backprop through X
+        if not self.X.constant:
+            tmp = dLds * const["1 - z"] * const["1 - h**2"]
+            if not self._dropout:
+                dLdX = np.dot(
+                    (dLds * const["s - h"]) * const["z*(1 - z)"], self.Uz.data.T
+                )
+                dLdX += np.dot(tmp, self.Uh.data.T)
+                dLdX += np.dot(
+                    np.dot(tmp, Wh.T) * s * const["r*(1 - r)"], self.Ur.data.T
+                )
+            else:
+                dLdX = np.dot(
+                    (self._dropUz * (dLds * const["s - h"]) * const["z*(1 - z)"]),
+                    self.Uz.data.T,
+                )
+                dLdX += np.dot(self._dropUh * tmp, self.Uh.data.T)
+                dLdX += np.dot(
+                    self._dropUr * (dot(tmp, Wh.T) * s * const["r*(1 - r)"]),
+                    self.Ur.data.T,
+                )
+            _backprop(
+                self.X, dLdX.astype(self.X.dtype, copy=False)
+            )  # self.X.backward(dLdX, **kwargs)
+
+        del self._z
+        del self._r
+        del self._h
+
+        super().backward(grad)
+
+
+
[docs]def gru( + X, + Uz, + Wz, + bz, + Ur, + Wr, + br, + Uh, + Wh, + bh, + s0=None, + bp_lim=None, + dropout=0.0, + constant=None, +): + r"""Performs a forward pass of sequential data through a Gated Recurrent Unit layer, returning + the 'hidden-descriptors' arrived at by utilizing the trainable parameters as follows:: + + Z_{t} = sigmoid(X_{t} Uz + S_{t-1} Wz + bz) + R_{t} = sigmoid(X_{t} Ur + S_{t-1} Wr + br) + H_{t} = tanh(X_{t} Uh + (R{t} * S_{t-1}) Wh + bh) + S_{t} = (1 - Z{t}) * H{t} + Z{t} * S_{t-1} + + Parameters + ---------- + X : array_like, shape=(T, N, C) + The sequential data to be passed forward. + + Uz : array_like, shape=(C, D) + The weights used to map sequential data to its hidden-descriptor representation + + Wz : array_like, shape=(D, D) + The weights used to map a hidden-descriptor to a hidden-descriptor. + + bz : array_like, shape=(D,) + The biases used to scale a hidden-descriptor. + + Ur : array_like, shape=(C, D) + The weights used to map sequential data to its hidden-descriptor representation + + Wr : array_like, shape=(D, D) + The weights used to map a hidden-descriptor to a hidden-descriptor. + + br : array_like, shape=(D,) + The biases used to scale a hidden-descriptor. + + Uh : array_like, shape=(C, D) + The weights used to map sequential data to its hidden-descriptor representation + + Wh : array_like, shape=(D, D) + The weights used to map a hidden-descriptor to a hidden-descriptor. + + bh : array_like, shape=(D,) + The biases used to scale a hidden-descriptor. + + s0 : Optional[array_like], shape=(N, D) + The 'seed' hidden descriptors to feed into the RNN. If None, a Tensor + of zeros of shape (N, D) is created. + + bp_lim : Optional[int] + *This feature is experimental and is currently untested*. + The (non-zero) limit of the depth of back propagation through time to be + performed. If `None` back propagation is passed back through the entire sequence. + + E.g. `bp_lim=3` will propagate gradients only up to 3 steps backward through the + recursive sequence. + + dropout : float (default=0.), 0 <= dropout < 1 + If non-zero, the dropout scheme described in [1]_ is applied. See Notes + for more details. + + constant : bool, optional (default=False) + If True, the resulting Tensor is a constant. + + Returns + ------- + mygrad.Tensor, shape=(T+1, N, D) + The sequence of 'hidden-descriptors' produced by the forward pass of the RNN. + + Notes + ----- + - :math:`T` : Sequence length + - :math:`N` : Batch size + - :math:`C` : Length of single datum + - :math:`D` : Length of 'hidden' descriptor + + The GRU system of equations is given by: + + .. math:: + + Z_{t} = \sigma (X_{t} U_z + S_{t-1} Wz + bz) + + R_{t} = \sigma (X_{t} U_r + S_{t-1} Wr + br) + + H_{t} = tanh(X_{t} U_h + (R_{t} * S_{t-1}) W_h + b_h) + + S_{t} = (1 - Z_{t}) * H_{t} + Z_{t} * S_{t-1} + + Following the dropout scheme specified in [1]_, the hidden-hidden weights (Wz/Wr/Wh) + randomly have their weights dropped prior to forward/back-prop. The input connections + (via Uz/Ur/Uh) have variational dropout ([2]_) applied to them with a common dropout + mask across all t. That is three static dropout masks, each with shape-(N,D), are + applied to + + .. math:: + X_{t} U_z + + X_{t} U_r + + X_{t} U_h + respectively, for all :math:`t`. + + References + ---------- + .. [1] S. Merity, et. al. "Regularizing and Optimizing LSTM Language Models", + arXiv:1708.02182v1, 2017. + + .. [2] Y. Gal, Z. 
Ghahramani "A Theoretically Grounded Application of Dropout + in Recurrent Neural Networks" arXiv:1512.05287v5, 2016.""" + if s0 is not None: + if not isinstance(s0, np.ndarray) and not ( + isinstance(s0, Tensor) and (constant or s0.constant) + ): + raise ValueError( + "GRU does not support non-constant tensors for the initial hidden" + "state value, `s0`" + ) + s = Tensor._op( + GRUnit, + X, + Uz, + Wz, + bz, + Ur, + Wr, + br, + Uh, + Wh, + bh, + op_kwargs=dict(s0=s0, bp_lim=bp_lim, dropout=dropout), + constant=constant, + ) + try: + s.creator._hidden_seq = weakref.ref(s) + except AttributeError: # pragma: no cover + # `no-autodiff` mode does not record creator + pass + return s
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/nnet/layers/pooling.html b/docs/_modules/mygrad/nnet/layers/pooling.html new file mode 100644 index 00000000..4ae05cdb --- /dev/null +++ b/docs/_modules/mygrad/nnet/layers/pooling.html @@ -0,0 +1,817 @@ + + + + + + + + + + mygrad.nnet.layers.pooling — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for mygrad.nnet.layers.pooling

+from numbers import Integral
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+from mygrad.nnet.layers.utils import sliding_window_view
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+
+class MaxPoolND(Operation):
+    def __call__(self, x, pool, stride):
+        """Perform max-pooling over the last N dimensions of a data batch.
+
+        Extended Summary
+        ----------------
+        The data consists of N trailing axes to be pooled over, denoted by ``C0, ...``. These
+        can be preceded, optionally, by un-pooled axes, denoted by ``(N0, ...)``. The dimensions
+        of the window over which pooling is performed are denoted by ``P0, ...``. The window
+        is placed with stride values ``S0, ...``.
+
+        Ultimately the pooled channels have a shape ``G0, ...``.
+
+        Parameters
+        ----------
+        x : mygrad.Tensor, shape=([...], C0, ...)
+            The data batch; to be pooled along the trailing axes denoted by ``C0, ...``.
+
+        pool : Tuple[Integral, ...], (P0, ...)
+            The extent of the pooling window along the ``(C0, ...)`` axes, respectively. The
+            length of `pool` determines ``N`` - the number of trailing dimensions to pool over.
+
+        stride : Union[Integral, Tuple[Integral, ...]], (S0, ...)
+            The spacing used to place the pooling window, along ``(P0, ...)`` axes, respectively.
+            If a single value is provided, it is used for all N pooling axes.
+
+        Returns
+        -------
+        numpy.ndarray, shape=([...], G0, ...)
+            The pooled data batch.
+
+        Notes
+        -----
+        Only 'valid' placements of the pooling window are permitted - the pooling
+        window cannot extend past the "boundaries" of the data
+        dimensions.
+        """
+        self.variables = (x,)  # data: ((N0, ...), C0, ...)
+        x = x.data
+
+        assert isinstance(pool, (tuple, list, np.ndarray)) and all(
+            i >= 0 and isinstance(i, Integral) for i in pool
+        )
+        pool = np.asarray(pool, dtype=int)
+        assert all(i > 0 for i in pool)
+        assert x.ndim >= len(
+            pool
+        ), "The number of pooled dimensions cannot exceed the dimensionality of the data."
+
+        stride = (
+            np.array([stride] * len(pool))
+            if isinstance(stride, Integral)
+            else np.asarray(stride, dtype=int)
+        )
+        assert len(stride) == len(pool) and all(
+            s >= 1 and isinstance(s, Integral) for s in stride
+        )
+
+        self.pool = pool  # (P0, ...)
+        self.stride = stride  # (S0, ...)
+
+        num_pool = len(pool)
+        num_no_pool = x.ndim - num_pool
+
+        x_shape = np.array(x.shape[num_no_pool:])
+        w_shape = pool
+
+        out_shape = (x_shape - w_shape) / stride + 1
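+        # e.g. trailing data shape (6, 6) with a (2, 2) window and stride (2, 2):
+        # out_shape = ((6, 6) - (2, 2)) / (2, 2) + 1 = (3., 3.) -> three placements per axis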
+
+        if not all(i.is_integer() and i > 0 for i in out_shape):
+            msg = "Stride and kernel dimensions are incompatible: \n"
+            msg += f"Input dimensions: {(tuple(x_shape))}\n"
+            msg += f"Stride dimensions: {(tuple(stride))}\n"
+            msg += f"Pooling dimensions: {(tuple(w_shape))}\n"
+            raise ValueError(msg)
+
+        pool_axes = tuple(-(i + 1) for i in range(num_pool))
+
+        # (G0, ...) is the tuple of grid-positions for placing each window (not including stride)
+        # sliding_window_view(x): ((N0, ...), C0, ...)          -> (G0, ..., (N0, ...), P0, ...)
+        # max-pool:               (G0, ..., (N0, ...), P0, ...) -> (G0, ..., (N0, ...))
+        maxed = sliding_window_view(x, self.pool, self.stride).max(axis=pool_axes)
+        axes = tuple(range(maxed.ndim))
+
+        # (G0, ..., (N0, ...)) -> ((N0, ...), G0, ...)
+        out = maxed.transpose(axes[-num_no_pool:] + axes[:-num_no_pool])
+        return out if out.flags["C_CONTIGUOUS"] else np.ascontiguousarray(out)
+
+    def backward_var(self, grad, index, **kwargs):
+        """Parameters
+        ----------
+        grad : numpy.ndarray, shape=((N0, ...), G0, ...),
+        index : int"""
+        var = self.variables[index]
+        x = var.data
+        num_pool = len(self.pool)
+
+        sl = sliding_window_view(x, self.pool, self.stride)
+        grid_shape = sl.shape
+        maxed = sl.reshape(*sl.shape[:-num_pool], -1).argmax(-1)
+        axes = tuple(range(maxed.ndim))
+
+        # argmax within a given flat-window
+        maxed = maxed.transpose(
+            axes[num_pool:] + axes[:num_pool]
+        )  # ((N0, ...), G0, ...)
+
+        # flat-index offset associated with reshaped window within `x`
+        row_major_offset = tuple(np.cumprod(x.shape[-num_pool:][:0:-1])[::-1]) + (1,)
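+        # e.g. pooled trailing dims of shape (4, 5) give offsets (5, 1): advancing one
+        # position along the first pooled axis moves the flat index by 5, along the last by 1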
+
+        # flat index of argmax, updated based on position within window, according to shape of `x`
+        in_window_offset = sum(
+            ind * off
+            for ind, off in zip(np.unravel_index(maxed, self.pool), row_major_offset)
+        )
+
+        # flat-index of strided window placement, relative to `x`
+        window_offset = sum(
+            ind * s * off
+            for ind, s, off in zip(
+                np.indices(grid_shape[:num_pool]), self.stride, row_major_offset
+            )
+        )
+
+        # indices required to traverse pool-axis-flattened array
+        # ((N0, ...) G0*...)
+        flat_grid_shape = (*maxed.shape[:-num_pool], np.prod(maxed.shape[-num_pool:]))
+        index = np.indices(flat_grid_shape)
+
+        # update trailing indices to traverse location of max entries within pooled axes
+        index[-1] = (in_window_offset + window_offset).reshape(
+            *flat_grid_shape[:-1], -1
+        )
+
+        # accumulate gradient within pool-axis-flattened dx, then reshape to match shape of `x`
+        dx = np.zeros(x.shape[:-num_pool] + (np.prod(x.shape[-num_pool:]),))
+        np.add.at(dx, tuple(index), grad.reshape(*x.shape[:-num_pool], -1))
+        return dx.reshape(x.shape)
+
+
+
[docs]def max_pool( + x: ArrayLike, + pool: Tuple[int, ...], + stride: Union[int, Tuple[int, ...]], + *, + constant: Optional[bool] = None, +) -> Tensor: + """Perform max-pooling over the last N dimensions of a data batch. + + Extended Summary + ---------------- + The data consists of N trailing axes to be pooled over, denoted by ``C0, ...``. These + can be preceded, optionally, by un-pooled axes, denoted by ``(N0, ...)``. The dimensions + of the window over which pooling is performed is denoted by ``P0, ...``. The window + is placed with stride values ``S0, ...``. + + Ultimately the pooled channels have a shape ``G0, ...``. + + Parameters + ---------- + x : mygrad.Tensor, shape=([...], C0, ...) + The data batch; to be pooled along the trailing axes denoted by ``C0, ...``. + + pool : Tuple[Integral, ...], (P0, ...) + The extent of the pooling window along the ``(C0, ...)`` axes, respectively. The + length of `pool` determines ``N`` - the number of trailing dimensions to pool over. + + stride : Union[Integral, Tuple[Integral, ...]], (S0, ...) + The spacing used to place the pooling window, along ``(P0, ...)`` axes, respectively. + If a single value is provided, it is used for all ``N`` pooling axes. + + constant : Optional[None] + If True, the resulting Tensor is a constant. + Returns + ------- + Tensor, shape=([...], G0, ...) + The pooled data batch. + + Notes + ----- + Only "valid" placements of the pooling window are permitted - the pooling + window cannot extend passed the "boundaries" of the data + dimensions. + + Examples + -------- + Simple 2D pooling on a 2D tensor. Tiling a 2x2 max-pool window with + stride-1 over a shape-(3, 3) tensor ``x``: + + >>> import mygrad as mg + >>> from mygrad.nnet import max_pool + >>> x = mg.Tensor([[0., 10., 8.], + ... [2., 7., 3.], + ... [5., 7., 20.]]) + >>> out = max_pool(x, pool=(2, 2), stride=1) + >>> out + Tensor([[ 10., 10.], + [ 7., 20.]]) + >>> out.sum().backward() # sum to reduce to scalar for back-prop + >>> x.grad # dout/dx + array([[0., 2., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Let's perform 1D pooling on a 2D tensor. Each row of the tensor + will be pooled over independently. Let's apply a size-2 max-pool + window to each row of ``x``, using a stride of 1: + + >>> x = mg.Tensor([[0., 10., 8.], + ... [9., 7., 3.], + ... [5., 0., 20.]]) + >>> max_pool(x, pool=(2,), stride=1) + Tensor([[10., 10.], + [ 9., 7.], + [ 5., 20.]]) + + Here we perform pooling over the trailing two dimensions of a + 4D tensor, ``x``. By specifying ``pool = (2, 2)``, we instruct + ``max_pool`` to tile a 2x2 pooling window along these last two + axes. Let's apply the window every two rows, and for each column; + i.e. we specify ``stride = (2, 1)``: + + >>> import numpy as np + >>> x = mg.Tensor(np.random.rand(10, 3, 12, 12)) + >>> pool = (2, 2) # 2x2 pooling over the last axes + >>> stride = (2, 1) # Apply 2x1 stride + >>> out = max_pool(x, pool, stride) # max-pooled Tensor + >>> out.shape + (10, 3, 6, 11) + + Had we specified, say, ``pool = (3, 2, 2)``, then a 3x2x2 + pooling window would have been tiled along the last *three* axes + of ``x``. + """ + return Tensor._op(MaxPoolND, x, op_args=(pool, stride), constant=constant)
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/layers/utils.html b/docs/_modules/mygrad/nnet/layers/utils.html new file mode 100644 index 00000000..9b8461e1 --- /dev/null +++ b/docs/_modules/mygrad/nnet/layers/utils.html @@ -0,0 +1,799 @@
mygrad.nnet.layers.utils — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.layers.utils

+from numbers import Integral
+
+import numpy as np
+from numpy.lib.stride_tricks import as_strided
+
+
+
[docs]def sliding_window_view(arr, window_shape, step, dilation=None): + """Create a sliding window view over the trailing dimensions of an array. + No copy is made unless the input array is not contiguous in memory. + + The window is applied only to valid regions of ``arr``, but is applied greedily. + + See Notes section for details. + + Parameters + ---------- + arr : numpy.ndarray, shape=(..., [x, (...), z]) + C-contiguous array over which sliding view-window is applied along the trailing + dimensions ``[x, ..., z]``, as determined by the length of ``window_shape``. + + If ``arr`` is not C-contiguous, it will be replaced by ``numpy.ascontiguousarray(arr)`` + + window_shape : Sequence[int] + Specifies the shape of the view-window: ``[Wx, (...), Wz]``. + The length of `window_shape` determines the length of ``[x, (...) , z]``. + + step : Union[int, Sequence[int]] + The step sized used along the ``[x, (...), z]`` dimensions: ``[Sx, (...), Sz]``. + If a single integer is specified, a uniform step size is used. + + dilation : Optional[Union[int, Sequence[int]]] + The dilation factor used along the ``[x, (...), z]`` directions: ``[Dx, (...), Dz]``. + If no value is specified, a dilation factor of 1 is used along each direction. + Dilation specifies the step size used when filling the window's elements + + Returns + ------- + numpy.ndarray + A contiguous view of ``arr``, of shape ``([X, (...), Z], ..., [Wx, (...), Wz])``, where + ``[X, ..., Z]`` is the shape of the grid on which the window was applied. See Notes + sections for more details. + + Raises + ------ + ValueError, TypeError + Invalid step-size, window shape, or dilation + + Notes + ----- + Window placement: + Given a dimension of size x, with a window of size W along this dimension, applied + with stride S and dilation D, the window will be applied:: + X = (x - (W - 1) * D + 1) // S + 1 + number of times along that dimension. + + Interpreting output: + In general, given an array ``arr`` of shape (..., x, (...), z), and:: + + out = sliding_window_view(arr, window_shape=[Wx, (...), Wz], step=[Sx, (...), Sz]) + + then indexing ``out`` with ``[ix, (...), iz]`` produces the following view of ``x``:: + + out[ix, (...), iz] == + x[..., ix*Sx:(ix*Sx + Wx*Dx):Dx, (...), iz*Sz:(iz*Sz + Wz*Dz):Dz] + + For example, suppose ``arr`` is an array of shape-(10, 12, 6). Specifying sliding + window of shape ``(3, 3)`` with step size ``(2, 2)``, dilation ``(2, 1)`` will create the view:: + + [[arr[:, 0:6:2, 0:3], arr[:, 0:6:3, 3:6]] + [arr[:, 6:12:2, 0:3], arr[:, 6:12:12, 3:6]]] + + producing a view of shape ``(2, 2, 10, 3, 3)`` in total. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(36).reshape(6, 6) + >>> x + array([[ 0, 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10, 11], + [12, 13, 14, 15, 16, 17], + [18, 19, 20, 21, 22, 23], + [24, 25, 26, 27, 28, 29], + [30, 31, 32, 33, 34, 35]]) + + Apply an 3x2 window with step-sizes of (2, 2). This results in + the window being placed twice along axis-0 and three times along axis-1. 
+ + >>> y = sliding_window_view(x, step=(2, 2), window_shape=(3, 2)) + >>> y.shape + (2, 3, 3, 2) + + window applied at (0, 0) + + >>> y[0, 0] + array([[ 0, 1], + [ 6, 7], + [12, 13]]) + + window applied at (2, 0) + + >>> y[1, 0] + array([[12, 13], + [18, 19], + [24, 25]]) + + window applied at (0, 2) + + >>> y[0, 1] + array([[ 2, 3], + [ 8, 9], + [14, 15]]) + + verify that an element in this window-view is correct + + >>> i, j = np.random.randint(0, 2, size=2) + >>> wx, wy = (2, 2) + >>> sx, sy = (2, 2) + >>> np.all(y[i, j] == x[..., i*sx:(i*sx + wx), j*sy:(j*sy + wy)]) + True + """ + + if not hasattr(window_shape, "__iter__"): + raise TypeError( + f"`window_shape` must be a sequence of positive integers, got: {window_shape}" + ) + window_shape = tuple(window_shape) + if not all(isinstance(i, Integral) and i > 0 for i in window_shape): + raise TypeError( + f"`window_shape` must be a sequence of positive integers, " + f"got: {window_shape}" + ) + + if len(window_shape) > arr.ndim: + raise ValueError( + f"`window_shape` ({window_shape}) cannot specify more values than " + f"`arr.ndim` ({arr.ndim})." + ) + + if not isinstance(step, Integral) and not hasattr(step, "__iter__"): + raise TypeError( + f"`step` must be a positive integer or a sequence of positive " + f"integers, got: {step}" + ) + + step = ( + (int(step),) * len(window_shape) if isinstance(step, Integral) else tuple(step) + ) + + if not all(isinstance(i, Integral) and i > 0 for i in step): + raise ValueError( + f"`step` must be a positive integer or a sequence of positive " + f"integers, got: {step}" + ) + + if any(i > j for i, j in zip(window_shape[::-1], arr.shape[::-1])): + raise ValueError( + f"Each size of the window-shape must fit within the trailing " + f"dimensions of `arr`." + f"{window_shape} does not fit in {arr.shape[-len(window_shape) :]}" + ) + + if ( + dilation is not None + and not isinstance(dilation, Integral) + and not hasattr(dilation, "__iter__") + ): + raise TypeError( + f"`dilation` must be None, a positive integer, or a sequence of " + f"positive integers, got: {dilation}" + ) + if dilation is None: + dilation = np.ones((len(window_shape),), dtype=int) + else: + if isinstance(dilation, Integral): + dilation = np.full((len(window_shape),), fill_value=dilation, dtype=int) + else: + np.asarray(dilation) + + if not all(isinstance(i, Integral) and i > 0 for i in dilation) or len( + dilation + ) != len(window_shape): + raise ValueError( + f"`dilation` must be None, a positive integer, or a sequence of " + f"positive integers with the same length as `window_shape` " + f"({window_shape}), got: {dilation}" + ) + if any( + w * d > s + for w, d, s in zip(window_shape[::-1], dilation[::-1], arr.shape[::-1]) + ): + raise ValueError( + f"The dilated window ({tuple(w * d for w, d in zip(window_shape, dilation))}) " + f"must fit within the trailing " + f"dimensions of `arr` ({arr.shape[-len(window_shape) :]})" + ) + + if not arr.flags["C_CONTIGUOUS"]: + arr = np.ascontiguousarray(arr) + + step = np.array(step) # (Sx, ..., Sz) + window_shape = np.array(window_shape) # (Wx, ..., Wz) + in_shape = np.array(arr.shape[-len(step) :]) # (x, ... 
, z) + nbyte = arr.strides[-1] # size, in bytes, of element in `arr` + + # per-byte strides required to fill a window + win_stride = tuple(np.cumprod(arr.shape[:0:-1])[::-1]) + (1,) + + # per-byte strides required to advance the window + step_stride = tuple(win_stride[-len(step) :] * step) + + # update win_stride to accommodate dilation + win_stride = np.array(win_stride) + win_stride[-len(step) :] *= dilation + win_stride = tuple(win_stride) + + # tuple of bytes to step to traverse corresponding dimensions of view + # see: 'internal memory layout of an ndarray' + stride = tuple(int(nbyte * i) for i in step_stride + win_stride) + + # number of window placements along x-dim: X = (x - (Wx - 1)*Dx + 1) // Sx + 1 + out_shape = tuple((in_shape - ((window_shape - 1) * dilation + 1)) // step + 1) + + # ([X, (...), Z], ..., [Wx, (...), Wz]) + out_shape = out_shape + arr.shape[: -len(step)] + tuple(window_shape) + out_shape = tuple(int(i) for i in out_shape) + + return as_strided(arr, shape=out_shape, strides=stride, writeable=False)
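The doctest above leaves ``dilation`` unexercised; a small sketch of dilated windowing follows (the import path is the one shown in this module's own imports earlier in the diff):

>>> import numpy as np
>>> from mygrad.nnet.layers.utils import sliding_window_view
>>> x = np.arange(16).reshape(4, 4)
>>> y = sliding_window_view(x, window_shape=(2, 2), step=(1, 1), dilation=(2, 2))
>>> y.shape  # the dilated window spans 3 cells per axis: (4 - 3) // 1 + 1 = 2 placements each
(2, 2, 2, 2)
>>> y[0, 0]  # window anchored at (0, 0), sampling every 2nd element
array([[ 0,  2],
       [ 8, 10]])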
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/losses/focal_loss.html b/docs/_modules/mygrad/nnet/losses/focal_loss.html new file mode 100644 index 00000000..fb8e9913 --- /dev/null +++ b/docs/_modules/mygrad/nnet/losses/focal_loss.html @@ -0,0 +1,793 @@
mygrad.nnet.losses.focal_loss — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.losses.focal_loss

+from numbers import Real
+from typing import Optional
+
+import numpy as np
+
+import mygrad._utils.graph_tracking as _tracking
+from mygrad.nnet.activations import softmax
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor, asarray
+from mygrad.typing import ArrayLike
+
+from ._utils import check_loss_inputs
+
+__all__ = ["softmax_focal_loss", "focal_loss"]
+
+
+class FocalLoss(Operation):
+    r"""Returns the per-datum focal loss as described in https://arxiv.org/abs/1708.02002
+    which is given by -ɑ(1-p)ˠlog(p).
+
+    Extended Description
+    --------------------
+    The focal loss is given by
+
+    .. math::
+        \frac{1}{N}\sum\limits_{i=1}^{N}-\alpha \hat{y}_i(1-p_i)^\gamma\log(p_i)
+
+    where :math:`N` is the number of elements in `x` and `y` and :math:`\hat{y}_i` is
+    one where :math:`i` is the label of the element :math:`y_i` and 0 elsewhere. That is,
+    if the label :math:`y_k` is 1 and there are four possible label values, then
+    :math:`\hat{y}_k = (0, 1, 0, 0)`.
+
+    It is recommended in the paper that you normalize by the number of foreground samples.
+    """
+
+    def __call__(self, class_probs, targets, alpha, gamma):
+        """
+        Parameters
+        ----------
+        class_probs : mygrad.Tensor, shape=(N, C)
+            The C class scores for each of the N pieces of data.
+
+        targets : Union[mygrad.Tensor, ArrayLike], shape=(N,)
+            The correct class indices, in [0, C), for each datum.
+
+        alpha : Real
+            The ɑ weighting factor in the loss formulation.
+
+        gamma : Real
+            The ɣ focusing parameter.
+
+        Returns
+        -------
+        numpy.ndarray
+            The per-datum focal loss.
+        """
+        if isinstance(targets, Tensor):  # pragma: nocover
+            targets = targets.data
+
+        check_loss_inputs(class_probs, targets)
+
+        self.variables = (class_probs,)
+        self.label_locs = (range(len(class_probs)), targets)
+
+        class_probs = asarray(class_probs)
+        pc = class_probs[self.label_locs]
+        one_m_pc = np.clip(1 - pc, a_min=0, a_max=1)
+        log_pc = np.log(pc)
+
+        one_m_pc_gamma = one_m_pc**gamma
+        loss = -(alpha * one_m_pc_gamma * log_pc)
+
+        if not _tracking.TRACK_GRAPH:
+            return loss
+
+        self.back = np.zeros(class_probs.shape, dtype=np.float64)
+
+        if np.isclose(gamma, 0, atol=1e-15):
+            self.back[self.label_locs] -= alpha / pc
+            return loss
+
+        # dL/dp = -alpha * ( (1 - p)**g / p - g * (1 - p)**(g - 1) * log(p) )
+        #
+        # term 1: (1 - p)**g / p
+        term1 = one_m_pc_gamma / pc  # (1 - p)**g / p
+
+        # term 2: - g * (1 - p)**(g - 1) * log(p)
+        if np.isclose(gamma, 1, rtol=1e-15):
+            term2 = -log_pc
+        elif gamma < 1:
+            # For g < 1 and p -> 1, the 2nd term -> 0 via L'Hôpital's rule
+            term2 = np.zeros(pc.shape, dtype=class_probs.dtype)
+            pc_not_1 = ~np.isclose(one_m_pc, 0, atol=1e-25)
+            term2[pc_not_1] = (
+                -gamma * one_m_pc[pc_not_1] ** (gamma - 1) * log_pc[pc_not_1]
+            )
+        else:
+            term2 = -gamma * one_m_pc ** (gamma - 1) * log_pc
+
+        self.back[self.label_locs] -= alpha * (term1 + term2)
+        return loss
+
+    def backward_var(self, grad, index, **kwargs):
+        self.back[self.label_locs] *= grad
+        return self.back
+
+
+
[docs]def focal_loss( + class_probs: ArrayLike, + targets: ArrayLike, + *, + alpha: float = 1, + gamma: float = 0, + constant: Optional[bool] = None, +) -> Tensor: + r"""Return the per-datum focal loss. + + Parameters + ---------- + class_probs : ArrayLike, shape=(N, C) + The C class probabilities for each of the N pieces of data. + Each value is expected to lie on (0, 1] + + targets : ArrayLike, shape=(N,) + The correct class indices, in [0, C), for each datum. + + alpha : Real, optional (default=1) + The ɑ weighting factor in the loss formulation. + + gamma : Real, optional (default=0) + The ɣ focusing parameter. Note that for Ɣ=0 and ɑ=1, this is cross-entropy loss. + Must be a non-negative value. + + constant : Optional[bool] + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor, shape=(N,) + The per-datum focal loss. + + Notes + ----- + The formulation for the focal loss introduced in https://arxiv.org/abs/1708.02002. + It is given by -ɑ(1-p)ˠlog(p). + + + The focal loss for datum-:math:`i` is given by + + .. math:: + -\alpha \hat{y}_i(1-p_i)^\gamma\log(p_i) + + where :math:`\hat{y}_i` is one in correspondence to the label associated with the + datum and 0 elsewhere. That is, if the label :math:`y_k` is 2 and + there are four possible label values, then :math:`\hat{y}_k = (0, 0, 1, 0)`. + + It is recommended in the paper that you normalize by the number of foreground samples. + """ + if not isinstance(gamma, Real) or gamma < 0: + raise ValueError(f"`gamma` must be a non-negative number, got: {gamma}") + + return Tensor._op( + FocalLoss, class_probs, op_args=(targets, alpha, gamma), constant=constant + )
+ + +
[docs]def softmax_focal_loss( + scores: ArrayLike, + targets: ArrayLike, + *, + alpha: float = 1, + gamma: float = 0, + constant: Optional[bool] = None, +) -> Tensor: + r""" + Applies the softmax normalization to the input scores before computing the + per-datum focal loss. + + Parameters + ---------- + scores : ArrayLike, shape=(N, C) + The C class scores for each of the N pieces of data. + + targets : ArrayLike, shape=(N,) + The correct class indices, in [0, C), for each datum. + + alpha : Real, optional (default=1) + The ɑ weighting factor in the loss formulation. + + gamma : Real, optional (default=0) + The ɣ focusing parameter. Note that for Ɣ=0 and ɑ=1, this is cross-entropy loss. + Must be a non-negative value. + + constant : Optional[bool] + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor, shape=(N,) + The per-datum focal loss. + + Notes + ----- + The formulation for the focal loss introduced in https://arxiv.org/abs/1708.02002. + It is given by -ɑ(1-p)ˠlog(p). + + The focal loss for datum-:math:`i` is given by + + .. math:: + -\alpha \hat{y}_i(1-p_i)^\gamma\log(p_i) + + where :math:`\hat{y}_i` is one in correspondence to the label associated with the + datum and 0 elsewhere. That is, if the label :math:`y_k` is 2 and + there are four possible label values, then :math:`\hat{y}_k = (0, 0, 1, 0)`. + + It is recommended in the paper that you normalize by the number of foreground samples. + """ + return focal_loss( + softmax(scores), targets=targets, alpha=alpha, gamma=gamma, constant=constant + )
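Neither ``focal_loss`` nor ``softmax_focal_loss`` ships a usage example in its docstring; a short sketch follows (the fully-qualified import path is used since a shorter re-export is not shown in this diff; printed values assume numpy's default print precision):

>>> import mygrad as mg
>>> from mygrad.nnet.losses.focal_loss import focal_loss
>>> probs = mg.Tensor([[0.1, 0.7, 0.2]])  # class probabilities for one datum
>>> targets = mg.Tensor([1])              # the true class is class-1
>>> focal_loss(probs, targets, alpha=1, gamma=0)  # gamma=0 reduces to cross-entropy: -log(0.7)
Tensor([0.35667494])
>>> focal_loss(probs, targets, alpha=1, gamma=2)  # the confident prediction is down-weighted by (1 - 0.7)**2
Tensor([0.03210074])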
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/losses/margin_ranking_loss.html b/docs/_modules/mygrad/nnet/losses/margin_ranking_loss.html new file mode 100644 index 00000000..df96f63c --- /dev/null +++ b/docs/_modules/mygrad/nnet/losses/margin_ranking_loss.html @@ -0,0 +1,683 @@
mygrad.nnet.losses.margin_ranking_loss — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.losses.margin_ranking_loss

+from numbers import Real
+from typing import Optional
+
+import numpy as np
+
+import mygrad._utils.graph_tracking as _tracking
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor, asarray
+from mygrad.typing import ArrayLike
+
+
+class MarginRanking(Operation):
+    def __call__(self, x1, x2, y, margin):
+        """Computes the margin ranking loss between ``x1``
+        and ``x2``.
+
+        Parameters
+        ----------
+        x1 : mygrad.Tensor, shape=(N,) or (N, D)
+
+        x2 : mygrad.Tensor, shape=(N,) or (N, D)
+
+        y : numpy.ndarray
+
+        margin : float
+
+        Returns
+        -------
+        numpy.ndarray, shape=()
+        """
+        self.variables = (x1, x2)
+        x1 = x1.data
+        x2 = x2.data
+
+        self.y = y
+
+        M = margin - self.y * (x1 - x2)
+        not_thresh = M <= 0
+        loss = M
+        loss[not_thresh] = 0.0
+        if _tracking.TRACK_GRAPH:
+            self._grad = np.ones_like(M)
+            self._grad[not_thresh] = 0.0
+            self._grad /= M.size
+        return np.mean(loss)
+
+    def backward_var(self, grad, index, **kwargs):
+        sign = -self.y if index == 0 else self.y
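+        # d/dx1 of mean(max(0, margin - y*(x1 - x2))) is -y/N where the margin is active
+        # (and +y/N w.r.t. x2); self._grad holds that active-entry 1/N mask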
+        return grad * (sign * self._grad)
+
+
+
[docs]def margin_ranking_loss( + x1: ArrayLike, + x2: ArrayLike, + y: ArrayLike, + margin: float, + *, + constant: Optional[bool] = None, +) -> Tensor: + r"""Computes the margin average margin ranking loss. + Equivalent to:: + + >>> import mygrad as mg + >>> mg.mean(mg.maximum(0, margin - y * (x1 - x2))) + + Parameters + ---------- + x1 : ArrayLike, shape=(N,) or (N, D) + A batch of scores or descriptors to compare against those in `x2` + + x2 : ArrayLike, shape=(N,) or (N, D) + A batch of scores or descriptors to compare against those in `x1` + + y : Union[int, ArrayLike], scalar or shape=(N,) + 1 or -1. Specifies whether the margin is compared against `(x1 - x2)` + or `(x2 - x1)`, for each of the N comparisons. + + margin : float + A non-negative value to be used as the margin for the loss. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor, shape=() + The mean margin ranking loss. + """ + if not 0 < x1.ndim < 3: + raise ValueError("`x1` must have shape (N,) or (N, D)") + if not x1.shape == x2.shape: + raise ValueError("`x1` and `x2` must have the same shape") + if not np.issubdtype(x1.dtype, np.floating): + raise TypeError("`x1` must contain floats") + if not np.issubdtype(x2.dtype, np.floating): + raise TypeError("`x2` must contain floats") + if not isinstance(margin, Real) or margin < 0: + raise ValueError("`margin` must be a non-negative scalar") + + y = asarray(y) + + if y.size == 1: + y = np.array(y.item()) + + if not y.ndim == 0 and not (y.ndim == 1 and len(y) == len(x1)): + raise ValueError("`y` must be a scalar or shape-(N,) array of ones") + + if y.ndim: + if x1.ndim == 2: + y = y.reshape(-1, 1) + return Tensor._op(MarginRanking, x1, x2, op_args=(y, margin), constant=constant)
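The docstring above gives the equivalent expression but no worked example; a short sketch (fully-qualified import path taken from this module's location in the diff):

>>> import mygrad as mg
>>> from mygrad.nnet.losses.margin_ranking_loss import margin_ranking_loss
>>> x1 = mg.Tensor([2.0, 1.0])
>>> x2 = mg.Tensor([1.0, 3.0])
>>> # with y=1 we ask that x1 out-rank x2 by at least the margin:
>>> # per-datum losses are max(0, 0.5 - (x1 - x2)) = [0.0, 2.5], whose mean is 1.25
>>> margin_ranking_loss(x1, x2, y=1, margin=0.5)
Tensor(1.25)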
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/losses/multiclass_hinge.html b/docs/_modules/mygrad/nnet/losses/multiclass_hinge.html new file mode 100644 index 00000000..5870c01b --- /dev/null +++ b/docs/_modules/mygrad/nnet/losses/multiclass_hinge.html @@ -0,0 +1,672 @@
mygrad.nnet.losses.multiclass_hinge — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.losses.multiclass_hinge

+from typing import Optional
+
+import numpy as np
+
+import mygrad._utils.graph_tracking as _tracking
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+from ._utils import check_loss_inputs
+
+
+class MulticlassHinge(Operation):
+    def __call__(self, a, y, hinge=1.0):
+        """Computes the average multiclass hinge loss.
+
+        Parameters
+        ----------
+        a : mygrad.Tensor, shape=(N, C)
+            The C class scores for each of the N pieces of data.
+
+        y : numpy.ndarray, shape=(N,)
+            The correct class-index, in [0, C), for each datum.
+
+        Returns
+        -------
+        The average multiclass hinge loss
+
+        Raises
+        ------
+        TypeError
+            `y_true` must be an integer-type array-like object
+
+        ValueError
+            `x` must be a 2-dimensional array-like object
+            `y_true` must be a shape-(N,) array-like object"""
+
+        check_loss_inputs(a, y)
+        self.variables = (a,)
+        scores = a.data
+        correct_labels = (range(len(y)), y)
+        correct_class_scores = scores[correct_labels]  # shape-(N,)
+
+        M = scores - correct_class_scores[:, np.newaxis] + hinge  # NxC margins
+        not_thresh = np.where(M <= 0)
+        Lij = M
+        Lij[not_thresh] = 0
+        Lij[correct_labels] = 0
+        if _tracking.TRACK_GRAPH:
+            TMP = np.ones(M.shape, dtype=float)
+            TMP[not_thresh] = 0
+            TMP[correct_labels] = 0  # NxC; 1 where margin > 0
+            TMP[correct_labels] = -1 * TMP.sum(axis=-1)
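+            # i.e. the correct-class entry becomes -(number of classes that violate the margin)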
+            self.back = TMP
+            self.back /= scores.shape[0]
+        return np.sum(Lij) / scores.shape[0]
+
+    def backward_var(self, grad, index, **kwargs):
+        return grad * self.back
+
+
+
[docs]def multiclass_hinge( + x: ArrayLike, + y_true: ArrayLike, + hinge: float = 1.0, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Computes the average multiclass hinge loss. + + Parameters + ---------- + x : ArrayLike, shape=(N, K) + The K class scores for each of the N pieces of data. + + y_true : ArrayLike, shape=(N,) + The correct class-indices, in [0, K), for each datum. + + hinge : float + The size of the "hinge" outside of which a nonzero loss + is incurred. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + Tensor, shape-() (scalar) + The average multiclass hinge loss + + Raises + ------ + TypeError + `y_true` must be an integer-type array-like object + + ValueError + `x` must be a 2-dimensional array-like object + `y_true` must be a shape-(N,) array-like object + """ + return Tensor._op(MulticlassHinge, x, op_args=(y_true, hinge), constant=constant)
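No example accompanies ``multiclass_hinge``; a short sketch (fully-qualified import path taken from this module's location in the diff):

>>> import numpy as np
>>> import mygrad as mg
>>> from mygrad.nnet.losses.multiclass_hinge import multiclass_hinge
>>> x = mg.Tensor([[2.0, 4.0, 1.0],   # class-1 beats the correct class-0 by more than the hinge
...                [5.0, 1.0, 0.0]])  # the correct class-0 wins by more than the hinge
>>> y_true = np.array([0, 0])
>>> # datum-0 incurs (4 - 2 + 1) = 3, datum-1 incurs 0; the average is 1.5
>>> multiclass_hinge(x, y_true, hinge=1.0)
Tensor(1.5)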
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/losses/negative_log_likelihood.html b/docs/_modules/mygrad/nnet/losses/negative_log_likelihood.html new file mode 100644 index 00000000..3123a6e4 --- /dev/null +++ b/docs/_modules/mygrad/nnet/losses/negative_log_likelihood.html @@ -0,0 +1,661 @@
mygrad.nnet.losses.negative_log_likelihood — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.losses.negative_log_likelihood

+from typing import Optional
+
+import numpy as np
+
+from mygrad.math.sequential.funcs import mean
+from mygrad.tensor_base import Tensor, asarray
+from mygrad.typing import ArrayLike
+
+from ._utils import check_loss_inputs
+
+
+
[docs]def negative_log_likelihood( + x: ArrayLike, + y_true: ArrayLike, + *, + weights: Optional[ArrayLike] = None, + constant: Optional[bool] = None, +) -> Tensor: + """Returns the (weighted) negative log-likelihood loss between log-probabilities and y_true. + + Note that this does not compute a softmax, so you should input log-probabilities to this. + See ``softmax_crossentropy`` if you need your loss to compute a softmax. + + Parameters + ---------- + x : ArrayLike, shape=(N, C) + The C log-probabilities for each of the N pieces of data. + + y_true : ArrayLike, shape=(N,) + The correct class indices, in [0, C), for each datum. + + weights : ArrayLike, shape=(C,) optional (default=None) + The weighting factor to use on each class, or None. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor, shape=() + The average (weighted) negative log-likelihood loss. + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet import negative_log_likelihood + + Let's take a simple case where N=1, and C=3. We'll thus make up classification + scores for a single datum. Suppose the scores are identical for the three classes + and that the true class is class-0, so that the log-probs are each 1/3: + + >>> logprob = mg.log(1 / 3).item() + >>> x = mg.Tensor([[logprob, logprob, logprob]]) # a shape-(1, 3) tensor of log-probabilities + >>> y_true = mg.Tensor([0]) # the correct class for this datum is class-0 + >>> negative_log_likelihood(x, y_true) + Tensor(1.09861229) + + Log-probabilities where the prediction is highly-confident and correct: + + >>> x = mg.Tensor([[0, -20, -20]]) + >>> negative_log_likelihood(x, y_true) + Tensor(0.) + + Adding a class-weighting: + + >>> x = mg.Tensor([[-4.6, -4.6, -0.02]]) + >>> weights = mg.Tensor([2, 1, 1]) + >>> negative_log_likelihood(x, y_true, weights=weights) + Tensor(9.2) + """ + if isinstance(y_true, Tensor): + y_true = y_true.data + check_loss_inputs(x, y_true) + + if weights is None: + weights = np.ones(x.shape[1]) + + weights = asarray(weights) + + if weights.ndim != 1 or weights.shape[0] != x.shape[1]: + raise ValueError( + "`weights` must be a shape-(C,) array: \n" + f"\tExpected shape-{x.shape[1]}\n" + f"\tGot shape-{y_true.shape}" + ) + + label_locs = (range(len(y_true)), y_true) + factors = weights[y_true] + return -mean(x[label_locs] * factors, constant=constant)
\ No newline at end of file
diff --git a/docs/_modules/mygrad/nnet/losses/softmax_crossentropy.html b/docs/_modules/mygrad/nnet/losses/softmax_crossentropy.html new file mode 100644 index 00000000..6b480055 --- /dev/null +++ b/docs/_modules/mygrad/nnet/losses/softmax_crossentropy.html @@ -0,0 +1,732 @@
mygrad.nnet.losses.softmax_crossentropy — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.nnet.losses.softmax_crossentropy

+from typing import Optional
+
+import numpy as np
+
+import mygrad._utils.graph_tracking as _tracking
+from mygrad.math._special import logsumexp
+from mygrad.operation_base import Operation
+from mygrad.tensor_base import Tensor
+from mygrad.typing import ArrayLike
+
+from ._utils import check_loss_inputs
+
+
+class SoftmaxCrossEntropy(Operation):
+    """Given the classification scores of C classes for N pieces of data,
+    computes the NxC softmax classification probabilities. The
+    cross entropy is then computed by using the true classification labels.
+
+    log-softmax is used for improved numerical stability"""
+
+    def __call__(self, x, y_true):
+        """Parameters
+        ----------
+        x : mygrad.Tensor, shape=(N, C)
+            The C class scores for each of the N pieces of data.
+
+        y_true : Sequence[int]
+            The correct class-indices, in [0, C), for each datum.
+
+        Returns
+        -------
+        The average softmax loss"""
+        if isinstance(y_true, Tensor):
+            y_true = y_true.data
+
+        check_loss_inputs(x, y_true)
+        self.variables = (x,)
+        scores = x.data
+        log_softmax = scores - logsumexp(scores, axis=-1, keepdims=True)
+        label_locs = (range(len(scores)), y_true)
+        loss = -np.sum(log_softmax[label_locs]) / scores.shape[0]
+
+        if _tracking.TRACK_GRAPH:
+            self.back = np.exp(log_softmax)
+            self.back[label_locs] -= 1.0
+            self.back /= scores.shape[0]
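+            # i.e. dL/dx = (softmax(x) - one_hot(y_true)) / N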
+        return loss
+
+    def backward_var(self, grad, index, **kwargs):
+        return grad * self.back
+
+
+
[docs]def softmax_crossentropy( + x: ArrayLike, y_true: ArrayLike, *, constant: Optional[bool] = None +) -> Tensor: + r"""Given the classification scores of C classes for N pieces of data, + + computes the NxC softmax classification probabilities. The + cross entropy is then computed by using the true classification labels. + + log-softmax is used for improved numerical stability. + + Parameters + ---------- + x : ArrayLike, shape=(N, C) + The C class scores for each of the N pieces of data. + + y_true : ArrayLike, shape=(N,) + The correct class-indices, in [0, C), for each datum. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + The average softmax loss + + Raises + ------ + ValueError + Bad dimensionalities for ``x`` or ``y_true`` + + Notes + ----- + - :math:`N` is the number of samples in the batch. + - :math:`C` is the number of possible classes for which scores are provided. + + Given the shape-:math:`(N, C)` tensor of scores, ``x``, the softmax classification + probabilities are computed. That is, the score for class-:math:`k` of a given datum + (:math:`s_{k}`) is normalized using the 'softmax' transformation: + + .. math:: + + p_{k} = \frac{e^{s_k}}{\sum_{i=1}^{C}{e^{s_i}}} + + This produces the "prediction probability distribution", :math:`p`, for each datum. + The cross-entropy loss for that datum is then computed according to the true class-index + for that datum, as reported in ``y_true``. That is the "true probability distribution", + :math:`t`, for the datum is :math:`1` for the correct class-index and :math:`0` elsewhere. + + The cross-entropy loss for that datum is thus: + + .. math:: + l = - \sum_{k=1}^{C}{t_{k} \log{p_{k}}} + + Having computed each per-datum cross entropy loss, this function then returns the loss + averaged over all :math:`N` pieces of data: + + .. math:: + + L = \frac{1}{N}\sum_{i=1}^{N}{l_{i}} + + Examples + -------- + >>> import mygrad as mg + >>> from mygrad.nnet import softmax_crossentropy + + Let's take a simple case where N=1, and C=3. We'll thus make up classification + scores for a single datum. Suppose the scores are identical for the three classes + and that the true class is class-0: + + >>> x = mg.Tensor([[2., 2., 2.]]) # a shape-(1, 3) tensor of scores + >>> y_true = mg.Tensor([0]) # the correct class for this datum is class-0 + + Because the scores are identical for all three classes, the softmax normalization + will simply produce :math:`p = [\frac{1}{3}, \frac{1}{3}, \frac{1}{3}]`. Because + class-0 is the "true" class, :math:`t = [1., 0., 0.]`. Thus our softmax cross-entropy + loss should be: + + .. math:: + -(1 \times \log{\frac{1}{3}} + 0 \times \log{\frac{1}{3}} + 0 \times \log{\frac{1}{3}}) + = \log(3) \approx 1.099 + + Let's see that this is what ``softmax_crossentropy`` returns: + + >>> softmax_crossentropy(x, y_true) + Tensor(1.09861229) + + Similarly, suppose a datum's scores are :math:`[0, 0, 10^6]`, then the softmax normalization + will return :math:`p \approx [0., 0., 1.]`. If the true class for this datum is class-2, then + the loss should be nearly 0, since :math:`p` and :math:`t` are essentially identical: + + .. math:: + -(0 \times \log{0} + 0 \times \log{0} + 1 \times \log{1}) + = -\log(1) = 0 + + Now, let's construct ``x`` and ``y_true`` so that they incorporate the scores/labels for + both of the data that we have considered: + + >>> x = mg.Tensor([[2., 2., 2.], # a shape-(2, 3) tensor of scores + ... 
[0., 0., 1E6]]) + >>> y_true = mg.Tensor([0, 2]) # the class IDs for the two data + + ``softmax_crossentropy(x, y_true)`` will return the average loss of these two data, + :math:`\frac{1}{2}(1.099 + 0) \approx 0.55`: + + >>> softmax_crossentropy(x, y_true) + Tensor(0.54930614) + """ + return Tensor._op(SoftmaxCrossEntropy, x, op_args=(y_true,), constant=constant)
\ No newline at end of file
diff --git a/docs/_modules/mygrad/operation_base.html b/docs/_modules/mygrad/operation_base.html new file mode 100644 index 00000000..470f6f76 --- /dev/null +++ b/docs/_modules/mygrad/operation_base.html @@ -0,0 +1,1023 @@
mygrad.operation_base — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.operation_base

+"""
+Defines the base class for mathematical operations capable of back-propagating
+gradients to their input tensors."""
+
+from abc import ABC, abstractmethod
+from numbers import Real
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
+
+import numpy as np
+
+from mygrad._numpy_version import NP_IS_V2
+from mygrad._utils import SkipGradient, reduce_broadcast
+from mygrad.errors import InvalidBackprop, InvalidGradient
+from mygrad.typing import DTypeLike, Mask
+
+if TYPE_CHECKING:  # pragma: no cover
+    from mygrad.tensor_base import Tensor
+
+
+__all__ = [
+    "Operation",
+    "Ufunc",
+    "UnaryUfunc",
+    "BinaryUfunc",
+    "Sequential",
+]
+
+Axis = Optional[Union[int, Tuple[int, ...]]]
+
+
+class _NoValueType:
+    """Special keyword value.
+
+    The instance of this class may be used as the default value assigned to a
+    deprecated keyword in order to check if it has been given a user defined
+    value.
+    """
+
+    __instance = None
+
+    def __new__(cls):
+        # ensure that only one instance exists
+        if not cls.__instance:
+            cls.__instance = super(_NoValueType, cls).__new__(cls)
+        return cls.__instance
+
+    def __repr__(self):  # pragma: no cover
+        return "<no value>"
+
+
+_NoValue = _NoValueType()
+
+
+
[docs]class Operation(ABC): + """Base class for all tensor operations that support back-propagation + of gradients. + + Consider the Operation-instance ``f``. A forward-pass through ``f`` is defined + via ``f.__call__(...)``. Thus, given tensors ``a`` and ``b``, a computational + graph is defined ``f.__call__(a, b) -> c``, where the "creator" of tensor ``c`` + is recorded as ``f``:: + + (node: a) --+ + -> [operation: f(a, b)] --> (node: c) + (node: b) --+ + + Back-propagating through ``c`` will instruct ``f`` to back-propagate + the gradient to its inputs, which are recorded as ``a`` and ``b``. Each + node then back-propagates to any Operation-instance that is recorded + as its creator, and so on. + """ + + # Can be set to true if the operation is guaranteed to not returns a view + # this will reduce some overhead on checking for shared memory + can_return_view: bool = False + + # Stores the input tensors that the operation will backprop through. + variables: Tuple["Tensor", ...] + +
[docs] def __init__(self): + # Stores positional and keyword arguments used to call op. + # Can be set optionally - only if op needs to be "replayed", + # e.g. with a view + self.replay_args: Optional[Tuple[Any, ...]] = None + self.replay_kwargs: Optional[Dict[str, Any]] = None + self.replay_force_constant: Optional[bool] = None + self.where: Mask = True
+ + @staticmethod + def grad_post_process_fn( + grad: np.ndarray, var_shape: Tuple[int, ...] + ) -> np.ndarray: + # this function gets called all of the time; we can avoid + # the extra function call by doing the shape check upfront + if grad.shape == var_shape: + return grad + out = reduce_broadcast(grad, var_shape) + + if out.ndim == 0: + # sum-reduction to a scalar produces a float + if NP_IS_V2: + out = np.asarray(out) + else: # pragma: no cover + out = np.array(out, copy=False) + return out + + @abstractmethod + def __call__(self, *input_vars: "Tensor", **kwargs) -> np.ndarray: + """Performs a forward pass, f, of this Operation:: + + f(x1, ...., xn) + + Parameters + ---------- + *input_vars : mygrad.Tensor + The input-arguments of f. The tuple (x1, ...., xn) + should be bound to the instance-attribute `self.variables` + + **kwargs : Any + Additional arguments for the operation + + Returns + ------- + numpy.ndarray + The output of the forward pass function. + + Notes + ----- + This method should set the ``self.variables`` attribute + with a tuple storing all of the input tensors of this operations""" + + raise NotImplementedError() # pragma: no cover + +
[docs] @abstractmethod + def backward_var(self, grad: np.ndarray, index: int, **kwargs) -> np.ndarray: + """Given ``grad = dℒ/df``, computes ``∂ℒ/∂x_{i}``, where ``x_{i}`` is one + of ``x1, ...., xn``. + + ``ℒ`` is assumed to be the terminal node from which ``ℒ.backward()`` was + called. + + Parameters + ---------- + grad : numpy.ndarray + The back-propagated total derivative with respect to the present + operation: dℒ/df. This will have the same shape as f, the result + of the forward pass. + + index : int + The index-location of ``var`` in ``self.variables`` + + Returns + ------- + numpy.ndarray + ∂ℒ/∂x_{i} + + Raises + ------ + SkipGradient""" + raise NotImplementedError() # pragma: no cover
+ +
[docs] def backward( + self, + grad: np.ndarray, + **kwargs, + ): + """Back-propagates the gradient through all of the operation's inputs, + which are stored in the tuple `self.variables`. + + Constant tensors (`tensor.constant is True`) skipped by this process. + + Parameters + ---------- + grad : numpy.ndarray + The back-propagated total derivative with respect to the present + operation (`f`): d(out)/df + """ + for index, var in enumerate(self.variables): + if var.constant: + continue + + if not var._ops: + raise InvalidBackprop( + f"Part of the computational graph containing " + f"this tensor, {var}, was 'cleared' prior to backprop.\n" + f"It is recommended that you clear all computational graphs " + f"and restart your computation." + ) + + try: + # don't cast to array here so that we have an easier time + # doing type checking (e.g. avoid `None` -> `array(None, dtype=obj)` + backed_grad = self.backward_var(grad, index, **kwargs) + except SkipGradient: + continue + + if not isinstance(backed_grad, (np.ndarray, np.number, Real)): + raise InvalidGradient( + f"An invalid gradient-value was passed to:" + f"\n\t`{type(self).__name__}.backward_var(<gradient>, index={index})`" + f"\nGradients are expected to be real-valued scalars or " + f"numpy arrays, got a gradient of type: {type(backed_grad)}" + ) + + if NP_IS_V2: + backed_grad = np.asarray(backed_grad) + else: # pragma: no cover + backed_grad = np.array(backed_grad, copy=False) + + if self.where is not True: + backed_grad = backed_grad * self.where + + backed_grad = self.grad_post_process_fn(backed_grad, var.shape) + assert backed_grad.shape == var.shape, (backed_grad.shape, var.shape) + if var._grad is None: + backed_grad = ( + np.copy(backed_grad) + # `backed_grad` is view of grad; we want to be able to + # augment tmp-grad inplace later + if backed_grad.base is not None or (backed_grad is grad) + else backed_grad + ) + if backed_grad.dtype != var.dtype: + backed_grad = backed_grad.astype(var.dtype, copy=False) + + var._grad = backed_grad + else: + var._grad += backed_grad
+ + +class Ufunc(Operation, ABC): + """The base class for mygrad's universal functions. + + 'A universal function (or ufunc for short) is a function that operates on + ndarrays in an element-by-element fashion, supporting array broadcasting, type casting, + and several other standard features. That is, a ufunc is a “vectorized” wrapper for a + function that takes a fixed number of specific inputs and produces a fixed number of + specific outputs.' [1]_ + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/ufuncs.html""" + + numpy_ufunc: np.ufunc + _supports_where: bool = True + + +class UnaryUfunc(Ufunc, ABC): + """A base class that specifies the common interface to – and facilitates + back-prop through – ufuncs that operate on a single array argument; + e.g. `mygrad.sin`, `mygrad.negative`.""" + + def __call__( + self, + x1: "Tensor", + out: Optional[np.ndarray] = None, + *, + where: Mask = True, + dtype: DTypeLike = None, + ) -> np.ndarray: + """f(x1, out=None, *, where=True, dtype=None) + + Parameters + ---------- + x1 : Tensor, shape-(...) + The input to the operation. + + This tensor is saved to the state of the operation instance + so that back-prop can be performed through it. + + out : Optional[np.ndarray] + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + where: Union[bool, np.ndarray] + Accepts a boolean array which is broadcast together with ``x1``. + Values of True indicate to calculate the ufunc at that position, values + of False indicate to leave the value in the output alone. + + dtype : Optional[numpy.dtype, str, object] + Overrides the dtype of the calculation and output array. + + Returns + ------- + y : ndarray, shape-(...) + A numpy array of the same shape as ``x1`` with the ufunc applied + elementwise on ``x1``. + + Notes + ----- + This docstring was adapted from numpy's documentation [1]_. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.sqrt.html + """ + self.variables: Tuple["Tensor"] = (x1,) + if where is not True: + self.where = where + return self.numpy_ufunc(x1.data, out=out, where=where, dtype=dtype) + + +class BinaryUfunc(Ufunc, ABC): + """A base class that specifies the common interface to – and facilitates + back-prop through – mygrad's ufuncs that operate on a two array arguments; + e.g. `mygrad.add`, `mygrad.multiply`. + """ + + def __call__( + self, + x1: "Tensor", + x2: "Tensor", + out: Optional[np.ndarray] = None, + *, + where: Mask = True, + dtype: DTypeLike = None, + ) -> np.ndarray: + """f(x1, x2, out=None, *, where=True, dtype=None) + + Parameters + ---------- + x1 : Tensor + The first input to the operation. + + This tensor is saved to the state of the operation instance + so that back-prop can be performed through it. + + x2 : Tensor + The second input to the operation. + + This tensor is saved to the state of the operation instance + so that back-prop can be performed through it. + + out : Optional[np.ndarray] + A location into which the result is stored. If provided, it must + have a shape that the inputs broadcast to. If not provided or None, + a freshly-allocated array is returned. + + where: Union[bool, np.ndarray] + Accepts a boolean array which is broadcast jointly with ``x1`` and ``x2``. 
+ Values of True indicate to calculate the ufunc at that position, values + of False indicate to leave the value in the output alone. + + dtype : Optional[numpy.dtype, str, object] + Overrides the dtype of the calculation and output array. + + Returns + ------- + y : ndarray + A numpy array resulting from the elementwise application of the ufunc to + corresponding pairs of elements from ``x1`` and ``x2``, respectively. + + If ``x1`` and ``x2`` are of different shapes, then the operation is broadcast + across them [1]_. + + Notes + ----- + This docstring was adapted from numpy's documentation [2]_. + + References + ---------- + .. [1] https://numpy.org/doc/stable/user/basics.broadcasting.html + .. [2] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.add.html + """ + self.variables: Tuple["Tensor", "Tensor"] = (x1, x2) + if where is not True and where is not _NoValue: + self.where = where + return self.numpy_ufunc(x1.data, x2.data, out=out, where=where, dtype=dtype) + else: + return self.numpy_ufunc(x1.data, x2.data, out=out, dtype=dtype) + + +class Sequential(Operation, ABC): + """A base class that specifies the common interface to – and facilitates + back-prop through – numpy's sequential functions; e.g. `numpy.sum`, `numpy.var`, + `numpy.max`""" + + _integer_axis_only: bool = False + + @staticmethod + @abstractmethod + def numpy_func( + a: np.ndarray, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + dtype: DTypeLike = None, + out: Optional[np.ndarray] = None, + *args, + **kwargs, + ) -> np.ndarray: + raise NotImplementedError() # pragma: no cover + + def __init__(self): + self.axis: Axis + self.keepdims: Optional[bool] + self.initial: Real + self.out_shape: Tuple[int, ...] + super().__init__() + + def __call__( + self, + a: "Tensor", + axis: Axis = None, + dtype=None, + out: Optional[np.ndarray] = None, + keepdims: bool = _NoValue, + initial: Real = _NoValue, + *, + where: Union[bool, np.ndarray] = _NoValue, + ddof: int = _NoValue, + ) -> np.ndarray: + self.variables: Tuple["Tensor"] = (a,) + + if where is not True and where is not _NoValue: + self.where = where + + self.keepdims = keepdims + self.initial = initial + self.ddof = ddof + + # Unless axis is None or the op is integer-axis-only + # normalize axis to be a tuple of ints. + if ( + not self._integer_axis_only + and axis is not None + and not hasattr(axis, "__iter__") + ): + self.axis = (axis,) + else: + self.axis = axis + + kwargs = {} + + if keepdims is not _NoValue: + kwargs["keepdims"] = keepdims + + if initial is not _NoValue: # pragma: no cover + kwargs["initial"] = initial + + if where is not _NoValue: + kwargs["where"] = where + + if ddof is not _NoValue: + kwargs["ddof"] = ddof + + if dtype is not _NoValue: + kwargs["dtype"] = dtype + + out = self.numpy_func(a.data, axis=axis, out=out, **kwargs) + self.out_shape = out.shape + + return out +
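To make the interface above concrete, here is a minimal, hypothetical ``Operation`` subclass: it records its input in ``self.variables`` during ``__call__``, returns a plain numpy array, and implements ``backward_var``; it is wired to a tensor-producing function via ``Tensor._op``, mirroring how the layers and losses earlier in this diff do it. ``Cube`` and ``cube`` are illustrative names, not part of MyGrad.

import numpy as np

from mygrad.operation_base import Operation
from mygrad.tensor_base import Tensor


class Cube(Operation):
    """f(x) = x ** 3, element-wise (illustrative only)."""

    def __call__(self, x):
        self.variables = (x,)  # record input tensor(s) so backprop can reach them
        return x.data ** 3     # the forward pass works on the underlying ndarray

    def backward_var(self, grad, index, **kwargs):
        (x,) = self.variables
        return grad * 3 * x.data ** 2  # dL/dx = dL/df * 3x**2


def cube(x, *, constant=None) -> Tensor:
    # Tensor._op runs the forward pass and records `Cube` as the creator of the output
    return Tensor._op(Cube, x, constant=constant)

# usage sketch:
#   x = Tensor(2.0); out = cube(x); out.backward()
#   x.grad  ->  array(12.)   (= 3 * 2.0 ** 2)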
\ No newline at end of file
diff --git a/docs/_modules/mygrad/random/funcs.html b/docs/_modules/mygrad/random/funcs.html new file mode 100644 index 00000000..ac1068a2 --- /dev/null +++ b/docs/_modules/mygrad/random/funcs.html @@ -0,0 +1,907 @@
mygrad.random.funcs — MyGrad 2.3.0.post1.dev6 documentation

Source code for mygrad.random.funcs

+from typing import Optional
+
+import numpy as np
+
+from mygrad.tensor_base import Tensor
+from mygrad.typing import Shape
+
+
+
[docs]def rand(*shape: int, constant: Optional[bool] = None) -> Tensor: + """Create a Tensor of the given shape and populate it with random + samples from a uniform distribution over [0, 1). + + Parameters + ---------- + shape: d0, d1, ... dn : int, optional + The dimensions of the returned array, must be non-negative. + If no argument is given a single Python float is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``--shaped Tensor of floating-point samples from the uniform distribution + over [0, 1), or a single such float if no parameters were supplied. + + Examples + -------- + >>> from mygrad.random import rand + >>> rand(3,4) + Tensor([[0.9805903 , 0.82640985, 0.88230632, 0.73099815], + [0.24845968, 0.12532893, 0.63171607, 0.32543228], + [0.66029533, 0.79285341, 0.54967228, 0.25178508]]) + """ + + return Tensor(np.random.rand(*shape), constant=constant, copy=False)
+ + +
[docs]def randint(low, high=None, shape: Optional[Shape] = None, dtype=int) -> Tensor: + """Return random integers from the “discrete uniform” distribution of the specified dtype in the + “half-open” interval [low, high). + + If high is None (the default), then results are from [0, low). + + Parameters + ---------- + low: int or array-like of ints + Lowest (signed) integers to be drawn from the distribution + (unless high=None, in which case this parameter is one above the highest such integer). + + high: int or array-like of ints, optional + If provided, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if high=None). If array-like, must contain integer values + + shape: int or tuple of ints, optional + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. + Default is None, in which case a single value is returned. + + dtype: dtype, optional + Desired dtype of the result. Byteorder must be native. The default value is int. + + Returns + ------- + int or mygrad.Tensor of ints + ``shape``-shaped array of random integers from the appropriate distribution, + or a single such random int if size not provided. + + Examples + -------- + >>> from mygrad.random import randint + >>> randint(low=1, high=7, shape=(2,5)) + Tensor([[2, 4, 1, 5, 1], + [6, 2, 5, 4, 6]]) + + >>> randint(low=4, high=100) + Tensor(57) + """ + + return Tensor(np.random.randint(low, high, shape, dtype), copy=False)
+ + +
[docs]def randn(*shape: int, constant: Optional[bool] = None) -> Tensor: + """Return a sample (or samples) from the “standard normal” distribution. + + Parameters + ---------- + shape: d0, d1, ... dn : int, optional + The dimensions of the returned array, must be non-negative. + If no argument is given a single Python float is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``-shaped Tensor of floating-point samples from the standard normal distribution, + or a single such float if no parameters were supplied. + + Examples + -------- + >>> from mygrad.random import randn + >>> randn(3, 3, 2) + Tensor([[[-0.45664135, 0.05060159], + [ 1.36883177, -0.46084292], + [-0.76647664, 0.81667174]], + + [[ 0.08336453, -1.35104408], + [ 0.73187355, 1.33405382], + [ 0.28411209, -0.18047323]], + + [[-0.2239412 , -0.09170368], + [-0.39175898, 0.81260396], + [-1.28788909, -1.52525778]]]) + """ + + return Tensor(np.random.randn(*shape), constant=constant, copy=False)
+ + +
[docs]def random(shape: Optional[Shape] = None, *, constant: Optional[bool] = None) -> Tensor: + """Return random floats in the half-open interval [0.0, 1.0). + + To create a random sample of a given shape on the interval [a, b), call + (b-a) * random(shape) + a + + Parameters + ---------- + shape: int or tuple of ints, optional + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. + Default is None, in which case a single value is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``-shaped Tensor of floating-point samples from the uniform distribution + over [0.0, 1.0), or a single such float if ``shape`` is not provided. + + Examples + -------- + >>> from mygrad.random import random + >>> random((2, 4)) + Tensor([[0.14928578, 0.28812813, 0.56885892, 0.49555962], + [0.19780163, 0.51162365, 0.7849505 , 0.47864586]]) + """ + + return Tensor(np.random.random(shape), constant=constant, copy=False)
+ + +
[docs]def random_sample( + shape: Optional[Shape] = None, *, constant: Optional[bool] = None +) -> Tensor: + """Return random floats in the half-open interval [0.0, 1.0). + + Results are from the “continuous uniform” distribution over the stated interval. + + To create a random sample of a given shape on the interval [a, b), call + (b-a) * random_sample(shape) + a + + Parameters + ---------- + shape: int or tuple of ints, optional + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. + Default is None, in which case a single value is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``-shaped Tensor of floating-point samples from the continuous uniform + distribution over [0.0, 1.0), or a single such float if ``shape`` is not provided. + + Examples + -------- + >>> from mygrad.random import random_sample + >>> random_sample((3, 2)) + Tensor([[0.76490814, 0.69378441], + [0.65228375, 0.68395309], + [0.08228869, 0.03191064]]) + + >>> random_sample() + Tensor(0.47644928) + """ + + return Tensor(np.random.random_sample(shape), constant=constant, copy=False)
+ + +
[docs]def ranf(shape: Optional[Shape] = None, *, constant: Optional[bool] = None) -> Tensor: + """Return random floats in the half-open interval [0.0, 1.0). + + To create a random sample of a given shape on the interval [a, b), call + (b-a) * ranf(shape) + a + + Parameters + ---------- + shape: int or tuple of ints, optional + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. + Default is None, in which case a single value is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``-shaped Tensor of floating-point samples from the uniform distribution + over [0.0, 1.0), or a single such float if ``shape`` is not provided. + + Examples + -------- + >>> from mygrad.random import ranf + >>> ranf((2, 3, 1)) + Tensor([[[0.9343681 ], + [0.29573802], + [0.84759669]], + + [[0.34563731], + [0.68601617], + [0.02388943]]]) + + >>> ranf() + Tensor(0.77739196) + """ + + return Tensor(np.random.ranf(shape), constant=constant, copy=False)
+ + +
[docs]def sample(shape: Optional[Shape] = None, *, constant: Optional[bool] = None) -> Tensor: + """Return random floats in the half-open interval [0.0, 1.0). + + To create a random sample of a given shape on the interval [a, b), call + (b-a) * sample(shape) + a + + Parameters + ---------- + shape: int or tuple of ints, optional + Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. + Default is None, in which case a single value is returned. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + A ``shape``-shaped Tensor of floating-point samples from the uniform distribution + over [0.0, 1.0), or a single such float if ``shape`` is not provided. + + Examples + -------- + >>> from mygrad.random import sample + >>> sample((3, 4)) + Tensor([[0.47263933, 0.10928814, 0.19737707, 0.30879006], + [0.49870689, 0.05849937, 0.21095352, 0.09778017], + [0.405788 , 0.91888808, 0.15061143, 0.63140668]]) + + >>> sample() + Tensor(0.50690423) + """ + + return Tensor(np.random.sample(shape), constant=constant, copy=False)
+ + +
[docs]def seed(seed_number): + """Seed the generator. + + Simply uses NumPy's random state - i.e. this is equivalent to ``numpy.random.seed``. + + Parameters + ---------- + seed_number : int or 1-d array_like, optional + Seed for RandomState. Must be convertible to 32 bit unsigned integers. + + Examples + -------- + >>> from mygrad.random import seed, random + >>> seed(0) + >>> random((2, 4)) + Tensor([[0.5488135 , 0.71518937, 0.60276338, 0.54488318], + [0.4236548 , 0.64589411, 0.43758721, 0.891773 ]]) + + >>> seed(1) + >>> random((2, 4)) + Tensor([[4.17022005e-01, 7.20324493e-01, 1.14374817e-04, 3.02332573e-01], + [1.46755891e-01, 9.23385948e-02, 1.86260211e-01, 3.45560727e-01]]) + + >>> seed(0) + >>> random((2,4)) + Tensor([[0.5488135 , 0.71518937, 0.60276338, 0.54488318], + [0.4236548 , 0.64589411, 0.43758721, 0.891773 ]]) + """ + + np.random.seed(seed_number)
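Editor's note: a brief usage sketch tying the functions above together with back-propagation; the float-type tensors returned by these functions default to ``constant=False``, so they participate in the computational graph (illustrative only, not part of the rendered source):

from mygrad.random import rand, seed

seed(0)                # reproducible draws; simply wraps numpy.random.seed
w = rand(3)            # float-type tensor -> constant=False by default
loss = (w ** 2).sum()  # build a small computational graph
loss.backward()        # back-propagate through the randomly-initialized tensor
print(w.grad)          # holds d(loss)/dw == 2 * w.data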
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_base.html b/docs/_modules/mygrad/tensor_base.html new file mode 100644 index 00000000..a341dc57 --- /dev/null +++ b/docs/_modules/mygrad/tensor_base.html @@ -0,0 +1,3792 @@ + + + + + + + + + + mygrad.tensor_base — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Source code for mygrad.tensor_base

+"""
+This module defines the base tensor class along with all of its essential
+attributes and special methods. Public math methods, e.g. ``sum``, ``mean``,
+etc., are bound to the Tensor class in ``mygrad.__init__.py``.
+"""
+
+from collections import deque
+from numbers import Integral, Number
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Deque,
+    Dict,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+)
+from weakref import ReferenceType, finalize
+
+import numpy as np
+
+import mygrad._utils.duplicating_graph as _dup
+import mygrad._utils.graph_tracking as _track
+import mygrad._utils.lock_management as _mem
+from mygrad._numpy_version import NP_IS_V2
+from mygrad._tensor_core_ops.indexing import GetItem, SetItem
+from mygrad._utils import WeakRef, WeakRefIterable, collect_all_tensors_and_clear_grads
+from mygrad.errors import DisconnectedView
+from mygrad.math.arithmetic.ops import (
+    Add,
+    Divide,
+    Multiply,
+    Negative,
+    Positive,
+    Power,
+    Square,
+    Subtract,
+)
+from mygrad.math.misc.ops import MatMul
+from mygrad.math.sequential.ops import (
+    CumProd,
+    CumSum,
+    Max,
+    Mean,
+    Min,
+    Prod,
+    StdDev,
+    Sum,
+    Variance,
+)
+from mygrad.operation_base import Operation, _NoValue
+from mygrad.tensor_manip.array_shape.ops import Flatten, Ravel, Reshape, Squeeze
+from mygrad.tensor_manip.transpose_like.ops import (
+    MoveAxis,
+    SwapAxes,
+    Tensor_Transpose_Property,
+    Transpose,
+)
+from mygrad.typing import ArrayLike, DTypeLike, DTypeLikeReals, Index, Shape
+
+__all__ = ["Tensor", "asarray", "astensor", "implements_numpy_override"]
+
+if TYPE_CHECKING:  # pragma: no cover
+    from mygrad.ufuncs._ufunc_creators import ufunc as mygrad_ufunc
+
+
+T = TypeVar("T")
+
+CONSTANT_ONLY_DTYPES = (np.integer, np.bool_)
+
+
+def _resolve_constant(*others: Any, constant: Optional[bool]) -> Optional[bool]:
+    """Determines if `constant` should be resolved to True based on `others`.
+    Otherwise defers to a tensor-creator to handle further resolutions based on dtype.
+    """
+    if constant is not None:
+        return constant
+    for other in others:
+        if isinstance(other, Tensor) and not other.constant:
+            # let subsequent tensor casting infer constant from dtype
+            return None
+    # all inputs are constants
+    return True
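+    # Editor's note: an illustrative sketch of how the resolution rules above
+    # behave (hypothetical usage; ``_resolve_constant`` is a private helper of
+    # this module and is not part of MyGrad's public API):
+    #
+    #     import mygrad as mg
+    #     from mygrad.tensor_base import _resolve_constant
+    #
+    #     a = mg.tensor([1.0, 2.0])  # float dtype -> non-constant by default
+    #     b = mg.tensor([1, 2])      # integer dtype -> constant by default
+    #
+    #     _resolve_constant(a, b, constant=True)    # -> True: explicit value returned as-is
+    #     _resolve_constant(a, b, constant=None)    # -> None: `a` is non-constant, defer to dtype
+    #     _resolve_constant(b, 3.0, constant=None)  # -> True: no non-constant tensors among inputs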
+
+
+
[docs]def asarray(a: ArrayLike, dtype: DTypeLike = None, order: str = None) -> np.ndarray: + """Convert the input to an array. + + This docstring is adapted from that of ``numpy.asarray``. + + Parameters + ---------- + a : array_like + Input data, in any form - including a mygrad tensor - that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + + dtype : data-type, optional + By default, the data-type is inferred from the input data. + + order : {'C', 'F'}, optional + Whether to use row-major (C-style) or + column-major (Fortran-style) memory representation. + Defaults to 'C'. + + Returns + ------- + out : ndarray + Array interpretation of `a`. No copy is performed if the input + is already an ndarray with matching dtype and order. If `a` is a + subclass of ndarray, a base class ndarray is returned. + + Examples + -------- + Convert a list into an array: + + >>> import mygrad as mg + >>> a = [1, 2] + >>> mg.asarray(a) + array([1, 2]) + + Convert a tensor into an array. No copy of the + underlying numpy array is created: + + >>> t = mg.Tensor([1, 2.]) + >>> mg.asarray(t) + array([1., 2.]) + >>> t.data is np.asarray(t) + True + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> mg.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> mg.asarray(a, dtype=np.float32) is a + True + >>> mg.asarray(a, dtype=np.float64) is a + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray) + >>> mg.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + """ + if isinstance(a, Tensor): + a = a.data # faster than passing the tensor directly + return np.asarray(a, dtype=dtype, order=order)
+ + +
[docs]def tensor( + arr_like: ArrayLike, + dtype: DTypeLikeReals = None, + *, + constant: Optional[bool] = None, + copy: bool = True, + ndmin: int = 0, +) -> "Tensor": + """ + Create a tensor + + This documentation was adapted from that of ``numpy.array` + + Parameters + ---------- + arr_like : array_like + A tensor, any object exposing the array interface, an object whose + __array__ method returns an tensor, a real number, any (nested) sequence. + + dtype : data-type, optional + The desired data-type for the tensor. Restricted to integer and float type. + If not specified, then the type will be determined as the minimum type required + to hold the objects in the sequence. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant_tensor.grad`` will always + return ``None``). + + If a new tensor is returned: + - Defaults to ``False`` for float-type data. + - Defaults to ``True`` for integer-type data. + + copy : bool, optional + If true (default), or if a copy is needed to satisfy any of the + other requirements (``dtype``, ``constant``, etc.) then a new tensor + is created from copied data. Otherwise the tensor will be returned + unchanged. + + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + tensor should have. Ones will be prepended to the shape as + needed to meet this requirement. + + Returns + ------- + out : Tensor + A tensor satisfying the specified requirements. + + See Also + -------- + empty_like : Return an empty tensor with shape and type of input. + ones_like : Return an tensor of ones with shape and type of input. + zeros_like : Return an tensor of zeros with shape and type of input. + full_like : Return a new tensor with shape of input filled with value. + empty : Return a new uninitialized tensor. + ones : Return a new tensor setting values to one. + zeros : Return a new tensor setting values to zero. + full : Return a new tensor of given shape filled with value. + + Examples + -------- + >>> import mygrad as mg + >>> mg.tensor([1, 2, 3]) + Tensor([1, 2, 3]) + + Upcasting: + + >>> mg.tensor([1, 2, 3.0]) + Tensor([ 1., 2., 3.]) + + More than one dimension: + + >>> mg.tensor([[1, 2], [3, 4]]) + Tensor([[1, 2], + [3, 4]]) + + Minimum dimensions 2: + + >>> mg.tensor([1, 2, 3], ndmin=2) + Tensor([[1, 2, 3]]) + + Type provided: + + >>> mg.tensor([1, 2, 3], dtype="float32") + Tensor([1., 2., 3.], dtype=float32) + """ + + if isinstance(arr_like, Tensor) and copy is False: + if (constant is None or arr_like.constant is constant) and ( + dtype is None or (arr_like.dtype == np.dtype(dtype)) + ): + if not isinstance(ndmin, Integral): + raise TypeError( + f"TypeError: `ndmin` requires a non-negative integer (got type {type(ndmin)})" + ) + if ndmin < 0: + ndmin = 0 # numpy does this + if ndmin > arr_like.ndim: + arr_like = arr_like[(*(None for _ in range(ndmin - arr_like.ndim)),)] + # return tensor as-as + return arr_like + + return Tensor(arr_like, dtype=dtype, constant=constant, copy=copy, ndmin=ndmin)
+ + +
[docs]def astensor( + t: ArrayLike, dtype: DTypeLikeReals = None, *, constant: Optional[bool] = None +) -> "Tensor": + """Convert the input to a tensor. + + A tensor `t` is returned unchanged - its gradient and computational + graph state preserved - if dtype and constant are compatible. + A copy of the underlying numpy array is created only if dtype is + incompatible or if a non-constant tensor is being created from a constant. + + Parameters + ---------- + t : array_like + Input data, in any form that can be converted to a tensor. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + + dtype : data-type, optional + By default, the data-type is inferred from the input data. + + constant : Optional[bool] + By default, `constant` is inferred from `t` if `t` is a tensor, + otherwise it defaults to `False`. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + out : Tensor + Tensor interpretation of `t`. No copy is performed if the input + is already a tensor with matching dtype and constant-flag. + + Examples + -------- + Convert a list into a tensor: + + >>> import mygrad as mg + >>> import numpy as np + >>> t = [1, 2] + >>> mg.astensor(t) + Tensor([1, 2]) + + Convert an array into a tensor. No copy of the + underlying numpy array is created: + + >>> a = np.array([1.0, 2.0]) + >>> mg.astensor(a) + Tensor([1., 2.]) + >>> a is mg.astensor(a).data + True + + Existing tensors are not copied and their gradients and + computational graphs are preserved: + + >>> t1 = 2 * mg.tensor([1, 2]) + >>> t2 = mg.astensor(t1) + >>> t1 is t2 + True + >>> t1.creator is t2.creator + True + + If `dtype` is set, a new tensor is created - with copied data - only + if dtype does not match: + + >>> t = mg.Tensor([1, 2], dtype=np.float32) + >>> mg.astensor(t, dtype=np.float32) is t + True + >>> mg.astensor(t, dtype=np.float64) is t + False + + Otherwise, if `constant` is set, a new tensor is created (with + no copy of the underlying data) only if constant doesn't match. + + >>> t1 = mg.tensor([1.0, 2.0], constant=False) + >>> mg.astensor(t1, constant=False) is t1 + True + >>> mg.astensor(t1, constant=True) is t1 + False + >>> mg.astensor(t1, constant=True).data is t1.data + True + """ + return tensor(t, dtype=dtype, constant=constant, copy=False, ndmin=0)
+ + +_REGISTERED_UFUNC: Dict[np.ufunc, Type["mygrad_ufunc"]] = {} +_REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS: Dict[ + Callable[..., np.ndarray], Callable[..., "Tensor"] +] = {} + +_REGISTERED_BOOL_ONLY_UFUNC: Set[np.ufunc] = { + np.isnan, + np.isfinite, + np.isinf, + np.isnat, + np.signbit, + np.logical_not, + np.logical_and, + np.logical_or, + np.logical_xor, + np.greater, + np.greater_equal, + np.less, + np.less_equal, + np.equal, + np.not_equal, +} + +# These are ufuncs that users might mistake for being differentiable functions; +# for this reason we make explicit the fact that only constant tensors are permitted +# in these operations. +_REGISTERED_CONST_ONLY_UFUNC = { + np.floor_divide, + np.remainder, + np.mod, + np.fmod, + np.divmod, + np.rint, + np.sign, + np.floor, + np.ceil, + np.trunc, +} + + +_REGISTERED_NO_DIFF_NUMPY_FUNCS: Set[Callable] = { + np.allclose, + np.bincount, + np.can_cast, + np.copyto, + np.isclose, + np.may_share_memory, + np.min_scalar_type, + np.result_type, + np.shares_memory, + np.shape, +} + + +class implements_numpy_override: + """Registers a mygrad-based override for a NumPy function of the same name, via + the standard __array_function__ interface. [1]_ + + Examples + -------- + >>> @implements_numpy_override() # np.reshape to be overridden + ... def reshape(x, shape): + ... # a mygrad-based implementation of numpy.reshape + ... print("hello world") + + >>> import numpy as np + >>> import mygrad as mg + >>> np.reshape(mg.tensor(1.), 2) + 'hello world' + + You can also explicit provide the numpy function explicitly + + >>> import numpy as np + >>> @implements_numpy_override(np.reshape) # np.reshape to be overridden + ... def some_function(x, shape): + ... pass + + References + ---------- + .. [1] https://numpy.org/devdocs/reference/arrays.classes.html?#numpy.class.__array_function__ + """ + + __slots__ = ("numpy_func",) + + def __init__(self, numpy_func: Optional[Callable] = None): + # if None, `numpy_func` is inferred from the name of the decorated function + self.numpy_func = numpy_func + + def __call__(self, wrapped_func: T) -> T: + if self.numpy_func is None: + try: + self.numpy_func = getattr(np, wrapped_func.__name__) + except AttributeError: + raise AttributeError( + f"@implements_numpy_override tried to register an override for the function numpy.{wrapped_func.__name__}, but no " + f"such function exists." + ) + + _REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS[self.numpy_func] = wrapped_func + return wrapped_func + + +class _ConstantOnly(ValueError): + pass + + +def _as_constant_array(t: Union["Tensor", np.ndarray]) -> np.ndarray: + """Passes through all non-tensor objects and constant tensors. Raises on + non-constant tensors.""" + if isinstance(t, Tensor): + if t.constant is False: + raise _ConstantOnly() + return t.data + return t + + +class Tensor: + """A numpy-array-like object capable of serving as a node in a computational + graph that supports back-propagation of derivatives via the chain rule. + See the Examples section of the docstring for more details. + + Like the numpy array, mygrad's tensor stores data as an N-dimensional array + and provides an interface accessing, setting, and performing vectorized + operations along the various dimensions of this array. Vectorized operations + support numpy-style broadcasting semantics. + + The contents of a tensor can be accessed and written to using all variety + of basic and advanced indexing (along with mixtures of the two). 
+ + Creating a Tensor + ----------------- + ``mygrad.Tensor`` can be passed any "array-like" object of numerical data. + This includes numbers, sequences (e.g. lists), nested sequences, numpy-ndarrays, + and other mygrad-tensors. mygrad also provides familiar numpy-style tensor-creation + functions (e.g. ``mygrad.arange``, ``mygrad.linspace``, etc.) + + >>> import mygrad as mg + >>> mg.tensor(2.3) # creating a 0-dimensional tensor + Tensor(2.3) + >>> mg.tensor(np.array([1.2, 3.0])) # casting a numpy-array to a tensor + Tensor([1.2, 3.0]) + >>> mg.tensor([[1, 2], [3, 4]]) # creating a 2-dimensional tensor + Tensor([[1, 2], + [3, 4]]) + >>> mg.arange(4) # using numpy-style tensor creation functions + Tensor([0, 1, 2, 3]) + + Creating a non-constant tensor will copy array data: + + >>> import numpy as np + >>> arr = np.arange(10.) + >>> t_var = tensor(arr, constant=False) + >>> np.shares_memory(arr, t_var) + False + + Creating a constant tensor will not make a copy of the array data: + + >>> t_const = mg.tensor(arr, constant=True) + >>> np.shares_memory(arr, t_const) + True + + Forward and Back-Propagation + ---------------------------- + Let's construct a computational graph consisting of two zero-dimensional + tensors, ``x`` and ``y``, which are used to compute an output tensor, + ``ℒ``. This is a "forward pass imperative" style for creating a computational + graph - the graph is constructed as we carry out the forward-pass computation. + + >>> x = mg.tensor(3.0) + >>> y = mg.tensor(2.0) + >>> ℒ = 2 * x + y ** 2 + + Invoking ``ℒ.backward()`` signals the computational graph to + compute the total-derivative of ``ℒ`` with respect to each one of its dependent + variables. I.e. ``x.grad`` will store ``dℒ/dx`` and ``y.grad`` will store + ``dℒ/dy``. Thus we have back-propagated a gradient from ``ℒ`` through our graph. + + Each tensor of derivatives is computed elementwise. That is, if `x = Tensor(x0, x1, x2)`, + then dℒ/dx represents `[dℒ/d(x0), dℒ/d(x1), dℒ/d(x2)]` + + >>> ℒ.backward() # computes dℒ/dx and dℒ/dy + >>> x.grad # dℒ/dx + array(6.0) + >>> y.grad # dℒ/dy + array(4.0) + >>> ℒ.grad # dℒ/dℒ + array(1.0) + + Once the gradients are computed, the computational graph containing ``x``, + ``y``, and ``ℒ`` is cleared automatically. Additionally, involving any + of these tensors in a new computational graph will automatically null + their gradients. + + >>> 2 * x + >>> x.grad is None + True + + Or, you can use the ``tensor.null_grad()`` method to manually clear a + tensor's gradient + + >>> y.null_grad() + Tensor(2.) + >>> y.grad is None + True + + Accessing the Underlying NumPy Array + ------------------------------------ + ``mygrad.Tensor`` is a thin wrapper on ``numpy.ndarray``. A tensor's + underlying numpy-array can be accessed via ``.data``: + + >>> x = mg.tensor([1, 2]) + >>> x.data + array([1, 2]) + + **Do not modify this underlying array**. Any in-place modifications made to this + array will not be tracked by any computational graph involving that tensor, thus + back-propagation through that tensor will likely be incorrect. + + Producing a "View" of a Tensor + ------------------------------ + MyGrad's tensors exhibit the same view semantics and memory-sharing relationships + as NumPy arrays. I.e. any (non-scalar) tensor produced via basic indexing will share + memory with its parent.
+ + >>> x = mg.tensor([1., 2., 3., 4.]) + >>> y = x[:2] # the view: Tensor([1., 2.]) + >>> y.base is x + True + >>> np.shares_memory(x, y) + True + + Mutating shared data will propagate through views: + + >>> y *= -1 + >>> x + Tensor([-1., -2., 3., 4.]) + >>> y + Tensor([-1., -2.]) + + And this view relationship will also manifest between the tensors' gradients + + >>> (x ** 2).backward() + >>> x.grad + array([-2., -4., 6., 8.]) + >>> y.grad + array([-2., -4.]) + + In-Place Operations are not Efficient + ===================================== + It is important to note that while MyGrad's view semantics promote a rich parity + with NumPy, that certain aspects should be avoided in the interest of optimized performance. + Namely, performing in-place operations on tensors is generally not more efficient than + their non-mutating counterparts. + + This is because MyGrad has to track the state of tensors that are involved in a computational + graph. Thus a mutated tensor must have its pre-augmented state stored for future reference; this + defeats the performance benefit of writing to an array's memory in-place. This is especially + inefficient if you are mutating a tensor involved with multiple views of the same memory( + By contrast, producing a view of a tensor _is_ efficient as one would expect). + + Thus these NumPy-like in-place semantics are supported by MyGrad not for the same performance + purposes, but instead to support convenient and familiar code-patterns and to enable one to + port NumPy code to MyGrad (or, in the future, inject MyGrad tensors into NumPy!!) and get + the exact same behavior. + + A final note: MyGrad's in-place operations, when run under :func:`~mygrad.no_autodiff` mode, + do not incur the extra costs noted above, and thus your code will benefit from the performance + benefits of in-place operations. + """ + + __array_priority__ = 15.0 + + def __array_ufunc__( + self, ufunc: Type[np.ufunc], method: str, *inputs: ArrayLike, **kwargs + ) -> Union["Tensor", np.ndarray]: + """An interface provided by NumPy to override the behavior of its ufuncs [1]_. + + MyGrad implements its own ufuncs for all differentiable NumPy ufuncs. + + Non-differentiable numpy ufuncs simply get called on the underlying arrays of tensors and + will return ndarrays. + + The differentiability - or lack thereof - of ufuncs may not be obvious to end users. + Thus potentially ambiguous ufuncs (e.g. `numpy.ceil`) will be made to raise on non-constant + tensors so that the lack of differentiability is made obvious to the users. This design decision + is made in the same spirit as requiring integer-dtype tensors be constant. + + References + ---------- + .. [1] https://numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__ + + Examples + -------- + NumPy ufuncs that represent differentiable operations are overloaded by MyGrad tensors + so that they support backprop + + >>> import mygrad as mg + >>> import numpy as np + + >>> x = mg.tensor([1., 2.]) + + This calls ``mygrad.sin`` under the hood. + + >>> np.sin(x) # returns a tensor + Tensor([0.84147098, 0.90929743]) + + >>> np.sin(x).backward() + >>> x.grad # stores d(sin(x))/dx @ x = [1., 2.] + array([ 0.54030231, -0.41614684]) + + Specifying a dtype, a ``where`` mask, an in-place target (via ``out``) as an array + or a tensor, are all supported. + + >>> x = mg.tensor([1., 2.]) + >>> y = mg.tensor([-1., -1.]) + >>> np.exp(x, where=[False, True], out=y) + Tensor([-1. , 7.3890561]) + >>> y.backward() + >>> x.grad + array([0. 
, 7.3890561]) + + Non-differentiable NumPy ufuncs simply operate on the ndarrays that are wrapped + by MyGrad tensors; these return ndarrays, which will appropriately and explicitly + serve as constants elsewhere in a computational graph. + + >>> x = mg.tensor([1., 2.]) + >>> np.less_equal(x, 1) + array([ True, False]) + """ + out = kwargs.pop("out", (None,)) + if len(out) > 1: # pragma: no cover + raise ValueError( + "mygrad does not support in-place operations with more that one target" + ) + (out,) = out + + out: Optional[Union[np.ndarray, "Tensor"]] + + try: + # differentiable ufunc implemented by mygrad + return getattr(_REGISTERED_UFUNC[ufunc], method)(*inputs, **kwargs, out=out) + except KeyError: + pass + + # non-differentiable ufuncs get called on numpy arrays stored by tensors + if ufunc in _REGISTERED_BOOL_ONLY_UFUNC: + caster = asarray + elif ufunc in _REGISTERED_CONST_ONLY_UFUNC: + # the presence of non-constant tensors will raise + caster = _as_constant_array + else: # pragma: no cover + return NotImplemented + + try: + if out is not None: + kwargs["out"] = caster(out) + # returns ndarray + return getattr(ufunc, method)(*(caster(t) for t in inputs), **kwargs) + except _ConstantOnly: + raise ValueError( + f"{repr(ufunc)} cannot involve non-constant mygrad tensors." + ) + + def __array_function__( + self, func: Callable[..., np.ndarray], types, args, kwargs + ) -> Union["Tensor", np.ndarray]: + if func in _REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS: + return _REGISTERED_DIFFERENTIABLE_NUMPY_FUNCS[func](*args, **kwargs) + elif func in _REGISTERED_NO_DIFF_NUMPY_FUNCS: + return func( + *(t.data if isinstance(t, Tensor) else t for t in args), + **{ + k: (v.data if isinstance(v, Tensor) else v) + for k, v in kwargs.items() + }, + ) + else: # pragma: no cover + return NotImplemented + + def __array__( + self, dtype: DTypeLike = None, copy: Optional[bool] = None + ) -> np.ndarray: + if NP_IS_V2: + return np.asarray(self.data, dtype=dtype, copy=copy) + else: # pragma: no cover + if copy is None: + copy = False + return np.array(self.data, dtype=dtype, copy=copy) + + def __init__( + self, + x: ArrayLike, + *, + dtype: DTypeLikeReals = None, + constant: Optional[bool] = None, + copy: bool = True, + ndmin: int = 0, + _creator: Optional[Operation] = None, + _base: Optional["Tensor"] = None, + ): + """ + Parameters + ---------- + x : ArrayLike + Input data, in any form that can be converted to an array. This + includes numbers, sequences, nested sequences, numpy-ndarrays, + and mygrad-tensors. + + dtype : DTypeLikeReals + `int`, `float`, or a real-valued numpy data type. By default the + data type is inferred from ``x`` via ``numpy.asarray(x)``. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. `self.grad` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + copy : Optional[bool] + Determines if the incoming array-data will be copied. + + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be prepended to the shape as + needed to meet this requirement. + + Notes + ----- + The following are parameters reserved only for internal use: + + _creator : Optional[mygrad.Operation] + The operation-instance whose forward pass produced `self`. Should not + be set manually by users. 
+ + _base : Optional[Tensor] + Points to the tensor that ``self`` shares memory with. + """ + + if constant is not None and not isinstance(constant, bool): + raise TypeError(f"`constant` must be a boolean value, got: {constant}") + + self._creator: Optional[Operation] = _creator + + if not NP_IS_V2: # pragma: no cover + self.data = np.array(x, dtype=dtype, copy=copy, ndmin=ndmin) + else: + if copy is False: + self.data = np.asarray(x, dtype=dtype) + if not isinstance(ndmin, Integral): + raise TypeError( + f"'{type(ndmin)}' object cannot be interpreted as an integer" + ) + if ndmin and self.data.ndim < ndmin: + self.data = self.data[ + (*(None for _ in range(ndmin - self.data.ndim)),) + ] + else: + self.data = np.array(x, dtype=dtype, copy=copy, ndmin=ndmin) + + dtype = self.data.dtype.type + is_float = issubclass(dtype, np.floating) # faster than `numpy.issubdtype` + if not is_float and _track.TRACK_GRAPH: + # No need to constrain dtypes if we aren't tracking the graph. + # Also, it is nice to enable complex arithmetic through mygrad + # functions that are wrapped in no_autodiff + if not issubclass(dtype, CONSTANT_ONLY_DTYPES): + raise TypeError( + f"Tensor data must be of an floating type, integer type, or boolean type, " + f"received {dtype}" + ) + + elif constant is False: + raise ValueError("Integer-valued tensors must be treated as constants.") + + if constant is None: + # non-float: default constant -> True + # float: default constant -> False + constant = not is_float + + self._constant = constant + + self._grad = None # type: Union[None, np.ndarray] + + # track all operations that this tensor participates in + self._ops: Set[WeakRef[Operation]] = set() + + # base points to the initial tensor that owns the memory of this + # tensor + self._base = _base # type: Optional[Tensor] + # stores all of the tensors that are a view of this tensor + self._view_children = WeakRefIterable() # type: WeakRefIterable[Tensor] + + # Used to reflect the view of the gradient associated with that of `self.base`. + # This is a means of distinguishing between the gradient set on `self` as + # part of backpropagation and the view of the gradient of its base. + self._view_grad: Optional[np.ndarray] = None + + @property + def grad(self) -> Optional[np.ndarray]: + """ + Returns the derivative of ``ℒ`` with respect to this tensor. + + ``ℒ`` is the terminal node in the compuational graph from which + ``ℒ.backward()`` was invoked. + + If this tensor is a view of another tensor then their gradients + will exhibit the same memory-sharing relationship as their data. + + Returns + ------- + dℒ/dx: numpy.ndarray + The gradient of the terminal node in a computational graph + with respect to this tensor. The shape of this numpy array + matches ``self.shape`` + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([1.0, 2.0]) + + Prior to backpropagation tensors have ``None`` set for their gradients. + + >>> x.grad is None + True + + Now we trigger backpropagation... + + >>> ℒ = x ** 2 + >>> ℒ.backward() + + and we see that ``x.grad`` stores dℒ/dx + + >>> x.grad # dℒ/dx + array([2., 4.]) + + Now we will demonstrate the relationship between gradient a view tensor + and that of its base. 
+ + >>> base = mg.Tensor([1.0, 2.0, 3.0]) + >>> view = base[:2]; view + Tensor([1., 2.]) + + >>> ℒ = base ** 2 + >>> ℒ.backward() + + Although ``view`` is not directly involved in the computation in ``ℒ``, + and thus would not typically store a gradient in due to ``ℒ.backward()``, + it shares memory with ``base`` and thus it stores a gradient in correspondence + to this "view relationship". I.e. because ``view == base[:2]``, then we expect + to find that ``view.grad == base.grad[:2]``. + + >>> base.grad + array([2., 4., 6.]) + >>> view.grad + array([2., 4.]) + + >>> view.grad.base is base.grad + True + + The reasoning here is that, because a base tensor and its view share the same + array data, then varying an element in that data implies that both the base + tensor and the view will change (assuming the variation occurs specifically in + a shared region). It follows that the base tensor's gradient must share the same + relationship with the view-tensor since these are measures of "cause and effects" + associated with varying elements of data (albeit infinitesmaly). + """ + if self._base is None: + return self._grad + + if self._view_grad is not None and self._view_grad.base is self._base._grad: + # view grad has been computed already + return self._view_grad + + if self._base._grad is None or self._creator is None: + # ``self`` had its graph, connecting it to its base, cleared. + # ``self._view_grad`` can't be computed without this info. + return None + + (view_parent,) = self._creator.variables + + # recursively fetches grad from parent + grad = view_parent.grad + with _track.no_autodiff: + self._view_grad = self._replay_op(grad).data if grad is not None else None + return self._view_grad + +
[docs] def astype( + self, + dtype: DTypeLikeReals, + casting="unsafe", + copy: bool = True, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """Copy of the tensor with the specified dtype. + + The resulting tensor is not involved in any computational graph + and has no gradient associated with it. + + This docstring was adapted from that of ``ndarray.astype``. + + Parameters + ---------- + dtype : Union[type, str] + The real-valued numeric data type. This can be a numpy dtype or + a corresponding string identifier. + + casting : Literal['no', 'equiv', 'safe', 'same_kind', 'unsafe'] + Controls what kind of data casting may occur. Defaults to ‘unsafe’ for backwards compatibility. + - ‘no’ means the data types should not be cast at all. + - ‘equiv’ means only byte-order changes are allowed. + - ‘safe’ means only casts which can preserve values are allowed. + - ‘same_kind’ means only safe casts or casts within a kind, like float64 to float32, are allowed. + - ‘unsafe’ means any data conversions may be done. + + copy : bool, optional (default=True) + By default, astype always returns a newly allocated array. If this is set to false, and + the ``dtype`` and ``constant`` requirements are satisfied, the input tensor is returned + instead of a copy. + + constant : Optional[bool] + If specified, determines if the returned tensor is a constant. + Otherwise this argument is inferred from the original tensor. + + Returns + ------- + Tensor + The resulting tensor with the specified data type. + + References + ---------- + [1].. Retrieved from: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.astype.html + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> x = mg.arange(10); x + Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using a string to specify the data type: + + >>> x.astype("float32") + Tensor([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32) + + Specifying a numpy data type object, and specifying that the + tensor is to be treated as a constant: + + >>> x.astype(np.int8, constant=True) + Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int8) + """ + cast_data = self.data.astype(dtype=dtype, casting=casting, copy=copy) + + if cast_data is self.data and (constant is None or self.constant is constant): + return self + + return type(self)(cast_data, copy=False, constant=constant)
+ + @classmethod + def _op( + cls, + Op: Type[Operation], + *input_vars: ArrayLike, + op_args: Optional[Sequence] = None, + op_kwargs: Optional[Dict[str, Any]] = None, + constant: Optional[bool] = None, + out: Optional[Union[np.ndarray, "Tensor"]] = None, + ): + """Wraps operations performed between tensors: f(a, b, ...). + + For developer use only. + + Parameters + ---------- + Op : Type[Operation] + Operation-class, used to perform forward-pass on `input_vars`. + + input_vars : Tuple[array_like, ...] + An arbitrary number of input-tensors. These can take any form that + can be converted to an array. This includes numbers, sequences, nested + numerical sequences, numpy-ndarrays, and mygrad-tensors. + + op_args : Optional[Tuple[Any, ...]] + Arbitrary positional arguments passed to the operation's forward pass. + + op_kwargs : Optional[Dict[str, Any]] + Arbitrary keyword arguments passed to the operation's forward pass. + + constant : bool, optional (default=False) + If True, the resulting Tensor is a constant. + + out: Optional[Union[np.ndarray, "Tensor"]] + The target where the output (an ndarray) of the operation will be written. + Thus this raises if `out` is read-only. + + There is an exception to this if a tensor is provided, in which case the + operation does not write to its underlying memory but rather triggers + "in-place semantics" so that the computational graph behaves as if the + tensor was mutated. See ``Tensor._in_place_op`` for more details. + + Returns + ------- + mygrad.Tensor + The tensor-result of the operation's forward-pass.""" + if out is not None: + if isinstance(out, tuple): + if len(out) > 1: # pragma: no cover + raise ValueError( + "mygrad does not support in-place operations with more that one target" + ) + (out,) = out + + if isinstance(out, Tensor): + out._in_place_op( + Op, + *input_vars, + op_args=op_args, + op_kwargs=op_kwargs, + constant=constant, + ) + return out + + _uniques_bases_then_arrs = () + + tensor_vars = tuple( + cls(var, constant=True, copy=False) if not isinstance(var, Tensor) else var + for var in input_vars + ) + + # cast all input-vars to tensors + if _track.TRACK_GRAPH and _mem.MEM_GUARD: + # lock memory of array data + _uniques_bases_then_arrs = WeakRefIterable( + _mem.lock_arr_writeability(x) + for x in _mem.unique_arrs_and_bases(tensor_vars) + ) + + if op_args is None: + op_args = tuple() + + if op_kwargs is None: + op_kwargs = {} + + f = Op() + + try: + if out is None: + op_out: np.ndarray = f(*tensor_vars, *op_args, **op_kwargs) + else: + op_out: np.ndarray = f(*tensor_vars, *op_args, **op_kwargs, out=out) + except Exception as e: + if _track.TRACK_GRAPH and _mem.MEM_GUARD: + _mem.release_writeability_lock_on_op(_uniques_bases_then_arrs) + raise e + + if not _track.TRACK_GRAPH: + # execute operation without tracking creator or any graph + # information + return cls( + op_out, + constant=constant, # constant not determined by graph info + copy=False, + _creator=None, + _base=None, + ) + + # points to parent tensor that op-output is a view of + base = None # type: Optional[Tensor] + + # If output of op is a view - tracks the tensor var that is + # the parent of the view + parent_var: Optional[Tensor] = None + + # Determine whether or not op was a view; if so, `base` + # points to parent Tensor + op_out_base = op_out.base + if f.can_return_view and op_out_base is not None: + vars_can_share_mem = ( + isinstance(var, (np.ndarray, Tensor)) for var in input_vars + ) + for can_share_mem, parent_var in zip(vars_can_share_mem, tensor_vars): + 
if not can_share_mem: + continue + parent_data = parent_var.data + parent_data_base = parent_data.base + + if ( + (op_out_base is parent_data) + or (op_out_base is parent_data_base) + or (op_out is parent_data) + ): + if parent_var._base is not None and parent_var._creator is None: + parent_var._base = None + + base = parent_var if parent_var.base is None else parent_var.base + break + else: + parent_var = None + + for v in input_vars: + if isinstance(v, Tensor): + # tensor's graph has been cleared, but its base lingers + if v._base is not None and v._creator is None: + v._base = None + + if base is None: + # non-view ops clear grads + v._grad = None + v._view_grad = None + + if base is not None: + # we need to be able to replay view-ops for doing in-place operations + # on graphs with views + f.replay_args = op_args + f.replay_kwargs = op_kwargs + f.replay_force_constant = constant + + # record graph information + if constant is None: + if any(not var.constant for var in tensor_vars): + constant = None + else: + constant = True + + # record that a variable participated in that op + ref_f = ReferenceType(f) # type: WeakRef[Operation] + for var in tensor_vars: + var._ops.add(ref_f) + + tensor_out = cls( + op_out, + constant=constant, + copy=False, + _creator=f, + _base=base, + ) + + if parent_var is not None: + parent_var._view_children.append(tensor_out) + + if _mem.MEM_GUARD: + if out is not None and tensor_out.data.base is not None: + _mem.lock_arr_writeability(tensor_out.data.base) + _uniques_bases_then_arrs.append(tensor_out.data.base) + _mem.lock_arr_writeability(tensor_out.data) + tensor_refs = _uniques_bases_then_arrs + tensor_refs.append(tensor_out.data) + finalize(f, _mem.release_writeability_lock_on_op, tensor_refs) + return tensor_out + + def _replay_op(self, *input_vars: ArrayLike) -> "Tensor": + """*dev use only* + + Replays the op that produced `self` - called on the specified + input vars""" + if self.creator is None: + raise DisconnectedView( + "``Tensor._replay_op(...)`` was called on a tensor without a creator." + "\nPlease report this error at: https://github.com/rsokl/MyGrad/issues" + ) + return self._op( + type(self.creator), + *input_vars, + op_args=self.creator.replay_args, + op_kwargs=self.creator.replay_kwargs, + constant=self.creator.replay_force_constant, + ) + +
[docs] def backward(self, grad: Optional[ArrayLike] = None): + """Trigger backpropagation and compute the derivatives of this tensor. + + Designating this tensor as the tensor ℒ, compute dℒ/dx for all (non-constant) tensors + that preceded ℒ in its computational graph, and store each of these derivatives in ``x.grad`` + respectively. + + Once back-propagation is finished, the present tensor is removed from all computational + graphs, and the preceding graph is cleared. + + If ℒ is a non-scalar tensor (i.e. ``ℒ.ndim`` is greater than 0), then calling + ``ℒ.backward()`` will behave as if ℒ was first reduced to a scalar via summation. I.e. it + will behave identically to ``ℒ.sum().backward()``; this ensures that each element of any + dℒ/dx will represent a derivative of a scalar function. + + Parameters + ---------- + grad : Optional[array_like], (must be broadcast-compatible with ``self`` + By default, the present tensor is treated as the terminus of the computational graph (ℒ). + Otherwise, one can specify a "downstream" derivative, representing ``dℒ/d(self)``. + This can be used to effectively connect otherwise separate computational graphs. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor(2) + >>> y = mg.tensor(3) + >>> w = x * y + >>> ℒ = 2 * w + >>> ℒ.backward() # computes dℒ/dℒ, dℒ/dw, dℒ/dy, and dℒ/dx + + >>> ℒ.grad # dℒ/df == 1 by identity + array(1.) + >>> w.grad # dℒ/dw + array(2.) + >>> y.grad # dℒ/dy = dℒ/dw * dw/dy + array(4.) + >>> x.grad # dℒ/dx = dℒ/dw * dw/dx + array(6.) + + Calling ``ℒ.backward()`` from a non-scalar tensor is equivalent + to first summing that tensor. + + >>> tensor = mg.tensor([2.0, 4.0, 8.0]) + >>> ℒ = tensor * tensor[::-1] # [x0*x2, x1*x1, x2*x0] + >>> ℒ.backward() # behaves like ℒ = x0*x2 + x1*x1 + x2*x0 + >>> tensor.grad + array([16., 8., 4.]) + + >>> tensor = mg.Tensor([2.0, 4.0, 8.0]) + >>> ℒ = tensor * tensor[::-1] + >>> ℒ.sum().backward() + >>> tensor.grad + array([16., 8., 4.]) + + Specifying a value for ``grad`` + + >>> x = mg.Tensor(1.) + >>> x.backward(2.) + >>> x.grad # Would normally be dℒ/dℒ == 1 + array(2.) + """ + if not _track.TRACK_GRAPH: + return + + if self.constant: + self.clear_graph() + return + + topo_sorted_tensors: Deque["Tensor"] = deque([]) + seen: Set[int] = set() + + collect_all_tensors_and_clear_grads(self, seen, topo_sorted_tensors) + + # don't set self._grad yet because there is a grad-clearing step that + # occurs during graph creation + if grad is not None: + # `self` is guaranteed to be a tensor of floats + # so we can simply cast `grad` to be the same dtype + _grad = asarray(grad, dtype=self.dtype) + + if _grad.shape != self.shape: + try: + # See if grad can broadcast to `self` + # raises ValueError if not + _grad = np.multiply( + np.full_like(self.data, fill_value=1.0), + _grad, + dtype=self.dtype, + ) + if _grad.shape != self.shape: + # mutual broadcasting occurred + raise ValueError() + except ValueError: + raise ValueError( + f"`tensor.backward(grad)` was passed a gradient with an incompatible shape.\n" + f"`grad` must be broadcast-compatible with `tensor.shape={self.shape}`\n" + f"Got `grad.shape={_grad.shape}`" + ) + else: + _grad = np.full_like(self.data, fill_value=1.0) + + self._grad = _grad + + if self.creator is not None: + for t in topo_sorted_tensors: + t._backward() + + self.clear_graph()
+ + def _backward(self): + """ + **For dev-use only** + + If `self` has accumulated incoming gradients from all operations in the terminal node's + computational graph, back-propagate the accumulated gradient to the creator of `self`. + + Parameters + ---------- + graph : Set[Operation] + The set of all operations relevant to the terminal node of the computational graph, + which triggered back-propagation + + Raises + ------ + AssertionError + Raises if the tensor and its associated gradient possess different shapes. + Raises if `_backward` triggered on a tensor with gradient of `None`. + """ + assert self._grad is not None, ( + f"backprop, post grad-accumulation, was triggered " + f"on a tensor with no gradient" + f"\n{self}" + f"\nid {id(self._ops)}" + f"\ngrad: {self.grad}" + f"\ncreator: {self.creator}" + f"\nops: {self._ops}" + f"\nbase: {self.base}" + ) + assert self._grad.shape == self.shape, ( + f"A tensor and its associated gradient must possess the same shape. Got:" + f"\ntensor-shape: {self.shape}" + f"\ngrad-shape: {self._grad.shape}" + ) + if self._creator is not None: + self._creator.backward(self._grad) + return + +
[docs] def null_grad(self, *, _clear_view_info: bool = False) -> "Tensor": + """Sets this tensor's gradient to be ``None``. + + This operation is performed in-place, but a reference to the + tensor is returned in order to permit mapping semantics. + + Also removes any ``base`` reference from disconnected views. + + Returns + ------- + self + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor(2.) + >>> (x ** 2).backward() + >>> x.grad + array(4.) + >>> x.null_grad() # returns a reference of `x` + Tensor(2.0) + >>> x.grad is None + True""" + self._view_grad = None + self._grad = None + + if _clear_view_info: + if self._base is not None and self._creator is None: + self._base = None + + return self
+ +
[docs] def null_gradients(self, clear_graph: bool = True): + """ + **Deprecated: Tensors will automatically have their computational graphs cleared during backprop. + Simply involving a tensor in a new computational graph will null its gradient.** + + Sets the gradient for this tensor and for all preceding tensors in the computation graph + to ``None``. + + Additionally, the computational graph that terminates in this tensor can also be cleared + during this process. + + Parameters + ---------- + clear_graph : bool, optional (default=True) + If ``True`` clear the computational graph in addition to nulling the gradients. + + Notes + ----- + It is advised to clear the computational graph when nulling gradients, i.e. invoke + ``null_gradients(clear_graph=True)`` (or simply ``null_gradients()``). This de-references + all intermediate operations and tensors in the computational graph and thus permits + garbage collection - freeing the memory that was used by the computational graph. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor(2) + >>> y = mg.tensor(3) + >>> w = x * y + >>> f = 2 * w + >>> f.backward() # computes df/df, df/dw, df/dy, and df/dx + >>> any(tensor.grad is None for tensor in (f, w , x, y)) + False + + >>> f.null_gradients() # set tensor.grad to None for all tensors in the graph + >>> all(tensor.grad is None for tensor in (f, w , x, y)) + True + """ + import warnings + + warnings.warn( + "`tensor.null_gradients()` is deprecated. Calling it will raise an error " + "in future versions of MyGrad. A tensor will automatically " + "have its gradient nulled if you use it in a new computational graph. " + "Or, you can call `tensor.null_grad()` to null that individual tensor's " + "gradient.", + FutureWarning, + )
+ +
[docs] def clear_graph(self): + """ + Removes the current tensor – and tensors above it – from their shared + computational graph. + + This de-references all operations involved in the graph and the intermediate + tensors that were created by it. Arrays whose memory were locked by the + computational graph will have their writeability restored. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> x = np.array([1., 2.]) + >>> y = mg.multiply(2., x) + >>> x.flags.writeable, y.creator + (False, <mygrad.math.arithmetic.ops.Multiply at 0x224f89cac48>) + >>> y.clear_graph() + >>> x.flags.writeable, y.creator + (True, None) + """ + if self._base is not None: + # "pull" on grad to force views to update their + # gradients from upstream before the graph info + # gets cleared + _ = self.grad + + self._view_children.clear() + self._ops.clear() + + if self._creator is None: + return + + creator = self._creator + self._creator = None # marks tensor as "visited" during graph-traversal + + for var in creator.variables: # type: "Tensor" + var.clear_graph()
+ + @property + def constant(self) -> bool: + """If ``True``, this tensor is a constant; it will not propagate any gradient. + + Additionally, any tensor that is a descendant of constant tensors will also + be a constant. + + Integer-valued tesnors, Python scalars and NumPy arrays are treated as constant + tensors when included in MyGrad computational graphs. + + Returns + ------- + bool + + Examples + -------- + Constant-tensors do not back-propagate gradients: + + >>> import mygrad as mg + >>> x = mg.Tensor([1., 2.], constant=True) + >>> y = mg.Tensor([0., 3.], constant=False) + >>> f = x * y + >>> f.backward() + + >>> x.grad is None # x has no gradient + True + >>> y.grad + array([1., 2.]) + + A tensor that is derived solely from constant tensors is also + a constant: + + >>> import numpy as np + >>> x = mg.Tensor([1., 2.], constant=True) + >>> y = mg.Tensor([0., 3.], constant=True) + >>> z = (x + y) ** 2 - np.array([8., 7.]) + >>> z.constant + True + + Integer-valued tensors are treated as constants + + >>> mg.Tensor([1, 2]).constant + True + """ + return self._constant + + @property + def creator(self) -> Optional[Operation]: + """The ``Operation`` instance that produced ``self``. + + Returns + ------- + creator : Optional[Operation] + The operation-instance that created the tensor, or `None`. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor(3) + >>> x.creator is None + True + >>> y = mg.Tensor(2) + >>> z = x * y # Multiply(x, y) -> z + >>> z.creator + <mygrad.math.arithmetic.ops.Multiply at 0x2df5a130438> + """ + return self._creator + + def __len__(self) -> int: + return len(self.data) + + def __contains__(self, item) -> bool: + return self.data.__contains__(item) + + def __getitem__(self, item: Index) -> "Tensor": + return self._op(GetItem, self, op_args=(item,)) + + def __iter__(self) -> Iterator["Tensor"]: + # In the same way that numpy doesn't let you iterate over 0-dimensional + # arrays, don't allow iteration over 0-dimensional arrays. + if self.ndim == 0: + raise TypeError("iteration over a 0-d tensor") + return iter(self[n] for n in range(len(self))) + + def _in_place_op( + self, + inplace_op: Type[Operation], + *input_vars: ArrayLike, + op_args: Optional[Sequence] = None, + op_kwargs: Optional[Dict] = None, + constant: Optional[bool] = None, + ): + if _track.TRACK_GRAPH is False: + return self._op( + inplace_op, + *input_vars, + op_args=op_args, + op_kwargs=op_kwargs, + constant=constant, + out=self.data, + ) + # + # ********************************************************************************** + # The way that in-place updates work in MyGrad is that any tensor that + # is about to undergo a mutation gets "cloned". Each resulting "placeholder" + # is used to represent that tensor in any non-view operations that the tensor + # was participating in. This ensures that the stateful computational graph + # is not corrupted by this mutation. + # + # Once the placeholders have been created, they have permanently replaced the + # rolls of their counterparts within the computational graph. Furthermore, they + # exist only internally to the computational graph and thus cannot be the + # targets of subsequent views or in-place updates. + # + # At this point, the "original" tensors merely reserve the publicly-available + # Tensor-instances (husks) that the users will access. We eventually need to + # populate these husks with the appropriate augmented contents and graph-history. 
+ # + # Thus this method will compute the in-place operation on a new tensor, and + # will create a new, internal computational graph involving the base tensor + # affected by the mutation and any of its view-children. These tensors represent + # the mutated tensors that the users expect to have access to. + # + # We must connect this new computational graph to the preceding one – the one + # involving the placeholders; this way we can backpropagate appropriately and + # through all influencers. + # + # Finally we mirror each of these new tensors into the husks of the publicly + # -available tensors and reroute the computational graph through them so that + # the user sees that all of the relevant tensors have been augmented, and that + # they are connected to the appropriate "history" such that backprop occurs + # without error or inaccuracy. + # + # + # For illustration, consider the following graph: + # + # ... x------[square]-- y = x**2 + # \ + # ---[slice]-- z = view-x + # \ + # ---[mul]-- w = 3 * z + # + # Now suppose that we mutate `x` with `x[:] = 0`. This is a simpler case than + # mutating a view of `x`, since `x` is already the base tensor. + # - This should not affect `y` + # - It should affect `view_x` + # - It should *not* affect `w`, which depends on `view_x` in a "static" way. + # I.e. the value for `w` is already resolved and is not a view of z or x. + # + # + # As prescribed above, we will make the placeholders: px and pz, and we + # will reroute the operations that statically depend on the old values of x and z + # through these placeholders. + # + # Next we will have `x` point to a mutated version of itself, in accord with the + # in-place update being performed, and we will subsequently recreate any + # views of x (i.e. z), based off of this mutated tensor. + # + # The resulting graph is: + # + # ---[slice]-- z = view-x + # / + # -----[set-item] -- x = px.copy()[:]=0 + # / + # ... px------[square]-- y = px**2 + # \ + # ---[slice]-- pz = view-px + # \ + # ---[mul]-- w = 3 * pz + # + # Note that px and pz are strictly *internal* tensors; they cannot be accessed for + # use in any further operations, whereas `x` and `z` are available for further use. + # + # ********************************************************************************** + # + # Replace base and all of its views with "placeholder" tensors; + # they serve as internal references to all tensors pre-mutation + # and will preserve ops relying on the un-mutated tensors. + # + # These placeholder tensors are never publicly-available and thus cannot + # be involved directly in future in-place updates + + # In Tensor._op, any tensor entering an op has its grad/view-info cleared + # We must do this here up front since we need to consume information + # about ``self`` + self.null_grad(_clear_view_info=True) + if self._base is not None and not self._base._view_children: + self._base = None + + graph = _dup.DuplicatingGraph(self if self.base is None else self.base) + + # Create copy of base so that mutation has no impact on the + # state of any ops depending on it or its views + mutant_base = graph.base.tensor.copy() + mutant_base.data.flags.writeable = ( + graph.base.tensor.data.flags.writeable + or _mem.array_is_tracked(graph.base.tensor.data) + ) + + # Create view of base in correspondence to relationship + # that `self` has to base. 
Mutating this view will mutate + # base appropriately + inplace_target = mutant_base + + # stores view-fn sequence from base -> in-place target + view_fn_sequence: List[Callable[[np.ndarray], np.ndarray]] = [] + + with _track.no_autodiff: + # get view sequence from base -> in-place target + for node in graph.get_path_to_base(self)[::-1][1:]: # skip base + # need to point to place-holder replay op to avoid creating + # forwards references to downstream tensors + f = node.placeholder._replay_op + if self.base is not None: + # need sequence of view-ops + view_fn_sequence.append(_track.no_autodiff(f, to_numpy=True)) + inplace_target = f(inplace_target) + + # Constant info was not propagated through no-autodiff mode. + # It must be inferred from the original tensor + inplace_target._constant = mutant_base.constant + + mutant_base_data = mutant_base.data + del mutant_base + + try: + with _mem.mem_guard_off: + placeholder_mutant_view = ( + self._op( # will raise if original data not writeable + inplace_op, + *(graph.get_placeholder_if_exists(t) for t in input_vars), + op_args=op_args, + op_kwargs=op_kwargs, + constant=constant, + out=inplace_target.data, + ) + ) + except Exception as e: + graph.restore_old_graph() + raise e + + placeholder_mutant_view._constant = inplace_target._constant + + if _mem.MEM_GUARD: + _mem.force_lock_tensor_and_creators(placeholder_mutant_view) + + if placeholder_mutant_view.creator.where is not True: + # An operation like `multiply(x, y, where=mask, out=z)` occurred. + # `placeholder_mutant_view` is the mutated version of `z`. + # We need to connect the upstream version of `z` to the computational + # graph so that `~mask * dℒ/dz` backprops to it, whereas `~mask * dℒ/dz` + # will backprop to `x` and `y`. + # + # This is basically an alternative to treating + # `multiply(x, y, where=mask, out=z)` + # like a three-input operation, which adds complexity to the implementation + # of every op that supports `where` and `out`. + # + # old-z --------------------- + # | | + # multiply(x, y, where=mask, out=z) | + # | | + # z -------------------- + # | | + # ApplyMask + # | + # z + with _mem.mem_guard_off: + placeholder_mutant_view = type(self)._op( + _dup.ApplyMask, + placeholder_mutant_view, # gets passed through unchanged + # ~mask * grad backprops to upstream placeholder + graph[self].placeholder, + op_kwargs={ + "mask": placeholder_mutant_view.creator.where, + }, + ) + + # Connect public base tensor to placeholder graph via the mutated placeholder + # tensor `out`. + if self.base is None: + # The current graph: + # base-p --> | inplace | --> vp' + # Becomes: + # base-p --> | inplace | --> base' + # + # The base tensor itself was the target of the in-place operation, + # thus we need simply mirror original base against the mutant placeholder. + # This effectively connects the original base to the placeholder graph + mutant_base = placeholder_mutant_view + + else: + # in-place operation occurred on a view; must connect mutated base + # to graph and then reproduce downstream views + # + # The current graph: + # vp --> | inplace | --> vp' + # + # Becomes: + # + # vp --> | inplace | --> vp' --> | | + # | unview | --> base' + # base-p -----------------------> | | + # + # I.e. the mutated base is a combination of the placeholder + # base and of the mutant view. 
+ + mutant_base = type(self)._op( + _dup.UnView, + graph.base.placeholder, + placeholder_mutant_view, + op_kwargs={ + # Copy to avoid upstream placeholder mutant view sharing memory + # with downstream mutant base + "mutant_base_data": mutant_base_data, + "view_fn_sequence": view_fn_sequence, + }, + ) + + del placeholder_mutant_view + + # The original base now points to the augmented array data + # and has the InPlaceOp as its creator + _dup.mirror_tensor(source=mutant_base, target=graph.base.tensor) + + del mutant_base + + # Now that the base-tensor has been incorporated into the graph, + # recreate the view-graph and reroute all tensors from previous + # graph to their downstream counterparts + # + # Note that iterating in a topologically-ordered way is critical + # here: each parent is updated before creating one of its children + # + # Iteration is always based off of the placeholders' relative positions + # in the graph since this will never be mutated. + for node in graph: + if node.parent is None: + continue + view = node.tensor._replay_op(node.parent) + _dup.mirror_tensor(source=view, target=node.tensor) + node.parent._view_children.append(node.tensor) + + @property + def shape(self) -> Shape: + """Tuple of tensor dimension-sizes. + + Sizes are reported in row-major order. + + Returns + ------- + Tuple[int, ...] + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([1, 2, 3, 4]) # axis-0 has size 4 + >>> x.shape + (4,) + >>> y = mg.Tensor([[1, 2, 3], # axis-0 has size 2, axis-1 has size 3 + ... [4, 5, 6]]) + >>> y.shape + (2, 3) + + The shape attribute can also be set to reshape the tensor in-place + + >>> y.shape = (1, 6, 1) + >>> y + Tensor([[[1], + [2], + [3], + [4], + [5], + [6]]]) + + See Also + -------- + mygrad.reshape : similar function + Tensor.reshape : similar method""" + return self.data.shape + + @shape.setter + def shape(self, newshape: Union[int, Shape]): + # Even though this op cannot mutate views, we still must + # do graph-replaying here so that views can still reference + # this tensor, but with the proper reshaping mediating them. + # + # E.g. 
+ # x = arange(10) # shape-(10,) + # y = x[:6] # shape-(6,) + # x.shape = (2, 5) # shape-(2, 5) + # + # y.base points to the shape-(2,5) array + # even though y is a view of the flat array + # + # thus we need to play this graph as + # (history) + # | + # placeholder shape-(10,) + # |-reshape + # x shape-(2,5) + # |-reshape + # placeholder shape-(10,) + # |-getitem + # y shape-(4,) + + if not _track.TRACK_GRAPH: + self.data.shape = newshape + return + + if newshape == self.shape: + return + + old_shape = self.shape + + # raise here if the shape is not compatible + self.data.shape = newshape + self.data.shape = old_shape + + # create placeholders for self and all of its view-children + graph = _dup.DuplicatingGraph(self) + # need to iterate over all nodes now before we tinker + # with the view children + nodes = tuple(graph) + + # reshape placeholder of self + out = graph.base.placeholder.reshape(newshape) + + # Store contents of `out` in `self` and replace `out` in + # graph with `self` + out._base = graph.base.placeholder.base + _dup.mirror_tensor(source=out, target=self) + _dup.reroute_ops_through(source=out, target=self) + del out + + # although `self` is a view of placeholder, placeholder + # is strictly an internal tensor, we won't expose it as + # base + graph.base.placeholder._view_children.append(self) + base = graph.base.placeholder.base + + if base is not None: + # if `self` was a view, we need to update that parent's + # view children so that it points to the placeholder + creator = graph.base.placeholder.creator.variables[0] + creator._view_children = WeakRefIterable( + [ + w if w is not self else graph.base.placeholder + for w in graph.base.placeholder._view_children + ] + ) + + # Undo the reshape, and place this as the tensor joining + # the reshaped `self` with the views of unshaped `self` + unshaped = self.reshape(old_shape) + + for node in nodes: + if node.parent is None: + continue + # direct what would be views of `self` to be views of `unshaped`, + # which translates the mutated shape of `self` to the original + # shape used to create the views + parent = node.parent if node.parent is not self else unshaped + view = node.tensor._replay_op(parent) + _dup.mirror_tensor(source=view, target=node.tensor) + _dup.reroute_ops_through(source=view, target=node.tensor) + parent._view_children.append(node.tensor) + + def __setitem__(self, key: Index, value: ArrayLike): + self._in_place_op(SetItem, self, value, op_args=(key,)) + + def __add__(self, other: ArrayLike) -> "Tensor": + return self._op(Add, self, other) + + def __iadd__(self, other: ArrayLike) -> "Tensor": + self._in_place_op(Add, self, other) + return self + + def __radd__(self, other: ArrayLike) -> "Tensor": + return self._op(Add, other, self) + + def __sub__(self, other: ArrayLike) -> "Tensor": + return self._op(Subtract, self, other) + + def __isub__(self, other: ArrayLike) -> "Tensor": + self._in_place_op(Subtract, self, other) + return self + + def __rsub__(self, other: ArrayLike) -> "Tensor": + return self._op(Subtract, other, self) + + def __truediv__(self, other: ArrayLike) -> "Tensor": + return self._op(Divide, self, other) + + def __rtruediv__(self, other: ArrayLike) -> "Tensor": + return self._op(Divide, other, self) + + def __floordiv__(self, other: ArrayLike) -> np.ndarray: + return np.floor_divide(self, other) + + def __rfloordiv__(self, other: ArrayLike) -> np.ndarray: + return np.floor_divide(other, self) + + def __itruediv__(self, other: ArrayLike) -> "Tensor": + self._in_place_op(Divide, self, other) + 
return self + + def __mul__(self, other: ArrayLike) -> "Tensor": + return self._op(Multiply, self, other) + + def __imul__(self, other: ArrayLike) -> "Tensor": + self._in_place_op(Multiply, self, other) + return self + + def __rmul__(self, other: ArrayLike) -> "Tensor": + return self._op(Multiply, other, self) + + def __matmul__(self, other: ArrayLike) -> "Tensor": + return self._op(MatMul, self, other) + + def __rmatmul__(self, other: ArrayLike) -> "Tensor": + return self._op(MatMul, other, self) + + def __pow__(self, other: ArrayLike): + if isinstance(other, Number) or ( + isinstance(other, np.ndarray) and other.ndim == 0 + ): + if other == 1: + return self._op(Positive, self) + elif other == 2: + return self._op(Square, self) + + return self._op(Power, self, other) + + def __ipow__(self, other: ArrayLike) -> "Tensor": + if isinstance(other, Number) or ( + isinstance(other, np.ndarray) and other.ndim == 0 + ): + if other == 1: + self._in_place_op(Positive, self) + return self + elif other == 2: + self._in_place_op(Square, self) + return self + + self._in_place_op(Power, self, other) + return self + + def __rpow__(self, other: ArrayLike): + return self._op(Power, other, self) + + def __neg__(self): + return self._op(Negative, self) + + def __pos__(self): + return self._op(Positive, self) + + def __repr__(self) -> str: + return repr(self.data).replace("array", "Tensor").replace("\n", "\n ") + + def __copy__(self) -> "Tensor": + """Produces a copy of ``self`` with ``copy.creator=None``. + + Copies of the underlying numpy data array and gradient array are created. + + Returns + ------- + Tensor + """ + return self.copy() + +
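Editor's note: the following is an illustrative sketch, not library source, of the scenario walked through in the ``_in_place_op`` comments above. It assumes, as described there, that an in-place update to a base tensor is reflected in its views while previously computed results keep their values.

# Sketch of the graph discussed in the _in_place_op comments above.
import mygrad as mg

x = mg.tensor([1.0, 2.0, 3.0])
y = x ** 2          # value of y is already resolved
z = x[:2]           # z is a view of x
w = 3 * z           # w depends on z "statically"

x[:] = 0            # in-place update of the base tensor
print(z)            # the view reflects the mutation: Tensor([0., 0.])
print(y, w)         # y and w keep their previously computed values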
[docs] def copy(self, *, constant: Optional[bool] = None) -> "Tensor": + """Produces a copy of ``self`` with ``copy.creator=None``. + + Copies of the underlying numpy data array and gradient array are created. + + No information regarding the tensor's participation in the computational + graph is copied. + + Parameters + ---------- + constant : Optional[bool] + + Returns + ------- + Tensor + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.tensor(3.0) + >>> y = x * 2 + >>> y.backward() + >>> y_copy = y.copy() + >>> y_copy + Tensor(6.) + >>> y_copy.grad + array(1.) + >>> y_copy.creator is None + True + """ + copy = Tensor( + np.copy(self.data), + constant=(self.constant if constant is None else constant), + ) + copy._grad = np.copy(self._grad) if self._grad is not None else None + return copy
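Editor's note: because ``__copy__`` above simply defers to ``Tensor.copy``, the standard-library ``copy.copy`` should produce the same detached copy; a small sketch (not library source) follows.

# copy.copy yields a detached copy: creator is dropped, data and grad are copied.
import copy
import mygrad as mg

x = mg.tensor(3.0)
y = x * 2
y.backward()

y2 = copy.copy(y)
assert y2.creator is None
assert y2.grad == 1.0          # the gradient array is copied over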
+ +
[docs] def item(self) -> Union[int, float]: + """Copy an element of a tensor to a standard Python scalar and return it. + + Note that the returned object does not support back-propagation. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the tensor as a suitable + Python scalar + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([22.2]) + >>> x.item() + 22.2 + >>> type(x.item()) + <class 'float'>""" + if self.size > 1: + raise ValueError("can only convert a tensor of size 1 to a Python scalar") + return self.data.item()
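Editor's note: an illustrative sketch (not library source) contrasting ``item()``, which returns a plain Python scalar outside of the computational graph, with indexing, which returns a Tensor that still back-propagates.

import mygrad as mg

x = mg.tensor([2.0])
as_scalar = x.item()      # plain float; no gradient can flow through it
as_tensor = x[0]          # zero-dim Tensor; still part of the graph

(as_tensor * 3).backward()
print(x.grad)             # -> array([3.])
print(type(as_scalar))    # -> <class 'float'>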
+ + def __float__(self) -> float: + if self.size > 1: + raise TypeError("can only convert a tensor of size 1 to a Python scalar") + return float(self.data) + + def __int__(self) -> int: + if self.size > 1: + raise TypeError("can only convert a tensor of size 1 to a Python scalar") + return int(self.data) + + def __index__(self) -> int: + """Return self converted to an integer, if self is suitable for use as an index + into a list.""" + return self.data.__index__() + +
[docs] def flatten(self, *, constant: Optional[bool] = None) -> "Tensor": + """Return a copy of the tensor collapsed into one dimension. + + This docstring was adapted from ``numpy.ndarray.flatten``. + + Parameters + ---------- + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + A copy of the input tensor, flattened to one dimension. + + Notes + ----- + To return a flattened view of the tensor, use ``x.reshape(-1)``. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([[1, 2], + ... [3, 4]]) + >>> x.flatten() + Tensor([1, 2, 3, 4]) + """ + return Tensor._op(Flatten, self, constant=constant)
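Editor's note: a short sketch (not library source) of the distinction drawn in the Notes above, assuming that ``reshape(-1)`` of a contiguous tensor returns a view here; ``base`` distinguishes the copy from the view.

import mygrad as mg

x = mg.tensor([[1.0, 2.0],
               [3.0, 4.0]])
flat_copy = x.flatten()
flat_view = x.reshape(-1)

print(flat_copy.base is None)   # expected: True  (flatten always copies)
print(flat_view.base is x)      # expected: True  (reshape of a contiguous tensor is a view)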
+ + @property + def base(self) -> Optional["Tensor"]: + """ + A reference to the base tensor that the present tensor is a view of. + + It this tensor owns its memory, then this returns ``None``. + + Examples + -------- + The base of a tensor that owns its memory is ``None``: + + >>> import mygrad as mg + >>> x = mg.arange(5) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + >>> y.data.base is x.data + True + + A view of a view has the same base as its "parent" + + >>> z = y[:] + >>> z.base is x + True + + The behavior of ``Tensor.base`` departs from that of ``ndarray.base`` in that + mygrad will never create an "internal" tensor to serve as a base; e.g. + + >>> import numpy as np + >>> np.reshape(2., (1,)).base + array(2.) + + >>> mg.reshape(2., (1,)).base is None + True + """ + return self._base + + @property + def size(self) -> int: + """ + Number of elements in the tensor. i.e., the product of the tensor's + dimensions. + + Returns + ------- + int + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.zeros((3, 5, 2)) # creates a tensor with 3x5x2 (= 30) elements + >>> x.size + 30 + """ + return self.data.size + + @property + def ndim(self) -> int: + """Number of tensor dimensions. I.e. the number + of indices that must be supplied to uniquely specify + an element in the tensor. + + Returns + ------- + int + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([1, 2, 3]) + >>> x.ndim + 1 + >>> x[0] # a single index identifies an element in `x` + Tensor(1) + + >>> y = mg.Tensor([[1, 2, 3], + ... [4, 5, 6]]) + >>> y.ndim + 2 + >>> y[0, 0] # two indices are required to identify an element in `x` + Tensor(1)""" + return self.data.ndim + + @property + def dtype(self) -> np.dtype: + """Data-type of the tensor's elements. + + Returns + ------- + numpy dtype object + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([[0, 1], + ... [2, 3]]) + >>> x.dtype + dtype('int32') + >>> type(x.dtype) + <type 'numpy.dtype'>""" + return self.data.dtype + + def reshape( + self, *newshape: Union[int, Shape], constant: Optional[bool] = None + ) -> "Tensor": + """Returns a tensor with a new shape, without changing its data. + This docstring was adapted from ``numpy.reshape`` + + Parameters + ---------- + *newshape : Union[int, Tuple[int, ...]] + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D tensor of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the tensor and remaining dimensions. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + ``a`` with its shape changed. A new tensor is returned. + + Notes + ----- + ``reshape`` utilizes C-ordering, meaning that it reads & writes elements using + C-like index ordering; the last axis index changing fastest, and, proceeding + in reverse order, the first axis index changing slowest. 
+ + Examples + -------- + >>> import mygrad as mg + >>> a = mg.Tensor([[1, 2, 3], [4, 5, 6]]) + >>> a.reshape(6) + Tensor([1, 2, 3, 4, 5, 6]) + + >>> a.reshape(3, -1)) # the unspecified value is inferred to be 2 + Tensor([[1, 2], + [3, 4], + [5, 6]]) + """ + + if not newshape: + raise TypeError("reshape() takes at least 1 argument (0 given)") + if hasattr(newshape[0], "__iter__"): + if len(newshape) > 1: + raise TypeError("an integer is required") + newshape = newshape[0] + return Tensor._op(Reshape, self, op_args=(newshape,), constant=constant) + + @property + def T(self) -> "Tensor": + """Same as self.transpose(), except that self is returned if self.ndim < 2 and + a view of the underlying data is utilized whenever possible. + + Returns + ------- + Tensor + + Examples + -------- + >>> import mygrad as mg + >>> y = mg.Tensor([[1, 2, 3], + ... [4, 5, 6]]) + >>> y.T + Tensor([[1, 4], + [2, 5], + [3, 6]]) + """ + return self._op(Tensor_Transpose_Property, self) + + def __eq__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__eq__(self.data, asarray(other)) + + def __ne__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__ne__(self.data, asarray(other)) + + def __lt__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__lt__(self.data, asarray(other)) + + def __le__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__le__(self.data, asarray(other)) + + def __gt__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__gt__(self.data, asarray(other)) + + def __ge__(self, other: ArrayLike) -> np.ndarray: + return np.ndarray.__ge__(self.data, asarray(other)) + + def __imatmul__(self, other): # pragma: no cover + raise TypeError( + "In-place matrix multiplication is not (yet) supported. " + "Use 'a = a @ b' instead of 'a @= b'" + ) + + def sum( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Sum of tensor elements over a given axis. + + Parameters + ---------- + axis : Optional[int, Tuple[ints, ...]] + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input tensor. If + axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + sum_along_axis : mygrad.Tensor + A Tensor with the same shape as `self`, with the specified + axis/axes removed. If `self` is a 0-d tensor, or if `axis` is None, + a 0-dim Tensor is returned. + + See Also + -------- + mygrad.Tensor.sum : Equivalent method. + + cumsum : Cumulative sum of array elements. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. 
+ + The sum of an empty tensor is the neutral element 0: + + >>> mygrad.sum([]) + Tensor(0.0) + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> x = mg.tensor([1., 1.]) + >>> x.sum() + Tensor(2.0) + >>> x = mg.tensor([0.5, 0.7, 0.2, 1.5]) + >>> x.sum(dtype=np.int32) + Tensor(1) + >>> x = mg.tensor([[0, 1], [0, 5]]) + >>> x.sum() + Tensor(6) + >>> x.sum(axis=0) + Tensor([0, 6]) + >>> x.sum(axis=1) + Tensor([1, 5]) + """ + return Tensor._op( + Sum, self, op_kwargs={"axis": axis, "keepdims": keepdims}, constant=constant + ) + + def prod( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Return the product of array elements over given axes. + + Parameters + ---------- + axis : Optional[Union[int, Tuple[int, ...]]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + product_along_axis : mygrad.Tensor + A tensor shaped as `a` but with the specified axis removed.""" + return Tensor._op( + Prod, + self, + op_kwargs={"axis": axis, "keepdims": keepdims}, + constant=constant, + ) + + def cumprod( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Return the cumulative product of elements along a given axis. + + This docstring was adapted from the official numpy documentation + + Parameters + ---------- + axis : Optional[int] + Axis along which the cumulative product is computed. By default + the input is flattened. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow.""" + + return Tensor._op(CumProd, self, op_kwargs={"axis": axis}, constant=constant) + + def cumsum( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Return the cumulative sum of the elements along a given axis. + + This docstring was adapted from the official numpy documentation + + Parameters + ---------- + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. 
+ Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + """ + + return Tensor._op(CumSum, self, op_kwargs={"axis": axis}, constant=constant) + + def mean( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Mean of tensor elements over a given axis. + + Parameters + ---------- + x : ArrayLike + + axis : Optional[int, Tuple[ints, ...] + Axis or axes along which a mean is performed. The default, + axis=None, will mean all of the elements of the input tensor. If + axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, a mean is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mean_along_axis : Tensor + A Tensor with the same shape as `self`, with the specified + axis/axes removed. If `self` is a 0-d tensor, or if `axis` is None, + a 0-dim Tensor is returned. + """ + return Tensor._op( + Mean, + self, + op_kwargs={"axis": axis, "keepdims": keepdims}, + constant=constant, + ) + + def std( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + ddof: int = 0, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Compute the standard deviation along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + axis : Optional[Union[int, Tuple[int, ...]]] + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + ddof : int, optional (default=0) + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + std : mygrad.Tensor + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. 
In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables.""" + return Tensor._op( + StdDev, + self, + op_kwargs={"axis": axis, "keepdims": keepdims, "ddof": ddof}, + constant=constant, + ) + + def var( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + ddof: int = 0, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + + ddof : int, optional (default=0) + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. + + keepdims : bool, optional (default=False) + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array.. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + Returns + ------- + variance : mygrad.Tensor + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite population. + ``ddof=0`` provides a maximum likelihood estimate of the variance for + normally distributed variables.""" + return Tensor._op( + Variance, + self, + op_kwargs={"axis": axis, "keepdims": keepdims, "ddof": ddof}, + constant=constant, + ) + + def max( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Return the maximum of a tensor or maximum along its axes. + + Parameters + ---------- + x : ArrayLike + + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + max : mygrad.Tensor + Maximum of `a`. If `axis` is None, the result is a 0-D tensor. 
+ + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.arange(4).reshape((2,2)) + >>> a + Tensor([[0, 1], + [2, 3]]) + >>> mg.amax(a) # Maximum of the flattened array + Tensor(3) + >>> mg.amax(a, axis=0) # Maxima along the first axis + Tensor([2, 3]) + >>> mg.amax(a, axis=1) # Maxima along the second axis + Tensor([1, 3]) + >>> b = mg.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> mg.amax(b) + Tensor(nan) + """ + return Tensor._op( + Max, + self, + op_kwargs={"axis": axis, "keepdims": keepdims, "dtype": _NoValue}, + constant=constant, + ) + + def min( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + keepdims: bool = False, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Return the minimum of a tensor or minimum along its axes. + + Parameters + ---------- + axis : Optional[int, Tuple[int, ...]] + Axis or axes along which to operate. By default, flattened input is used. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `arr`. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + min : mygrad.Tensor + Minimum of `a`. If `axis` is None, the result is a 0-D tensor. + + Examples + -------- + >>> import mygrad as mg + >>> import numpy as np + >>> a = mg.arange(4).reshape((2,2)) + >>> a + Tensor([[0, 1], + [2, 3]]) + >>> mg.amin(a) # Minimum of the flattened array + Tensor(0) + >>> mg.amin(a, axis=0) # Minima along the first axis + Tensor([0, 1]) + >>> mg.amin(a, axis=1) # Minima along the second axis + Tensor([0, 2]) + >>> b = mg.arange(5, dtype=float) + >>> b[2] = np.NaN + >>> mg.amin(b) + Tensor(nan) + """ + return Tensor._op( + Min, + self, + op_kwargs={"axis": axis, "keepdims": keepdims, "dtype": _NoValue}, + constant=constant, + ) + + def swapaxes( + self, axis1: int, axis2: int, *, constant: Optional[bool] = None + ) -> "Tensor": + """Interchange two axes of a tensor. + + Parameters + ---------- + axis1 : int + First axis. + + axis2 : int + Second axis. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + """ + return Tensor._op(SwapAxes, self, op_args=(axis1, axis2), constant=constant) + + def transpose( + self: ArrayLike, *axes: int, constant: Optional[bool] = None + ) -> "Tensor": + """Permute the dimensions of a tensor. + + Parameters + ---------- + axes : int + By default, reverse the dimensions, otherwise permute the axes + according to the values given. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + `a` with its axes permuted. A new tensor is returned. 
+ + Examples + -------- + >>> import mygrad as mg + >>> a = mg.tensor([[1, 2], [3, 4]]) + >>> a + Tensor([[1, 2], + [3, 4]]) + >>> a.transpose() + Tensor([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + Tensor([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + Tensor([[1, 3], + [2, 4]])""" + if not axes: + axes = None + elif hasattr(axes[0], "__iter__") or axes[0] is None: + if len(axes) > 1: + raise TypeError( + f"'{type(axes[0])}' object cannot be interpreted as an integer" + ) + axes = axes[0] + return Tensor._op(Transpose, self, op_args=(axes,), constant=constant) + + def moveaxis( + self, + source: Union[int, Tuple[int, ...]], + destination: Union[int, Tuple[int, ...]], + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """Move axes of a tensor to new positions. Other axes remain in their + original order. + + + Parameters + ---------- + source : Union[int, Sequence[int]] + Original positions of the axes to move. These must be unique. + + destination : Union[int, Sequence[int]] + Destination positions for each of the original axes. These must also be + unique. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + result : mygrad.Tensor + Array with moved axes. This array is a view of the input array..""" + return Tensor._op( + MoveAxis, self, op_args=(source, destination), constant=constant + ) + + def squeeze( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + *, + constant: Optional[bool] = None, + ) -> "Tensor": + """ + Remove single-dimensional entries from the shape of a tensor. + + This docstring was adapted from ``numpy.squeeze`` + + Parameters + ---------- + axis : Optional[int, Tuple[int, ...]] + Selects a subset of the single-dimensional entries in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + + Returns + ------- + mygrad.Tensor + + Raises + ------ + ValueError + If ``axis`` is not ``None``, and an axis being squeezed is not of length 1 + """ + return Tensor._op(Squeeze, self, op_args=(axis,), constant=constant) + + def ravel(self, *, constant: Optional[bool] = None) -> "Tensor": + """ + Flattens contents of a tensor into a contiguous 1-D array. A copy is made only if needed. + + This docstring was adapted from ``numpy.ravel``. + + Parameters + ---------- + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + + Returns + ------- + mygrad.Tensor + + Notes + ----- + ``ravel`` utilizes C-ordering, meaning that it reads & writes elements using + C-like index ordering; the last axis index changing fastest, and, proceeding + in reverse order, the first axis index changing slowest. 
+ """ + return Tensor._op(Ravel, self, constant=constant) + + def argmax( + self, axis: Optional[int] = None, out: Optional[np.ndarray] = None + ) -> np.ndarray: + """Returns the indices of the maximum values along an axis. + + Parameters + ---------- + a: array_like + + axis: int, optional + By default, the index is into the flattened array, otherwise along the specified axis. + + out: numpy.array, optional + If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. + + Returns + ------- + numpy.ndarray[int]""" + + return np.argmax(self.data, axis, out) + + def argmin( + self, axis: Optional[int] = None, out: Optional[np.ndarray] = None + ) -> np.ndarray: + """Returns the indices of the minimum values along an axis. + + Parameters + ---------- + axis: int, optional + By default, the index is into the flattened array, otherwise along the specified axis. + + out: numpy.array, optional + If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. + + Returns + ------- + numpy.ndarray[int]""" + + return np.argmin(self.data, axis, out) + + def any( + self, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + out: Optional[np.ndarray] = None, + keepdims: bool = False, + ) -> np.ndarray: + """Test whether any array or Tensor element along a given axis evaluates to True. + + Returns single boolean if `axis` is ``None`` + + This documentation was adapted from ``numpy.add`` + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See `ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + Tensor.any : equivalent method + + """ + return np.any(self.data, axis=axis, out=out, keepdims=keepdims) + + def clip( + self, + a_min: ArrayLike, + a_max: ArrayLike, + out: Optional[Union[np.ndarray, "Tensor"]] = None, + *, + constant: Optional[bool] = None, + ) -> "Tensor": # pragma: no cover + """Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. 
+ + Equivalent to `mg.minimum(a_max, mg.maximum(a, a_min))``. + + No check is performed to ensure ``a_min < a_max``. + + This docstring was adapted from that of `numpy.clip` + + Parameters + ---------- + a_min : Optional[float, ArrayLike] + Minimum value. If `None`, clipping is not performed on lower + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. + + a_max : Optional[float, ArrayLike] + Maximum value. If `None`, clipping is not performed on upper + interval edge. Not more than one of `a_min` and `a_max` may be + `None`. If `a_min` or `a_max` are ArrayLike, then the three + arrays will be broadcasted to match their shapes. + + out : Optional[Union[ndarray, Tensor]] + A location into which the result is stored. If provided, it must have + a shape that the inputs broadcast to. If not provided or None, a + freshly-allocated tensor is returned. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not backpropagate a gradient) + + Returns + ------- + Tensor + A tensor with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.arange(10) + >>> a + Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> a.clip(1, 8) + Tensor([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> a.clip([3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + Tensor([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])""" + # set in added in mygrad.__init__ + ... +
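Editor's note: per the ``__eq__``/``__gt__`` definitions above, comparisons return plain NumPy boolean arrays rather than Tensors, so no gradient flows through them. The sketch below (not library source) also assumes MyGrad's behavior of seeding a non-scalar ``backward()`` with ones.

import mygrad as mg

x = mg.tensor([1.0, 2.0, 3.0])
mask = x > 1.5
print(type(mask))        # -> <class 'numpy.ndarray'>, not a Tensor
print(mask)              # -> [False  True  True]

# Indexing with the boolean array *does* return a Tensor, so backprop still works.
selected = x[mask]
selected.backward()
print(x.grad)            # -> array([0., 1., 1.])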
\ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_creation/funcs.html b/docs/_modules/mygrad/tensor_creation/funcs.html new file mode 100644 index 00000000..8ad74296 --- /dev/null +++ b/docs/_modules/mygrad/tensor_creation/funcs.html @@ -0,0 +1,1588 @@ + mygrad.tensor_creation.funcs — MyGrad 2.3.0.post1.dev6 documentation
Source code for mygrad.tensor_creation.funcs

+from typing import Optional, Sequence, Union
+
+import numpy as np
+
+from mygrad.tensor_base import Tensor, _resolve_constant, implements_numpy_override
+from mygrad.typing import ArrayLike, DTypeLikeReals, Real
+
+Shape = Union[Sequence[int], int]
+
+
+def _anything_but_tensor(x):
+    if isinstance(x, Tensor):
+        x = x.data
+    return x
+
+
+__all__ = [
+    "arange",
+    "empty",
+    "empty_like",
+    "eye",
+    "geomspace",
+    "identity",
+    "linspace",
+    "logspace",
+    "ones",
+    "ones_like",
+    "full",
+    "full_like",
+    "zeros",
+    "zeros_like",
+]
+
+
+
[docs]def empty( + shape: Shape, dtype: DTypeLikeReals = np.float32, *, constant: Optional[bool] = None +) -> Tensor: + """Return a new Tensor of the given shape and type, without initializing entries. + + This docstring was adapted from ``numpy.empty`` [1]_ + + Parameters + ---------- + shape : Union[int, Tuple[int]] + The shape of the empty array. + + dtype : data-type, optional (default=numpy.float32) + The data type of the output Tensor. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A tensor of uninitialized data of the given shape and dtype. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.empty.html + + See Also + -------- + empty_like : Return an empty tensor with shape and type of input. + ones : Return a new tensor setting values to one. + zeros : Return a new tensor setting values to zero. + full : Return a new tensor of given shape filled with value. + + + Notes + ----- + `empty`, unlike `zeros`, does not set the array values to zero, + and may therefore be marginally faster. On the other hand, it requires + the user to manually set all the values in the array, and should be + used with caution. + + Examples + -------- + >>> import mygrad as mg + >>> mg.empty([2, 2], constant=True) + Tensor([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #random + + >>> mg.empty([2, 2], dtype=int) + Tensor([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #random + """ + return Tensor(np.empty(shape=shape, dtype=dtype), constant=constant, copy=False)
+ + +
[docs]@implements_numpy_override() +def empty_like( + other: ArrayLike, + dtype: Optional[DTypeLikeReals] = None, + shape: Optional[Union[int, Sequence[int]]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Return a new Tensor of the same shape and type as the given array. + + This docstring was adapted from ``numpy.empty_like`` [1]_ + + Parameters + ---------- + other : ArrayLike + The Tensor or array whose shape and datatype should be mirrored. + + dtype : Optional[DTypeLikeReals] + Override the data type of the returned Tensor with this value, or None to not override. + + shape : Optional[Union[int, Sequence[int]]] + If specified, overrides the shape of the result + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. If ``None`` then: + + Inferred from ``other``, if other is a tensor + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Returns + ------- + Tensor + A tensor of uninitialized data whose shape and type match `other`. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.empty_like.html + + See Also + -------- + empty : Return a new Tensor of the given shape and type, without initializing entries. + ones : Return a new tensor setting values to one. + zeros : Return a new tensor setting values to zero. + full : Return a new tensor of given shape filled with value. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.arange(4).reshape(2, 2) + >>> mg.empty_like(x, constant=True) + Tensor([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #random + + >>> mg.empty_like(x, dtype=int) + Tensor([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #random + """ + constant = _resolve_constant(other, constant=constant) + return Tensor( + np.empty_like(_anything_but_tensor(other), dtype=dtype, shape=shape), + constant=constant, + copy=False, + )
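Editor's note: a small sketch (not library source) of the ``constant`` inference described in the parameter documentation above: when ``constant`` is not given and ``other`` is a tensor, the result inherits ``other``'s constant flag.

import mygrad as mg

c = mg.tensor([1.0, 2.0], constant=True)
v = mg.tensor([1.0, 2.0], constant=False)

print(mg.empty_like(c).constant)   # -> True  (inherited from c)
print(mg.empty_like(v).constant)   # -> False (inherited from v)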
+ + +
[docs]def eye( + N: int, + M: Optional[int] = None, + k: int = 0, + dtype: DTypeLikeReals = float, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Return a 2D Tensor with ones on the diagonal and zeros elsewhere. + + This docstring was adapted from ``numpy.eye`` [1]_ + + Parameters + ---------- + N : int + The number of rows in the output Tensor. + + M : int, optional (default=None) + The number of columns in the output, or None to match `rows`. + + k : int, optional (default=0) + The index of the diagonal. 0 is the main diagonal; a positive value is the upper + diagonal, while a negative value refers to the lower diagonal. + + dtype : data-type, optional (default=numpy.float32) + The data type of the output Tensor. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.eye.html + + Returns + ------- + Tensor + A tensor whose elements are 0, except for the :math:`k`-th diagonal, whose values are 1. + + Examples + -------- + >>> import mygrad as mg + >>> mg.eye(2, dtype=int) + Tensor([[1, 0], + [0, 1]]) + >>> mg.eye(3, k=1) + Tensor([[ 0., 1., 0.], + [ 0., 0., 1.], + [ 0., 0., 0.]]) + """ + return Tensor( + np.eye(N, M=M, k=k, dtype=dtype), + constant=constant, + copy=False, + )
+ + +
[docs]def identity( + n: int, dtype: DTypeLikeReals = float, *, constant: Optional[bool] = None +) -> Tensor: + """Return the identity Tensor; a square Tensor with 1s on the main diagonal and 0s elsewhere. + + This docstring was adapted from ``numpy.identity`` [1]_ + + Parameters + ---------- + n : int + The number of rows and columns in the output Tensor. + + dtype : data-type, optional (default=numpy.float32) + The data type of the output Tensor. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A square Tensor whose main diagonal is 1 and all other elements are 0. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.identity.html + + Examples + -------- + >>> import mygrad as mg + >>> mg.identity(3) + Tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + return Tensor(np.identity(n, dtype=dtype), constant=constant, copy=False)
+ + +
[docs]def ones( + shape: Shape, dtype: DTypeLikeReals = np.float32, *, constant: Optional[bool] = None +) -> Tensor: + """ + Return a Tensor of the given shape and type, filled with ones. + + This docstring was adapted from ``numpy.ones`` [1]_ + + Parameters + ---------- + shape : Union[int, Tuple[int]] + The shape of the output Tensor. + + dtype : data-type, optional (default=numpy.float32) + The data type of the output Tensor. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A Tensor of ones with the given shape and data type. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.ones.html + + See Also + -------- + ones_like : Return an tensor of ones with shape and type of input. + empty : Return a new uninitialized tensor. + zeros : Return a new tensor setting values to zero. + full : Return a new tensor of given shape filled with value. + + Examples + -------- + >>> import mygrad as mg + >>> mg.ones(5) + Tensor([ 1., 1., 1., 1., 1.]) + + >>> mg.ones((5,), dtype=int) + Tensor([1, 1, 1, 1, 1]) + + >>> mg.ones((2, 1)) + Tensor([[ 1.], + [ 1.]]) + + >>> mg.ones((2, 2)) + Tensor([[ 1., 1.], + [ 1., 1.]]) + """ + return Tensor(np.ones(shape, dtype=dtype), constant=constant, copy=False)
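Editor's note: an illustrative sketch (not library source) of the dtype-dependent default for ``constant`` stated above: float-type tensors default to non-constant, integer-type tensors default to constant.

import mygrad as mg

print(mg.ones(3).constant)             # float32 data -> False
print(mg.ones(3, dtype=int).constant)  # integer data -> True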
+ + +
[docs]@implements_numpy_override() +def ones_like( + other: ArrayLike, + dtype: Optional[DTypeLikeReals] = None, + shape: Optional[Union[int, Sequence[int]]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return a Tensor of the same shape and type as the given, filled with ones. + + This docstring was adapted from ``numpy.ones_like`` [1]_ + + Parameters + ---------- + other : array_like + The Tensor or array whose shape and datatype should be mirrored. + + dtype : Optional[DTypeLikeReals] + Override the data type of the returned Tensor with this value, or None to not override. + + shape : Optional[Union[int, Sequence[int]]] + If specified, overrides the shape of the result + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. If ``None`` then: + + Inferred from ``other``, if other is a tensor + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + + Returns + ------- + Tensor + A Tensor of ones whose shape and data type match `other`. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.ones_like.html + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.arange(6).reshape((2, 3)) + >>> x + Tensor([[0, 1, 2], + [3, 4, 5]]) + + >>> mg.ones_like(x) + Tensor([[1, 1, 1], + [1, 1, 1]]) + + >>> y = mg.arange(3, dtype=float) + >>> y + Tensor([ 0., 1., 2.]) + + >>> mg.ones_like(y) + Tensor([ 1., 1., 1.]) + """ + constant = _resolve_constant(other, constant=constant) + + return Tensor( + np.ones_like(_anything_but_tensor(other), dtype=dtype, shape=shape), + constant=constant, + copy=False, + )
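Editor's note: a brief sketch (not library source) of the ``dtype`` and ``shape`` overrides exposed in the signature above, which let the result depart from the mirrored array's properties.

import mygrad as mg

x = mg.arange(6).reshape((2, 3))
y = mg.ones_like(x, dtype=float, shape=(3, 2))
print(y.shape, y.dtype)   # -> (3, 2) float64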
+ + +
[docs]def zeros( + shape: Shape, dtype: DTypeLikeReals = np.float32, *, constant: Optional[bool] = None +) -> Tensor: + """ + Return a Tensor of the given shape and type, filled with zeros. + + This docstring was adapted from ``numpy.zeros`` [1]_ + + Parameters + ---------- + shape : Union[int, Tuple[int]] + The shape of the output Tensor. + + dtype : data-type, optional (default=numpy.float32) + The data type of the output Tensor. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A Tensor of zeros with the given shape and data type. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.zeros.html + + See Also + -------- + ones_like : Return an tensor of ones with shape and type of input. + empty : Return a new uninitialized tensor. + ones : Return a new tensor setting values to one. + full : Return a new tensor of given shape filled with value. + + Examples + -------- + >>> import mygrad as mg + >>> mg.zeros(5) + Tensor([ 0., 0., 0., 0., 0.]) + + >>> mg.zeros((5,), dtype=int, constant=True) # tensor will not back-propagate a gradient + Tensor([0, 0, 0, 0, 0]) + + >>> mg.zeros((2, 1)) + Tensor([[ 0.], + [ 0.]]) + + >>> mg.zeros((2, 2)) + Tensor([[ 0., 0.], + [ 0., 0.]]) + """ + return Tensor(np.zeros(shape, dtype), constant=constant, copy=False)
+ + +
[docs]@implements_numpy_override() +def zeros_like( + other: ArrayLike, + dtype: Optional[DTypeLikeReals] = None, + shape: Optional[Union[int, Shape]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return a Tensor of the same shape and type as the given, filled with zeros. + + This docstring was adapted from ``numpy.zeros_like`` [1]_ + + Parameters + ---------- + other : ArrayLike + The Tensor or array whose shape and datatype should be mirrored. + + dtype : Optional[DTypeLikeReals] + Override the data type of the returned Tensor with this value, or None to not override. + + shape : Optional[Union[int, Sequence[int]]] + If specified, overrides the shape of the result + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. If ``None`` then: + + Inferred from ``other``, if other is a tensor + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A Tensor of zeros whose shape and data type match `other`. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.zeros_like.html + + See Also + -------- + empty_like : Return an empty tensor with shape and type of input. + ones_like : Return a tensor of ones with shape and type of input. + full_like : Return a new tensor with shape of input filled with value. + zeros : Return a new tensor setting values to zero. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.arange(6).reshape((2, 3)) + >>> x + Tensor([[0, 1, 2], + [3, 4, 5]]) + + >>> mg.zeros_like(x, constant=True) # tensor will not back-propagate a gradient + Tensor([[0, 0, 0], + [0, 0, 0]]) + + >>> y = mg.arange(3, dtype=float) + >>> y + Tensor([ 0., 1., 2.]) + + >>> mg.zeros_like(y) + Tensor([ 0., 0., 0.]) + """ + constant = _resolve_constant(other, constant=constant) + return Tensor( + np.zeros_like(_anything_but_tensor(other), dtype=dtype, shape=shape), + constant=constant, + copy=False, + )
+ + +
[docs]def full( + shape: Shape, + fill_value: ArrayLike, + dtype: Optional[DTypeLikeReals] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Return a Tensor of the given shape and type, filled with `fill_value`. + + This docstring was adapted from ``numpy.full`` [1]_ + + Parameters + ---------- + shape : Union[int, Iterable[int]] + The shape of the output Tensor. + + fill_value : ArrayLike + The value with which to fill the output Tensor. Note that this function + is not differentiable – the resulting tensor will not backprop through + `fill_value`. + + dtype : Optional[DTypeLikeReals] + The data type of the output Tensor, or None to match `fill_value`. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A Tensor of `fill_value` with the given shape and dtype. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.full.html + + Examples + -------- + >>> import mygrad as mg + >>> mg.full((2, 2), 33) + Tensor([[ 33, 33], + [ 33, 33]]) + + >>> mg.full((2, 2), 10) + Tensor([[10, 10], + [10, 10]]) + """ + return Tensor( + np.full(shape, fill_value=fill_value, dtype=dtype), + constant=constant, + copy=False, + )
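To make the constant/backprop behavior above concrete, here is a brief sketch (the tensor name ``x`` is illustrative and the gradient values are worked out by hand, not taken from the original page): a float-filled tensor created by ``full`` participates in backpropagation like any other float tensor, even though gradients never flow into ``fill_value`` itself.

>>> x = mg.full((3,), 2.0)
>>> (x * x).sum().backward()  # d/dx sum(x**2) = 2*x
>>> x.grad
array([4., 4., 4.])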
+ + +
[docs]@implements_numpy_override() +def full_like( + other: ArrayLike, + fill_value: Real, + dtype: Optional[DTypeLikeReals] = None, + shape: Optional[Union[int, Shape]] = None, + constant: Optional[bool] = None, +) -> Tensor: + """Return a Tensor of the same shape and type as the given, filled with `fill_value`. + + This docstring was adapted from ``numpy.full_like`` [1]_ + + Parameters + ---------- + other : ArrayLike + The tensor or array whose shape and datatype should be mirrored. + + fill_value : Real + The value with which to fill the output Tensor. + + dtype : Optional[DTypeLikeReals] + Override the data type of the returned Tensor with this value, or None to not override. + + shape : Optional[int, Sequence[int]] + If specified, overrides the shape of the result + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. If ``None`` then: + + Inferred from ``other``, if other is a tensor + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Returns + ------- + Tensor + A Tensor of `fill_value` whose shape and data type match `other`. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.full_like.html + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.arange(6, dtype=int) + >>> mg.full_like(x, 1) + Tensor([1, 1, 1, 1, 1, 1]) + >>> mg.full_like(x, 0.1) + Tensor([0, 0, 0, 0, 0, 0]) + >>> mg.full_like(x, 0.1, dtype=np.double) + Tensor([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> mg.full_like(x, np.nan, dtype=np.double) + Tensor([ nan, nan, nan, nan, nan, nan]) + + >>> y = mg.arange(6, dtype=np.double) + >>> mg.full_like(y, 0.1) + Tensor([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + """ + constant = _resolve_constant(other, constant=constant) + + return Tensor( + np.full_like( + _anything_but_tensor(other), + fill_value=_anything_but_tensor(fill_value), + dtype=dtype, + shape=shape, + ), + constant=constant, + copy=False, + )
+ + +
[docs]def arange( + *args, + constant: Optional[bool] = None, + **kwargs, +) -> Tensor: + """ + arange([start,] stop[, step,], dtype=None, *, constant=None) + + Return a Tensor with evenly-spaced values within a given interval. + + Values are generated within [start, stop). Note that for non-integer steps, results may be + inconsistent; you are better off using `linspace` instead. + + This docstring was adapted from ``numpy.arange`` [1]_ + + Parameters + ---------- + start : Real, optional, default=0 + The start of the interval, inclusive. + + stop : Real + The end of the interval, exclusive. + + step : int, optional (default=1) + The spacing between successive values. + + dtype : Optional[DTypeLikeReals] + The data type of the output Tensor, or None to infer from the inputs. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + A Tensor of evenly-spaced values in [start, end). + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.arange.html + + Examples + -------- + >>> import mygrad as mg + >>> mg.arange(3) + Tensor([0, 1, 2]) + >>> mg.arange(3.0, constant=True) # resulting tensor will not back-propagate a gradient + Tensor([ 0., 1., 2.]) + >>> mg.arange(3,7) + Tensor([3, 4, 5, 6]) + >>> mg.arange(3,7,2) + Tensor([3, 5]) + """ + return Tensor(np.arange(*args, **kwargs), constant=constant, copy=False)
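A short illustration of the note above about non-integer steps: ``linspace`` specifies the number of samples rather than a floating-point step, which avoids the rounding-error pitfalls of a fractional ``step``. The outputs shown are the expected, approximate reprs and are not taken from the original page.

>>> mg.arange(0., 1., 0.25)                  # floating-point step
Tensor([0.  , 0.25, 0.5 , 0.75])
>>> mg.linspace(0., 1., 4, endpoint=False)   # preferred: specify the number of samples
Tensor([0.  , 0.25, 0.5 , 0.75])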
+ + +
[docs]def linspace( + start: ArrayLike, + stop: ArrayLike, + num: int = 50, + endpoint: bool = True, + dtype: Optional[DTypeLikeReals] = None, + axis: int = 0, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Return a Tensor with evenly-spaced numbers over a specified interval. + + Values are generated within [start, stop], with the endpoint optionally excluded. + + This docstring was adapted from ``numpy.linspace`` [1]_ + + Parameters + ---------- + start : ArrayLike + The starting value of the sequence, inclusive. + + stop : ArrayLike + The ending value of the sequence, inclusive unless `endpoint` is False. + + num : int, optional (default=50) + The number of values to generate. Must be non-negative. + + endpoint : bool, optional (default=True) + Whether to include the endpoint in the Tensor. Note that if False, the step size changes + to accommodate the sequence excluding the endpoint. + + dtype : Optional[DTypeLikeReals] + The data type of the output Tensor, or None to infer from the inputs. + + axis : int, optional (default=0) + The axis in the result to store the samples - for array-like start/stop. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.linspace.html + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + logspace : Samples uniformly distributed in log space. + + Examples + -------- + >>> import mygrad as mg + >>> mg.linspace(2.0, 3.0, num=5) + Tensor([ 2. , 2.25, 2.5 , 2.75, 3. ]) + >>> mg.linspace(2.0, 3.0, num=5, endpoint=False) + Tensor([ 2. , 2.2, 2.4, 2.6, 2.8]) + """ + return Tensor( + np.linspace( + start, + stop, + num, + endpoint=endpoint, + dtype=dtype, + axis=axis, + ), + constant=constant, + copy=False, + )
+ + +
[docs]def logspace( + start: ArrayLike, + stop: ArrayLike, + num: int = 50, + endpoint: bool = True, + base: Real = 10, + dtype: Optional[DTypeLikeReals] = None, + axis: int = 0, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Return a Tensor with evenly-spaced numbers over a specified interval on a log scale. + This is not a differentiable function - it does not propagate gradients to its inputs. + + In linear space, values are generated within [base**start, base**stop], with the endpoint + optionally excluded. + + This docstring was adapted from ``numpy.logspace`` [1]_ + + Parameters + ---------- + start : ArrayLike + The starting value of the sequence, inclusive; start at `base ** start`. + + stop : ArrayLike + The ending value of the sequence, inclusive unless `endpoint` is False; end at + `base ** stop`. + + num : int, optional (default=50) + The number of values to generate. Must be non-negative. + + endpoint : bool, optional (default=True) + Whether to include the endpoint in the Tensor. Note that if False, the step size changes + to accommodate the sequence excluding the endpoint. + + base : Real, optional (default=10) + The base of the log space. + + dtype : Optional[DTypeLikeReals] + The data type of the output Tensor, or None to infer from the inputs. + + axis : int, optional (default=0) + The axis in the result to store the samples - for array-like start/stop. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + + See Also + -------- + arange : Similar to linspace, with the step size specified instead of the + number of samples. Note that, when used with a float endpoint, the + endpoint may or may not be included. + linspace : Similar to logspace, but with the samples uniformly distributed + in linear space, instead of log space. + geomspace : Similar to logspace, but with endpoints specified directly. + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.logspace.html + + Examples + -------- + >>> import mygrad as mg + >>> mg.logspace(2.0, 3.0, num=4) + Tensor([ 100. , 215.443469 , 464.15888336, 1000. ]) + >>> mg.logspace(2.0, 3.0, num=4, endpoint=False) + Tensor([ 100. , 177.827941 , 316.22776602, 562.34132519]) + >>> mg.logspace(2.0, 3.0, num=4, base=2.0) + Tensor([ 4. , 5.0396842 , 6.34960421, 8. ]) + + """ + return Tensor( + np.logspace( + start=start, + stop=stop, + num=num, + endpoint=endpoint, + base=base, + dtype=dtype, + axis=axis, + ), + constant=constant, + copy=False, + )
+ + +
[docs]def geomspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + dtype=None, + axis=0, + *, + constant: Optional[bool] = None, +) -> Tensor: + """Return a Tensor with evenly-spaced values in a geometric progression. + + Each output sample is a constant multiple of the previous output. + + This docstring was adapted from ``numpy.geomspace`` [1]_ + + Parameters + ---------- + start : ArrayLike + The starting value of the output. + + stop : ArrayLike + The ending value of the sequence, inclusive unless `endpoint` is false. + + num : int, optional (default=50) + The number of values to generate. Must be non-negative. + + endpoint : bool, optional (default=True) + Whether to include the endpoint in the Tensor. Note that if False, the step size changes + to accommodate the sequence excluding the endpoint. + + dtype : Optional[DTypeLikeReals] + The data type of the output Tensor, or None to infer from the inputs. + + axis : int, optional (default=0) + The axis in the result to store the samples - for array-like start/stop. + + constant : Optional[bool] + If ``True``, this tensor is a constant, and thus does not facilitate + back propagation. + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + Tensor + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.geomspace.html + + See Also + -------- + logspace : Similar to geomspace, but with endpoints specified using log + and base. + linspace : Similar to geomspace, but with arithmetic instead of geometric + progression. + arange : Similar to linspace, with the step size specified instead of the + number of samples. + + Examples + -------- + >>> import mygrad as mg + >>> mg.geomspace(1, 1000, num=4) + Tensor([ 1., 10., 100., 1000.]) + >>> mg.geomspace(1, 1000, num=3, endpoint=False) + Tensor([ 1., 10., 100.]) + >>> mg.geomspace(1, 1000, num=4, endpoint=False) + Tensor([ 1. , 5.62341325, 31.6227766 , 177.827941 ]) + >>> mg.geomspace(1, 256, num=9) + Tensor([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + Note that the above may not produce exact integers: + + >>> mg.geomspace(1, 256, num=9, dtype=int) + Tensor([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(mg.geomspace(1, 256, num=9).data).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + Negative, and decreasing inputs are allowed: + + >>> mg.geomspace(1000, 1, num=4) + Tensor([ 1000., 100., 10., 1.]) + >>> mg.geomspace(-1000, -1, num=4) + Tensor([-1000., -100., -10., -1.]) + """ + return Tensor( + np.geomspace( + start=start, + stop=stop, + num=num, + endpoint=endpoint, + dtype=dtype, + axis=axis, + ), + constant=constant, + copy=False, + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_manip/array_shape/funcs.html b/docs/_modules/mygrad/tensor_manip/array_shape/funcs.html new file mode 100644 index 00000000..a2ca05ee --- /dev/null +++ b/docs/_modules/mygrad/tensor_manip/array_shape/funcs.html @@ -0,0 +1,1067 @@ + + + + + + + + + + mygrad.tensor_manip.array_shape.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.tensor_manip.array_shape.funcs

+from typing import Callable, List, Optional, Tuple, TypeVar, Union, cast, overload
+
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike, Shape
+
+from .ops import *
+
+__all__ = [
+    "reshape",
+    "squeeze",
+    "ravel",
+    "expand_dims",
+    "broadcast_to",
+    "atleast_1d",
+    "atleast_2d",
+    "atleast_3d",
+]
+
+_T = TypeVar("_T")
+
+
+
[docs]@implements_numpy_override() +def reshape( + a: ArrayLike, newshape: Union[int, Shape], *, constant: Optional[bool] = None +) -> Tensor: + """Returns a tensor with a new shape, without changing its data. + + This docstring was adapted from ``numpy.reshape`` + + Parameters + ---------- + a : ArrayLike + The tensor to be reshaped + + newshape : Union[int, Tuple[int, ...]] + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D tensor of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the tensor and remaining dimensions. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + ``a`` with its shape changed. A new tensor is returned. + + Notes + ----- + ``reshape`` utilizes C-ordering, meaning that it reads & writes elements using + C-like index ordering; the last axis index changing fastest, and, proceeding + in reverse order, the first axis index changing slowest. + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.Tensor([[1,2,3], [4,5,6]]) + >>> mg.reshape(a, 6) + Tensor([1, 2, 3, 4, 5, 6]) + + >>> mg.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + Tensor([[1, 2], + [3, 4], + [5, 6]])""" + return Tensor._op(Reshape, a, op_args=(newshape,), constant=constant)
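A short sketch (the gradient values are worked out by hand, not taken from the original page) showing that backpropagation through ``reshape`` delivers a gradient with the original tensor's shape:

>>> x = mg.tensor([1., 2., 3., 4.])
>>> y = mg.reshape(x, (2, 2))
>>> (y * y).sum().backward()  # d/dx sum(x**2) = 2*x
>>> x.grad
array([2., 4., 6., 8.])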
+ + +
[docs]@implements_numpy_override() +def squeeze( + a: ArrayLike, + axis: Optional[Union[int, Tuple[int, ...]]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Remove single-dimensional entries from the shape of a tensor. + + This docstring was adapted from ``numpy.squeeze`` + + Parameters + ---------- + a : ArrayLike + The tensor to be reshaped + + axis : Optional[int, Tuple[int, ...]] + Selects a subset of the single-dimensional entries in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Raises + ------ + ValueError + If ``axis`` is not ``None``, and an axis being squeezed is not of length 1 + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> mg.squeeze(x).shape + (3,) + >>> mg.squeeze(x, axis=0).shape + (3, 1) + >>> mg.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size not equal to one + >>> mg.squeeze(x, axis=2).shape + (1, 3)""" + return Tensor._op(Squeeze, a, op_args=(axis,), constant=constant)
+ + +
[docs]@implements_numpy_override() +def ravel(a: ArrayLike, *, constant: Optional[bool] = None) -> Tensor: + """ + Flattens contents of a tensor into a contiguous 1-D array. A copy is made only if needed. + + This docstring was adapted from ``numpy.ravel``. + + Parameters + ---------- + a : ArrayLike + The tensor to be flattened + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Notes + ----- + ``ravel`` utilizes C-ordering, meaning that it reads & writes elements using + C-like index ordering; the last axis index changing fastest, and, proceeding + in reverse order, the first axis index changing slowest. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([[1, 2], + ... [3, 4]]) + >>> mg.ravel(x) + Tensor([1, 2, 3, 4]) + """ + return Tensor._op(Ravel, a, constant=constant)
+ + +
[docs]@implements_numpy_override() +def expand_dims(a: ArrayLike, axis: int, *, constant: Optional[bool] = None) -> Tensor: + """ + Expand the dimensions of a tensor by adding a new axis. + + This docstring was adapted from ``numpy.expand_dims``. + + Parameters + ---------- + a : ArrayLike + The tensor to be expanded + + axis : int + The position of the new axis in the expanded array shape. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([1, 2]) + >>> x.shape + (2,) + >>> y = mg.expand_dims(x, 1) + >>> y.shape + (2, 1) + >>> z = mg.expand_dims(y, 0) + >>> z.shape + (1, 2, 1) + """ + return Tensor._op(ExpandDims, a, op_args=(axis,), constant=constant)
+ + +
[docs]@implements_numpy_override() +def broadcast_to( + a: ArrayLike, shape: Shape, *, constant: Optional[bool] = None +) -> Tensor: + """ + Broadcast a tensor to a new shape. + + This docstring was adapted from ``numpy.broadcast_to``. + + Parameters + ---------- + a : ArrayLike + The tensor to be broadcasted + + shape: Tuple[int, ...] + The shape of the broadcasted tensor. This shape + should be broadcast-compatible with the original + shape. + + constant : bool, optional(default=False) + If ``True``, the returned tensor is a constant (it + does not back-propagate a gradient) + + Returns + ------- + mygrad.Tensor + + Raises + ------ + ValueError + If the array is not compatible with the new shape + according to Numpy's broadcasting rules. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.Tensor([1, 2, 3]) + >>> mg.broadcast_to(x, (3,3)) + Tensor([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + >>> mg.broadcast_to(x, (4,4)) + Traceback (most recent call last): + ... + ValueError: operands could not be broadcast together with remapped + shapes [original->remapped]: (3,) and requested shape (4,4) + """ + return Tensor._op(BroadcastTo, a, op_args=(shape,), constant=constant)
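A brief sketch of how gradients flow back through a broadcast (the gradient values are worked out by hand from the rules of broadcasting, not taken from the original page): each original element receives the sum of the gradients of all of its broadcast copies.

>>> x = mg.tensor([1., 2., 3.])
>>> mg.broadcast_to(x, (3, 3)).sum().backward()
>>> x.grad  # each element was copied 3 times
array([3., 3., 3.])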
+ + +def _dispatch_atleast_kd(func: Callable[..., _T], Op, *tensors, k: int, constant) -> _T: + if len(tensors) == 1: + (t,) = tensors + if ( + isinstance(t, Tensor) + and t.ndim >= k + and (constant is None or t.constant is constant) + ): + # return tensor unchanged + return cast(_T, t) + return cast(_T, Tensor._op(Op, t, constant=constant)) + else: + out = [func(t, constant=constant) for t in tensors] + return cast(_T, out) + + +@overload +def atleast_1d( + tensors: ArrayLike, *, constant: Optional[bool] = None +) -> Tensor: # pragma: no cover + ... + + +@overload +def atleast_1d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> List[Tensor]: # pragma: no cover + ... + + +
[docs]@implements_numpy_override() +def atleast_1d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> Union[Tensor, List[Tensor]]: + """ + Convert inputs to tensors with at least one dimension. + + Scalar inputs are converted to 1-dimensional tensors, whilst + higher-dimensional inputs are preserved. + + This docstring was adapted from ``numpy.atleast_1d``. + + Parameters + ---------- + tens1, tens2, ... : ArrayLike + One or more input tensors. + + Returns + ------- + ret : Tensor | List[Tensor] + A tensor, or list of tensors, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> import mygrad as mg + >>> mg.atleast_1d(1.0) + Tensor([1.]) + + >>> x = mg.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + Tensor([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> mg.atleast_1d(x) is x + True + + >>> mg.atleast_1d(1, [3, 4]) + [Tensor([1]), Tensor([3, 4])] + + ``numpy.atleast_1d`` will dispatch appropriately on tensors. + + >>> x = mg.tensor(2.) + >>> np.atleast_1d(x) + Tensor([2.]) + + >>> np.atleast_1d(x).backward() + >>> x.grad + array(1.) + + If any argument to ``numpy.atleast_1d`` is a Tensor, ``mygrad.atleast_1d`` + will be dispatched on all of the arguments. + + >>> np.atleast_1d(x, 1.) + [Tensor([2.]), Tensor([1.])] + """ + return _dispatch_atleast_kd(atleast_1d, AtLeast1D, *tensors, k=1, constant=constant)
+ + +@overload +def atleast_2d( + tensors: ArrayLike, *, constant: Optional[bool] = None +) -> Tensor: # pragma: no cover + ... + + +@overload +def atleast_2d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> List[Tensor]: # pragma: no cover + ... + + +
[docs]@implements_numpy_override() +def atleast_2d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> Union[Tensor, List[Tensor]]: + """ + Convert inputs to tensors with at least two dimensions. + + Scalar and 1-dimensional inputs are converted to 2-dimensional tensors, + whilst higher-dimensional inputs are preserved. + + This docstring was adapted from ``numpy.atleast_2d``. + + Parameters + ---------- + tens1, tens2, ... : ArrayLike + One or more input tensors. + + Returns + ------- + ret : Tensor | List[Tensor] + A tensor, or list of tensors, each with ``a.ndim >= 2``. + Copies are made only if necessary. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> import mygrad as mg + >>> mg.atleast_2d(3.0) + Tensor([[3.]]) + + >>> x = mg.arange(3.0) + >>> mg.atleast_2d(x) + Tensor([[0., 1., 2.]]) + >>> mg.atleast_2d(x).base is x + True + + >>> mg.atleast_2d(1, [1, 2], [[1, 2]]) + [Tensor([[1]]), Tensor([[1, 2]]), Tensor([[1, 2]])] + + ``numpy.atleast_2d`` will dispatch appropriately on tensors. + + >>> x = mg.tensor(2.) + >>> np.atleast_2d(x) + Tensor([[2.]]) + + >>> np.atleast_2d(x).backward() + >>> x.grad + array(1.) + + If any argument to ``numpy.atleast_2d`` is a Tensor, ``mygrad.atleast_2d`` + will be dispatched on all of the arguments. + + >>> np.atleast_2d(x, 1.) + [Tensor([[2.]]), Tensor([[1.]])] + """ + return _dispatch_atleast_kd(atleast_2d, AtLeast2D, *tensors, k=2, constant=constant)
+ + +@overload +def atleast_3d( + tensors: ArrayLike, *, constant: Optional[bool] = None +) -> Tensor: # pragma: no cover + ... + + +@overload +def atleast_3d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> List[Tensor]: # pragma: no cover + ... + + +
[docs]@implements_numpy_override() +def atleast_3d( + *tensors: ArrayLike, constant: Optional[bool] = None +) -> Union[Tensor, List[Tensor]]: + """ + Convert inputs to tensors with at least three dimensions. + + Scalar, 1-dimensional, and 2-dimensional inputs are converted to 3-dimensional + tensors, whilst higher-dimensional inputs are preserved. + + This docstring was adapted from ``numpy.atleast_3d``. + + Parameters + ---------- + tens1, tens2, ... : ArrayLike + One or more input tensors. + + Returns + ------- + ret : Tensor | List[Tensor] + A tensor, or list of tensors, each with ``a.ndim >= 3``. + Copies are made only if necessary. For example, a 1-D tensor of shape ``(N,)`` + becomes a view of shape ``(1, N, 1)``, and a 2-D tensor of shape ``(M, N)`` + becomes a view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> import mygrad as mg + >>> mg.atleast_3d(3.0) + Tensor([[[3.]]]) + + >>> x = mg.arange(3.0) + >>> mg.atleast_3d(x).shape + (1, 3, 1) + >>> mg.atleast_3d(x).base is x + True + + >>> x = mg.arange(12.0).reshape(4,3) + >>> mg.atleast_3d(x).shape + (4, 3, 1) + + >>> mg.atleast_3d(1, [[1, 2]], [[[[1, 2]]]]) + [Tensor([[[1]]]), Tensor([[[1, 2]]]), Tensor([[[[1, 2]]]])] + + ``numpy.atleast_3d`` will dispatch appropriately on tensors. + + >>> x = mg.tensor(2.) + >>> np.atleast_3d(x) + Tensor([[[2.]]]) + + >>> np.atleast_3d(x).backward() + >>> x.grad + array(1.) + + If any argument to ``numpy.atleast_3d`` is a Tensor, ``mygrad.atleast_3d`` + will be dispatched on all of the arguments. + + >>> np.atleast_3d(x, 1.) + [Tensor([[[2.]]]), Tensor([[[1.]]])] + """ + return _dispatch_atleast_kd(atleast_3d, AtLeast3D, *tensors, k=3, constant=constant)
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_manip/tensor_joining/funcs.html b/docs/_modules/mygrad/tensor_manip/tensor_joining/funcs.html new file mode 100644 index 00000000..bb5087d6 --- /dev/null +++ b/docs/_modules/mygrad/tensor_manip/tensor_joining/funcs.html @@ -0,0 +1,745 @@ + + + + + + + + + + mygrad.tensor_manip.tensor_joining.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.tensor_manip.tensor_joining.funcs

+from typing import Optional, Sequence, Union
+
+from numpy import ndarray
+
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike, DTypeLikeReals
+
+from .ops import Concatenate, Stack
+
+__all__ = ["concatenate", "stack"]
+
+
+
[docs]@implements_numpy_override() +def concatenate( + tensors: Sequence[ArrayLike], + axis: Optional[int] = 0, + out: Optional[Union[ndarray, Tensor]] = None, + dtype: Optional[DTypeLikeReals] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + concatenate((t1, t2, ...), axis=0, out=None, *, constant=None) + + Join a sequence of tensors along an existing axis. + + This docstring was adapted from that of numpy.concatenate [1]_ + + Parameters + ---------- + tensors : Sequence[ArrayLike] + The tensors must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + + axis : Optional[int] + The axis along which the tensors will be joined. If axis is ``None``, + tensors are flattened before use. Default is 0. + + out : Optional[Union[ndarray, Tensor]] + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + dtype : Optional[DTypeLikeReals] + If provided, the destination array will have this dtype. Cannot be provided + together with ``out``. + + Requires numpy 1.20 or higher. + + Returns + ------- + res : Tensor + The concatenated tensor. + + See Also + -------- + stack : Stack a sequence of tensors along a new axis. + hstack : Stack tensors in sequence horizontally (column wise). + vstack : Stack tensors in sequence vertically (row wise). + dstack : Stack tensors in sequence depth wise (along third dimension). + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.tensor([[1, 2], [3, 4]]) + >>> b = mg.tensor([[5, 6]]) + >>> mg.concatenate((a, b), axis=0) + Tensor([[1, 2], + [3, 4], + [5, 6]]) + >>> mg.concatenate((a, b.T), axis=1) + Tensor([[1, 2, 5], + [3, 4, 6]]) + >>> mg.concatenate((a, b), axis=None) + Tensor([1, 2, 3, 4, 5, 6]) + """ + return Tensor._op( + Concatenate, + *tensors, + op_kwargs={"axis": axis, "dtype": dtype}, + constant=constant, + out=out, + )
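A small sketch of how gradients flow back through ``concatenate`` (the tensor names and gradient values are illustrative and worked out by hand, not taken from the original page): each input receives the slice of the upstream gradient corresponding to its position in the concatenated result.

>>> a = mg.tensor([1., 2.])
>>> b = mg.tensor([3.])
>>> (mg.concatenate((a, b)) * mg.tensor([1., 2., 3.])).sum().backward()
>>> a.grad
array([1., 2.])
>>> b.grad
array([3.])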
+ + +
[docs]@implements_numpy_override() +def stack( + tensors: Sequence[ArrayLike], + axis: int = 0, + out: Optional[Union[ndarray, Tensor]] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + stack((t1, t2, ...), axis=0, out=None, *, constant=None) + + Join a sequence of tensors along a new axis. + + This docstring was adapted from that of numpy.stack [1]_ + + Parameters + ---------- + tensors : Sequence[ArrayLike] + Each tensor must have the same shape. + + axis : Optional[int] + The axis in the result tensor along which the input tensors are stacked. + + out : Optional[Union[ndarray, Tensor]] + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + res : Tensor + The stacked tensor has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of tensors along an existing axis. + hstack : Stack tensors in sequence horizontally (column wise). + vstack : Stack tensors in sequence vertically (row wise). + dstack : Stack tensors in sequence depth wise (along third dimension). + + References + ---------- + .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.stack.html + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.tensor([1, 2, 3]) + >>> b = mg.tensor([-1, -2, -3]) + >>> mg.stack((a, b)) + Tensor([[ 1, 2, 3], + [-1, -2, -3]]) + + >>> mg.stack((a, b), axis=-1) + Tensor([[1, -1], + [2, -2], + [3, -3]]) + """ + return Tensor._op( + Stack, + *tensors, + op_kwargs={"axis": axis}, + constant=constant, + out=out, + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_manip/tiling/funcs.html b/docs/_modules/mygrad/tensor_manip/tiling/funcs.html new file mode 100644 index 00000000..ed01451d --- /dev/null +++ b/docs/_modules/mygrad/tensor_manip/tiling/funcs.html @@ -0,0 +1,639 @@ + + + + + + + + + + mygrad.tensor_manip.tiling.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.tensor_manip.tiling.funcs

+from typing import Optional, Sequence, Union
+
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike
+
+from .ops import Repeat
+
+__all__ = ["repeat"]
+
+
+
[docs]@implements_numpy_override() +def repeat( + a: ArrayLike, + repeats: Union[int, Sequence[int]], + axis: Optional[int] = None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Repeat elements of a tensor. + + This docstring was adapted from ``numpy.repeat`` + + Parameters + ---------- + a : ArrayLike + Input tensor. + + repeats : Union[int, Sequence[int]] + The number of repetitions for each element. ``repeats`` + is broadcasted to fit the shape of the given axis. + + axis : Optional[int] + The axis along which to repeat values. By default, use the + flattened input array, and return a flat output tensor. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + Returns + ------- + repeated_tensor : Tensor + Output tensor which has the same shape as `a`, except along + the given axis. + + Examples + -------- + >>> import mygrad as mg + >>> mg.repeat(3, 4) + Tensor([3, 3, 3, 3]) + >>> x = mg.Tensor([[1, 2], [3, 4]]) + >>> mg.repeat(x, 2) + Tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> mg.repeat(x, 3, axis=1) + Tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> mg.repeat(x, [1, 2], axis=0) + Tensor([[1, 2], + [3, 4], + [3, 4]]) + """ + return Tensor._op(Repeat, a, op_args=(repeats, axis), constant=constant)
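A brief sketch of backpropagation through ``repeat`` (gradient values worked out by hand, not taken from the original page): the gradients of all copies of an element are summed back into its original position.

>>> x = mg.tensor([1., 2.])
>>> mg.repeat(x, 2).sum().backward()
>>> x.grad  # each element contributed two copies to the sum
array([2., 2.])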
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_modules/mygrad/tensor_manip/transpose_like/funcs.html b/docs/_modules/mygrad/tensor_manip/transpose_like/funcs.html new file mode 100644 index 00000000..6a5a677f --- /dev/null +++ b/docs/_modules/mygrad/tensor_manip/transpose_like/funcs.html @@ -0,0 +1,811 @@ + + + + + + + + + + mygrad.tensor_manip.transpose_like.funcs — MyGrad 2.3.0.post1.dev6 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for mygrad.tensor_manip.transpose_like.funcs

+from typing import Optional, Tuple, Union
+
+from mygrad.tensor_base import Tensor, implements_numpy_override
+from mygrad.typing import ArrayLike
+
+from .ops import MoveAxis, Roll, SwapAxes, Transpose
+
+__all__ = ["transpose", "moveaxis", "swapaxes", "roll"]
+
+
+
[docs]@implements_numpy_override() +def transpose(a: ArrayLike, *axes: int, constant: Optional[bool] = None) -> Tensor: + """Permute the dimensions of a tensor. + + Parameters + ---------- + a : ArrayLike + The tensor to be transposed + + axes : int + By default, reverse the dimensions, otherwise permute the axes + according to the values given. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + `a` with its axes permuted. A new tensor is returned. + + Examples + -------- + >>> import mygrad as mg + >>> a = mg.tensor([[1, 2], [3, 4]]) + >>> a + Tensor([[1, 2], + [3, 4]]) + >>> a.transpose() + Tensor([[1, 3], + [2, 4]]) + >>> a.transpose((1, 0)) + Tensor([[1, 3], + [2, 4]]) + >>> a.transpose(1, 0) + Tensor([[1, 3], + [2, 4]])""" + if not axes: + axes = None + elif hasattr(axes[0], "__iter__") or axes[0] is None: + if len(axes) > 1: + raise TypeError( + f"'{type(axes[0])}' object cannot be interpreted as an integer" + ) + axes = axes[0] + return Tensor._op(Transpose, a, op_args=(axes,), constant=constant)
+ + +
[docs]@implements_numpy_override() +def moveaxis( + a: ArrayLike, + source: Union[int, Tuple[int, ...]], + destination: Union[int, Tuple[int, ...]], + *, + constant: Optional[bool] = None, +) -> Tensor: + """Move axes of a tensor to new positions. Other axes remain in their + original order. + + + Parameters + ---------- + a : ArrayLike + The array whose axes should be reordered. + + source : Union[int, Sequence[int]] + Original positions of the axes to move. These must be unique. + + destination : Union[int, Sequence[int]] + Destination positions for each of the original axes. These must also be + unique. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + Returns + ------- + result : mygrad.Tensor + Array with moved axes. This array is a view of the input array.. + + Examples + -------- + >>> from mygrad import zeros, moveaxis + >>> x = zeros((3, 4, 5)) + >>> moveaxis(x, 0, -1).shape + (4, 5, 3) + >>> moveaxis(x, -1, 0).shape + (5, 3, 4) + >>> moveaxis(x, [0, 1], [-1, -2]).shape + (5, 4, 3)""" + return Tensor._op(MoveAxis, a, op_args=(source, destination), constant=constant)
+ + +
[docs]@implements_numpy_override() +def swapaxes( + a: ArrayLike, axis1: int, axis2: int, *, constant: Optional[bool] = None +) -> Tensor: + """Interchange two axes of a tensor. + + Parameters + ---------- + a : ArrayLike + Input array. + + axis1 : int + First axis. + + axis2 : int + Second axis. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + mygrad.Tensor + + Examples + -------- + >>> from mygrad import Tensor, swapaxes + >>> x = Tensor([[1, 2, 3]]) + >>> swapaxes(x, 0, 1) + Tensor([[1], + [2], + [3]]) + >>> x = Tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) + >>> x + Tensor([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> swapaxes(x, 0, 2) + Tensor([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + """ + return Tensor._op(SwapAxes, a, op_args=(axis1, axis2), constant=constant)
+ + +
[docs]@implements_numpy_override() +def roll( + a: ArrayLike, + shift: Union[int, Tuple[int, ...]], + axis=None, + *, + constant: Optional[bool] = None, +) -> Tensor: + """ + Roll tensor elements along a given axis. + + Elements that roll beyond the end of an axis "wrap back around" to the beginning. + + This docstring was adapted from ``numpy.roll`` + + Parameters + ---------- + a : ArrayLike + Input tensor. + + shift : Union[int, Tuple[int, ...]] + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + + axis : Optional[Union[int, Tuple[int, ...]]] + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + constant : Optional[bool] + If ``True``, this tensor is treated as a constant, and thus does not + facilitate back propagation (i.e. ``constant.grad`` will always return + ``None``). + + Defaults to ``False`` for float-type data. + Defaults to ``True`` for integer-type data. + + Integer-type tensors must be constant. + + Returns + ------- + res : Tensor + Output array, with the same shape as `a`. + + Examples + -------- + >>> import mygrad as mg + >>> x = mg.arange(10) + >>> mg.roll(x, 2) + Tensor([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> x2 = mg.reshape(x, (2,5)) + >>> x2 + Tensor([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> mg.roll(x2, 1) + Tensor([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> mg.roll(x2, 1, axis=0) + Tensor([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> mg.roll(x2, 1, axis=1) + Tensor([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + """ + return Tensor._op( + Roll, a, op_kwargs=dict(shift=shift, axis=axis), constant=constant + )
+
+ +
+ + + +
+ +
+ +
+
+
+ +
+ +
+ +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/docs/_sources/changes.rst.txt b/docs/_sources/changes.rst.txt new file mode 100644 index 00000000..8d047821 --- /dev/null +++ b/docs/_sources/changes.rst.txt @@ -0,0 +1,791 @@ +========= +Changelog +========= + +This is a record of all past mygrad releases and what went into them, +in reverse chronological order. All previous releases should still be available +on pip. + +.. _v2.3.0: + +------------------ +2.3.0 - 2024-09-07 +------------------ + +- Adds support for NumPy 2.0 +- Minimum supported NumPy version is now 1.24 +- Minimum supported Python version is now 3.9 +- Adds testing for Python 3.12 + +.. _v2.2.0: + +------------------ +2.2.0 - 2023-01-03 +------------------ + +- MyGrad is now tested against Python 3.11. (:pull:`411`) +- ``mygrad.bool8`` has been removed. Use ``mygrad.bool_`` instead. (:pull:`411`) +- Adds ufunc support for ``resolve_dtypes``. (:pull:`411`) +- Modifies automatic differentiation framework to be simpler and more memory efficient. In the future, MyGrad will be able to expose an API akin to `torch.autograd.grad `_. (:pull:`407`) +- MyGrad's CI now enforces formatting and spell check requirements on all pull requests. (:pull:`411`) + +.. _v2.1.0: + +------------------ +2.1.0 - 2022-01-01 +------------------ + +New Functions and Utilities +--------------------------- + +The following differentiable functions are now supported by MyGrad, and "drop-in" overrides for their NumPy counterparts are supported as well. + + - :func:`~mygrad.atleast_1d` + - :func:`~mygrad.atleast_2d` + - :func:`~mygrad.atleast_3d` + +Basic tensor save/load functionality has been added (thanks to @kw-0). + + - :func:`~mygrad.save` + - :func:`~mygrad.load` + +Improvements +------------ + +- :func:`~mygrad.clip` and ``Tensor.clip`` now accept an ``out`` target, permitting in-place operations. +- The method ``Tensor.__index__()`` is now implemented, which permits scalar integer-valued tensors to be used to index into Python sequences. +- Added Python 3.10 to our automated test matrix. + +Compatibility-Breaking Changes +------------------------------ + +- In accordance with `NEP 29 `_ we are dropping support for NumPy versions below 1.19. However, MyGrad will not drop support for Python 3.7; to remain as lightweight and flexible as possible we will support minor versions of Python up until their EOL or until our minimal NumPy dependency drops support -- whichever occurs first. +- The interface to :func:`~mygrad.arange` was changed from ``arange(start, stop=None, step=None, ...)`` to ``arange([start,] stop[, step,], ...)``. This provides exact parity with NumPy's arange function. +- The derivatives of :func:`~mygrad.absolute` and :func:`~mygrad.linalg.norm` have been revised such that in cases where the derivatives used to be ``nan``, those entries will now be ``0``. Both functions can now be passed ``nan_to_num=False`` to enable the previous, more rigorous behavior. See `PR #379 `_ for more details. + +.. _v2.0.2: + +------------------ +2.0.2 - 2021-04-10 +------------------ + +Exposes :func:`~mygrad.execute_op` at top-level namespace + +.. _v2.0.1: + +------------------ +2.0.1 - 2021-04-03 +------------------ + +Bug Fixes +--------- + +- :func:`~mygrad.matmul` and :func:`~mygrad.multi_matmul` were missing from the top-level namespace of ``mygrad``. +- A 0D tensor involved in a broadcasted operation would have a numpy-float set for its gradient instead of a 0D + array. 
+ +New Functions +------------- +The following non-differentiable NumPy functions now work on mygrad tensors (and return ndarrays). +Aliases of these are available at the top-level namespace of ``mygrad`` + + - np.isnan + - np.isfinite + - np.isinf + - np.isnat + - np.signbit + - np.logical_not + - np.logical_and + - np.logical_or + - np.logical_xor + - np.greater + - np.greater_equal + - np.less + - np.less_equal + - np.equal + - np.not_equal + - np.floor_divide + - np.remainder + - np.mod + - np.fmod + - np.divmod + - np.rint + - np.sign + - np.floor + - np.ceil + - np.trunc + - np.isclose + + +.. _v2.0.0: + +------------------ +2.0.0 - 2021-03-30 +------------------ + +🎉🎉🎉 + +This is a compatibility-breaking update to MyGrad, and it's great! +MyGrad 2.0 represents a major overhaul to this project. +This release creates near parity between the experiences of using MyGrad and using NumPy, and uses NumPy's new +mechanisms for overriding functions so that NumPy functions can operate "directly" on MyGrad's tensors, and thus +can be used to construct differentiable computational graphs! + +.. code:: python + + >>> import numpy as np + >>> from mygrad import tensor + >>> x = tensor([1., 2.]) + >>> np.square(x).backward() # backprop through NumPy functions! + >>> x.grad + array([2., 4.]) + +Another important, but less exciting, feature is that MyGrad now protects users from inadvertently +corrupting the state of a computational graph by, say, mutating a NumPy array that is participating in +the graph. +This is very useful for protecting people – especially students – from unwittingly poisoning the results +of their calculations. + +Lastly... no more "nulling" gradients! MyGrad will now handle deleting gradients for you in a way that +is nicely compatible with gradient-based optimization work flows. + +New Functions and Utilities +--------------------------- + + - :func:`~mygrad.tensor` + - :func:`~mygrad.astensor` + - :func:`~mygrad.asarray` + - :func:`~mygrad.no_autodiff` + - :func:`~mygrad.mem_guard_off` + - :func:`~mygrad.mem_guard_on` + - :func:`~mygrad.turn_memory_guarding_off` + - :func:`~mygrad.turn_memory_guarding_on` + - :func:`~mygrad.concatenate` + - :func:`~mygrad.stack` + - :func:`~mygrad.linalg.norm` + + +Dropping Support for Python 3.6 and Numpy < 1.17 +------------------------------------------------ +MyGrad now abides by the `NEP 29 `_ recommendation, and adopts +a common “time window-based” policy for support of Python and NumPy versions. + +As such the Python 3.7 and Numpy 1.17 are the minimum versions supported by MyGrad 2.0. + + +The Interfaces Between ``mygrad.Tensor`` and ``numpy.array`` Match +------------------------------------------------------------------ + +You can now control the dimensionality of a tensor and whether or not a tensor copies its data upon initialization, via the +:func:`~mygrad.tensor` interface. This mirrors the behavior of :func:`~numpy.array` + ++-------------------------------------------------------+-------------------------------------------------------+-------------------------------------------------+ +| Numpy | MyGrad 1.X | MyGrad 2.0 | ++=======================================================+=======================================================+=================================================+ +| .. code:: python | .. code:: python | .. 
code:: python | +| | | | +| >>> np.array([1., 2.], copy=True, ndmin=2) | >>> mg.Tensor([1., 2.], copy=True, ndmin=2) | >>> mg.tensor([1., 2.], copy=True, ndmin=2) | +| array([[1., 2.]]) | | Tensor([[1., 2.]]) | ++-------------------------------------------------------+-------------------------------------------------------+-------------------------------------------------+ + + +Support for dtype, where, and out in ufuncs +------------------------------------------- + +MyGrad now implements ufuncs with support for specifying dtype, boolean masks, and in-place targets. The +additional methods, such as ``mygrad.add.reduce``, are not yet implemented. + ++---------------------------------------------------------------+ +| MyGrad 2.0 | ++===============================================================+ +| .. code:: python | +| | +| >>> mg.add([1, 2],[0, 2], where=[True, False], dtype=float)| +| Tensor([3., 1.]) | ++---------------------------------------------------------------+ + + +Augmented Updates on Tensors Now Match NumPy's Behavior +------------------------------------------------------- + +Previously, augmented assignment expressions, such as ``tensor *= 2``, behaved merely +as a shorthand for the simple assignment ``tensor = tensor * 2``. +This is in stark contrast to the behavior of an augmented assignment on a NumPy array, which +`mutates the array in-place `_. + +This meant that there was a major discrepancy between how these expressions behaved across MyGrad and +NumPy. +This has changed in MyGrad 2.0: all augmented assignment expressions operate in-place on tensors and +mutate their underlying data. + ++-----------------------------------+-----------------------------------+-----------------------------------+ +| Numpy | MyGrad 1.X | MyGrad 2.0 | ++===================================+===================================+===================================+ +| .. code:: python | .. code:: python | .. code:: python | +| | | | +| >>> x = np.array([1., 2.]) | >>> x = mg.Tensor([1., 2.]) | >>> x = mg.tensor([1., 2.]) | +| >>> y = x | >>> y = x | >>> y = x | +| >>> x *= 2 | >>> x *= 2 # x = 2 * x | >>> x *= 2 | +| >>> x is y | >>> x is y # doesn't match! | >>> x is y # matches! | +| True | False | True | ++-----------------------------------+-----------------------------------+-----------------------------------+ + + + +Creating and Augmenting Views of Tensors +---------------------------------------- + +MyGrad now provides rich support for creating and manipulating views of tensors. + +All `basic indexing `_ operations +performed on a tensor will produce a view of said tensor. +This means that these two tensors share memory +(While MyGrad 1.X created a view of the underlying NumPy array under the hood for basic indexing, its notion +of supporting views went no further than that.) +As with NumPy arrays the "parent" of a view can be accessed through the tensor's ``.base`` +attribute + ++-----------------------------------+-------------------------------------+-----------------------------------+ +| Numpy | MyGrad 1.X | MyGrad 2.0 | ++===================================+=====================================+===================================+ +| .. code:: python | .. code:: python | .. 
code:: python | +| | | | +| >>> x = np.array([1., 2., 3.]) | >>> x = mg.Tensor([1., 2., 3.]) | >>> x = mg.tensor([1., 2., 3.])| +| >>> y = x[:2] | >>> y = x[:2] | >>> y = x[:2] | +| >>> np.shares_memory(x, y) | >>> np.shares_memory(x, y) | >>> np.shares_memory(x, y) | +| True | True | True | +| >>> y.base is x | >>> y.base is x # doesn't match!| >>> y.base is x # matches! | +| True | | True | ++-----------------------------------+-------------------------------------+-----------------------------------+ + + +Mutating shared data will propagate through views: + + ++-----------------------------------+-------------------------------------+------------------------------------+ +| Numpy | MyGrad 1.X | MyGrad 2.0 | ++===================================+=====================================+====================================+ +| .. code:: python | .. code:: python | .. code:: python | +| | | | +| >>> y *= -1 | >>> y *= -1 | >>> y *= -1 | +| >>> y | >>> y | >>> y | +| array([-1., -2.]) | Tensor([-1., -2.]) | Tensor([-1., -2.]) | +| >>> x | >>> x # doesn't match! | >>> x # matches! | +| array([-1., -2., 3.]) | Tensor([1., 2., 3.]) | Tensor([-1., -2., 3.]) | ++-----------------------------------+-------------------------------------+------------------------------------+ + + +Furthermore, views of tensors now propagate corresponding gradient information as well! +This means that if ``y`` is a view of ``x``, then ``y.grad`` will be a corresponding view of ``x.grad``. +This is true for all varieties of views, views of views, etc., of ``x``. + +.. code-block:: python + + # Because `y` is a view of `x`, `y.grad` will be + # a corresponding view of `x.grad` + >>> (x ** 2).backward() + >>> x.grad + array([-2., -4., 6., 8.]) + >>> y.grad + array([-2., -4.]) + >>> y.grad.base is x.grad + True + +This rich support for views, augmented assignments, and in-place updates on tensors enables much more sophisticated +operations on tensors now. +For example, let's make a shape-(3, 3) tensor and perform and operations involving views of its diagonal and +its anti-diagonal. (Note that :func:`~mygrad.einsum` is capable of returning a view of a tensor's diagonal, +and that MyGrad fully supports backpropagation through all flavors of einsum!) + +.. code-block:: python + + >>> x = mg.tensor([[0., 1., 2.], + ... [3., 4., 5.], + ... [6., 7., 8.]]) + + # view of diagonal of `x` + >>> diag = mg.einsum("ii->i", x) + >>> diag + Tensor([0., 4., 8.]) + + # view of anti-diagonal of `x` + >>> anti_diag = mg.einsum("ii->i", x[:, ::-1]) + >>> anti_diag + Tensor([2., 4., 6.]) + + # Compute derivatives of their summed difference + >>> (diag - anti_diag).sum().backward() + >>> x.grad + array([[ 1., 0., -1.], + [ 0., 0., 0.], + [-1., 0., 1.]]) + + # The views of `x` have the appropriate corresponding + # views of `x.grad` + >>> diag.grad + array([1., 0., 1.]) + >>> anti_diag.grad + array([-1., 0., -1.]) + + +Bye-Bye Null Gradients! +----------------------- + +Gone are the days of having to manually clear your tensors' gradients and the computational graph that they were +in; now MyGrad does it for you! +This means that ``Tensor.null_gradients()`` no longer does anything other than emit a deprecation warning. +In an upcoming minor release this method will be removed entirely. + +In MyGrad 2.0, calling :func:`~mygrad.Tensor.backward` will finish its computation by clearing the computational graph that was involved +in the backpropagation. 
+Thus any internally-referenced tensors associated with that computational graph become free for garbage collection. +This is very nice behavior to help prevent students from filling up their RAM unwittingly. + +And instead of worrying about nulling gradients manually, a tensor will automatically have its gradient cleared any time that it is +involved in a new mathematical operation. +This enables the following common workflow for performing gradient-based optimization: + + ++-------------------------------------+-------------------------------------+ +| MyGrad 1.X | MyGrad 2.0 | ++=====================================+=====================================+ +| .. code:: python | .. code:: python | +| | | +| >>> x = mg.Tensor([1., 2.]) | >>> x = mg.tensor([1., 2.]) | +| >>> for _ in range(10): | >>> for _ in range(10): | +| ... y = 3 * x | ... y = 3 * x # nulls grad | +| ... assert x.grad is None | ... assert x.grad is None | +| ... y.backward() | ... y.backward() | +| ... assert all(x.grad == 3.) | ... assert all(x.grad == 3.) | +| ... y.null_gradients() | | ++-------------------------------------+-------------------------------------+ + + +.. code-block:: python + + for _ in range(num_optimization_steps): + # using `model_params` in a function will automatically + # set its gradients to `None` + loss = compute_loss(data, model_params) # gradients cleared + loss.backward() # compute gradients + optimize(model_params) # do stuff with gradients + + +You can also call :func:`~mygrad.Tensor.null_grad` to manually clear an individual tensor's gradient. + + + +Safety First: Memory Guarding Behavior in MyGrad 2.0 +---------------------------------------------------- + +In MyGrad 1.X it was all too easy to unwittingly corrupt the state of a computational graph by mutating +a NumPy array mid-computation. +This could lead to incorrect calculations of gradients! This is the stuff of horrifying nightmares. + +Now MyGrad tracks all of the arrays that are involved in active computational graphs and locks their memory +so that they are read-only (except for when the user mutates the array explicitly with a MyGrad operation). +This means that the sort of mutation that could have lurked silently in the dimly-lit alleyways of bugs-ville will +now get loudly narc'd on by MyGrad's merciless memory guard! + + ++---------------------------------------------+---------------------------------------+ +| MyGrad 1.X | MyGrad 2.0 | ++=============================================+=======================================+ +| .. code:: python | .. code:: python | +| | | +| >>> arr = np.array([1., 2.]) | >>> arr = np.array([1., 2.]) | +| >>> tn = mg.Tensor([1. 1.]) | >>> tn = mg.tensor([1. 1.]) | +| >>> z = x * y | >>> z = x * y | +| # mutating x will corrupt | # mutating x will corrupt | +| # backprop through z... | # backprop through z... | +| >>> x[:] = 0. | >>> x[:] = 0. # you shall not pass!| +| | ValueError: read-only! | +| >>> z.backward() # uh oh... | >>> z.backward() | +| >>> tn.grad # should be: (1., 2.) | >>> tn.grad | +| array([0., 0.]) | array([1., 2.]) | ++---------------------------------------------+---------------------------------------+ + +Any tensor or array that is no longer participating in an active computational graph will automatically +have its write-ability restored to its original state. + +.. 
+
+
+Safety First: Memory Guarding Behavior in MyGrad 2.0
+----------------------------------------------------
+
+In MyGrad 1.X it was all too easy to unwittingly corrupt the state of a computational graph by mutating
+a NumPy array mid-computation.
+This could lead to incorrect calculations of gradients! This is the stuff of horrifying nightmares.
+
+Now MyGrad tracks all of the arrays that are involved in active computational graphs and locks their memory
+so that they are read-only (except for when the user mutates the array explicitly with a MyGrad operation).
+This means that the sort of mutation that could have lurked silently in the dimly-lit alleyways of bugs-ville will
+now get loudly narc'd on by MyGrad's merciless memory guard!
+
+
++---------------------------------------------+---------------------------------------+
+| MyGrad 1.X                                  | MyGrad 2.0                            |
++=============================================+=======================================+
+| .. code:: python                            | .. code:: python                      |
+|                                             |                                       |
+|    >>> arr = np.array([1., 2.])             |    >>> arr = np.array([1., 2.])       |
+|    >>> tn = mg.Tensor([1., 1.])             |    >>> tn = mg.tensor([1., 1.])       |
+|    >>> z = arr * tn                         |    >>> z = arr * tn                   |
+|    # mutating arr will corrupt              |    # mutating arr will corrupt        |
+|    # backprop through z...                  |    # backprop through z...            |
+|    >>> arr[:] = 0.                          |    >>> arr[:] = 0. # you shall not pass!|
+|                                             |    ValueError: read-only!             |
+|    >>> z.backward()  # uh oh...             |    >>> z.backward()                   |
+|    >>> tn.grad  # should be: (1., 2.)       |    >>> tn.grad                        |
+|    array([0., 0.])                          |    array([1., 2.])                    |
++---------------------------------------------+---------------------------------------+
+
+Any tensor or array that is no longer participating in an active computational graph will automatically
+have its write-ability restored to its original state.
+
+.. code-block:: python
+
+    # memory guarding is released once an array is no
+    # longer involved in an active computational graph
+    >>> import mygrad as mg
+    >>> import numpy as np
+    >>> x = np.array([1., 2.])
+    >>> y = mg.ones_like(x)
+    >>> z = x * y     # x and y are locked
+    >>> z.backward()  # graph cleared; x and y are "released"
+    >>> x[:] = 0      # can write to x
+    >>> x
+    array([0., 0.])
+
+    # This result is not referenced, thus
+    # x and y are immediately released by the
+    # memory-guard; no graph-clearing is needed
+    >>> x * y
+    Tensor([0., 0.])
+    >>> x[:] = 1.
+
+
+But with great responsibility comes great ...uhh... slowness? This memory-guarding feature can lead to slowdowns
+of **up to 50% for computations involving many small tensors**.
+(It used to be **a lot** worse... like 5x worse. I worked really hard to speed it up! I promise!)
+That being said, computations involving beefy tensors (e.g. standard neural networks) will not be significantly
+affected by the overhead associated with the memory guard.
+Please refer to :ref:`performance-tips` for responsible ways to disable this memory-guarding mechanism.
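+
+For instance, MyGrad documents ``mem_guard_off`` (and ``mem_guard_on``) for exactly this purpose; the
+following is a minimal sketch, assuming ``mem_guard_off`` is used as a context manager around code that
+you trust not to mutate arrays mid-graph:
+
+.. code-block:: python
+
+    # a sketch: selectively disabling the memory guard for trusted code
+    >>> import mygrad as mg
+    >>> import numpy as np
+    >>> x = np.arange(3.)
+    >>> with mg.mem_guard_off:
+    ...     z = mg.ones_like(x) * x  # `x` is *not* locked here
+    >>> x.flags.writeable            # `x` stays writeable, even mid-graph
+    True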
+
+Speaking of optimizations...
+
+
+Disabling Automatic Differentiation
+-----------------------------------
+
+Sometimes you want to use your MyGrad code to do calculations, but you don't actually need to compute
+any derivatives.
+A common example of this is evaluating the test-time performance of a machine learning model that you are
+in the process of optimizing – you don't actually need to perform backpropagation when you are processing
+the test data.
+
+In these circumstances, you can greatly reduce the overhead cost associated with building a computational
+graph by using the :func:`~mygrad.no_autodiff` decorator / context manager. See the linked documentation
+for extensive examples of its usage.
+
+.. code-block:: python
+
+    # demonstrating mygrad in no-autodiff mode
+    >>> import mygrad as mg
+    >>> x = mg.Tensor([1., 2., 3., 4.])
+    >>> with mg.no_autodiff:
+    ...     y = x ** 2  # operation not tracked
+    >>> y.backward()
+    >>> y.grad, x.grad  # x is not "connected" to y
+    (array([1., 1., 1., 1.]), None)
+
+For computations involving many small tensors, this can produce **up to a 3x speedup**! So make sure you
+make keen use of this when you don't actually need to perform autodiff.
+
+Revamping Constant Semantics to be Explicit
+-------------------------------------------
+
+Previously, specifying ``constant=False`` in a mygrad function did not actually mean
+that the function would necessarily produce a non-constant tensor. Rather, it simply
+meant that the output would not be *forced* to be a constant – whether or not the result
+was a constant depended on the inputs (i.e. a function whose inputs were all constants
+would thus produce a constant).
+
+This was a very bad design decision! Now, specifying ``constant=False`` guarantees that
+the output of a function is a non-constant (meaning that it facilitates backpropagation
+through a computational graph).
+
+That being said, we usually *do* want constant information to propagate through functions.
+Thus ``constant=None`` is now the default value – its behavior matches that of ``constant=False``
+from MyGrad 1.X – for all functions that accept the argument.
+
+It is also now standard to require that this argument be a keyword-only argument.
+
+
++---------------------------------------------+----------------------------------------------+
+| MyGrad 1.X                                  | MyGrad 2.0                                   |
++=============================================+==============================================+
+| .. code:: python                            | .. code:: python                             |
+|                                             |                                              |
+|    >>> t1 = mg.tensor(1., constant=True)    |    >>> t1 = mg.tensor(1., constant=True)    |
+|    >>> t2 = mg.tensor(1., constant=True)    |    >>> t2 = mg.tensor(1., constant=True)    |
+|                                             |                                              |
+|    >>> out = mg.add(t1, t2, constant=False) |    >>> out = mg.add(t1, t2, constant=False)  |
+|    >>> out.constant                         |    >>> out.constant                          |
+|    True                                     |    False                                     |
+|                                             |                                              |
+|                                             |    # constant = None                         |
+|                                             |    >>> out = mg.add(t1, t2)                  |
+|                                             |    >>> out.constant                          |
+|                                             |    True                                      |
++---------------------------------------------+----------------------------------------------+
+
+The same comparison, spelled out explicitly:
+
+>>> t1 = mg.tensor(1., constant=True)
+>>> t2 = mg.tensor(1., constant=True)
+
+# old behavior
+>>> out = mg.add(t1, t2, constant=False)
+>>> out.constant
+True
+
+# new behavior
+>>> out = mg.add(t1, t2, constant=False)
+>>> out.constant
+False
+
+>>> out = mg.add(t1, t2, constant=None)
+>>> out.constant
+True
+
+Remove Scalar-Only Conditions on Backpropagation
+------------------------------------------------
+
+Previously, one could invoke backpropagation from a non-scalar tensor only if that tensor was
+the culmination of operations that preserved a one-to-one mapping between the elements of an upstream
+tensor and those of its downstream neighbor. Otherwise an error was raised. This ensured that ``tensor.grad``
+would always be the same shape as ``tensor``, and not represent a higher-dimensional tensor.
+
+Now calling ``tensor.backward()`` from a non-scalar tensor will behave as if the tensor was summed prior
+to invoking backpropagation. This is simple, easy-to-understand behavior, which ensures that ``tensor.grad``
+can always be interpreted as an array of scalar-valued derivatives.
+
++---------------------------------------------+---------------------------------------+
+| MyGrad 1.X                                  | MyGrad 2.0                            |
++=============================================+=======================================+
+| .. code:: python                            | .. code:: python                      |
+|                                             |                                       |
+|    >>> t1 = mg.Tensor([[1., 2.],            |    >>> t1 = mg.tensor([[1., 2.],      |
+|    ...                 [0., -1]])           |    ...                 [0., -1]])     |
+|    >>> t2 = mg.Tensor([[0., 1.],            |    >>> t2 = mg.tensor([[0., 1.],      |
+|    ...                 [3., -1]])           |    ...                 [3., -1]])     |
+|    >>> z = t1 @ t2                          |    >>> z = t1 @ t2                    |
+|    >>> z.backward()                         |    >>> z.backward()                   |
+|                                             |    >>> t1.grad                        |
+|                                             |    array([[1., 2.],                   |
+|                                             |           [1., 2.]])                  |
++---------------------------------------------+---------------------------------------+
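+
+In other words, for a non-scalar ``z``, ``z.backward()`` now computes the same gradients as
+``z.sum().backward()``. A quick sketch of that equivalence (the tensors here are purely illustrative):
+
+.. code-block:: python
+
+    >>> import mygrad as mg
+    >>> t = mg.tensor([[1., 2.], [3., 4.]])
+    >>> z = 2 * t
+    >>> z.backward()               # treated as though `z` had been summed first
+    >>> t.grad
+    array([[2., 2.],
+           [2., 2.]])
+
+    >>> t = mg.tensor([[1., 2.], [3., 4.]])
+    >>> (2 * t).sum().backward()   # the explicit sum produces the same gradient
+    >>> t.grad
+    array([[2., 2.],
+           [2., 2.]])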
+
+
+Integer-valued Tensors Are Treated as Constants
+-----------------------------------------------
+
+Derivatives involving integer-valued tensors are typically ill-defined, and in MyGrad 1.X they
+were generally just wrong. Now integer-valued tensors can only be involved in computational
+graphs as constants.
+
++---------------------------------------------+-------------------------------------------------+
+| MyGrad 1.X                                  | MyGrad 2.0                                      |
++=============================================+=================================================+
+| .. code:: python                            | .. code:: python                                |
+|                                             |                                                 |
+|    >>> t1 = mg.Tensor([[1, 2]]).constant    |    >>> t1 = mg.tensor([[1, 2]]).constant        |
+|    False                                    |    True                                         |
++---------------------------------------------+-------------------------------------------------+
+
+Is This Code Well-Tested?
+-------------------------
+
+Yes! I consider MyGrad's test suite to be the most important part of the library. It is
+the only reason why I feel comfortable releasing this code for students, teachers, and others to use.
+I leverage thorough `property-based testing `_ using the `Hypothesis library `_
+to exercise this code as rigorously as I can manage. These tests `even found bugs in NumPy `_!
+
+
+Special Thanks
+--------------
+
+Special thanks to Alex Silverstein, Zac Dodds, and Petar Griggs for all of the fruitful discussions, ideas, and influence that they provided
+throughout this major update.
+
+.. _v1.9.0:
+
+------------------
+1.9.0 - 2020-08-28
+------------------
+
+The most significant aspect of this release is the implementation of ``Tensor.__array__``, which enables a huge amount
+of cross-compatibility with numpy utilities (`#288 `_). Note that any code that previously
+relied on a numpy function producing an array of tensor-scalars will now likely get a standard numpy array instead.
+
+Improvements:
+
+- ``x**1`` and ``x**2`` are now special-cased in order to make these common operations more efficient (`#266 `_)
+- The derivative of :func:`~mygrad.nnet.losses.focal_loss` was refactored to handle special edge-cases and the tests for focal loss were improved to exercise these edge cases (`#269 `_)
+- Various improvements to the tests (`#271 `_, `#277 `_, `#290 `_, `#284 `_, `#289 `_, `#282 `_, `#292 `_, `#293 `_)
+- The internal mechanism for tracking tensors in the computational graph now depends on hashing tensor-IDs instead of hashing tensors directly. Tensors were only hashable because their equality special methods were being monkey-patched (`#276 `_)
+- :func:`~mygrad.nnet.activations.softmax` and :func:`~mygrad.nnet.activations.logsoftmax` both expose ``axis`` arguments (`#268 `_)
+
+Bug fixes:
+
+- `0D tensors could not be indexed into `_ – e.g. to insert a newaxis (`#273 `_)
+- There was a potential numerical instability in :func:`mygrad.nnet.layers.batchnorm` (`#285 `_)
+- The ``dtype`` argument in ``Tensor.__init__`` was ignored when the array-like argument, ``x``, was another Tensor instance (`#294 `_)
+
+New features:
+
+- ``Tensor.__array__`` now exposes the tensor's underlying numpy array – this enables a huge amount of cross-compatibility with numpy utilities (`#288 `_)
+- Adds :func:`~mygrad.asarray` (`#279 `_)
+- Adds :func:`~mygrad.astensor` (`#294 `_)
+
+
+.. _v1.8.1:
+
+------------------
+1.8.1 - 2020-07-28
+------------------
+
+This is an `internal change `_ to the backprop
+mechanism for ``Tensor.__getitem__``, which produces considerable speedups (2x-4x) for backprop
+through basic indexing and boolean indexing. Thanks to Petar Griggs for finding this.
+
+
+.. _v1.8.0:
+
+------------------
+1.8.0 - 2020-07-25
+------------------
+
+New features:
+
+- Adds :func:`~mygrad.any` and :func:`~mygrad.Tensor.any`
+- Adds :func:`~mygrad.random.rand`
+- Adds :func:`~mygrad.random.randint`
+- Adds :func:`~mygrad.random.randn`
+- Adds :func:`~mygrad.random.random`
+- Adds :func:`~mygrad.random.random_integers`
+- Adds :func:`~mygrad.random.random_sample`
+- Adds :func:`~mygrad.random.ranf`
+- Adds :func:`~mygrad.random.sample`
+- Adds :func:`~mygrad.random.seed`
+
+Thanks to Darshan Krishnaswamy and Sam Carpenter for adding this functionality!
+
+Fixes a bug in the GRU layer where mixed floating point precision dtypes between data and weights raised an error.
+Thanks to Petar Griggs for the fix!
+
+..
_v1.7.1: + +------------------ +1.7.1 - 2020-07-11 +------------------ + +Fixes a bug in :func:`~mygrad.nnet.losses.negative_log_likelihood`, where setting ``constant=True`` had no effect. + + +.. _v1.7.0: + +------------------ +1.7.0 - 2020-07-11 +------------------ + +This release continues the process of integrating functions from `mynn `_. + +New features: + +- Adds :func:`~mygrad.nnet.initializers.glorot_normal` +- Adds :func:`~mygrad.nnet.initializers.glorot_uniform` +- Adds :func:`~mygrad.nnet.initializers.he_normal` +- Adds :func:`~mygrad.nnet.initializers.he_uniform` +- Adds :func:`~mygrad.nnet.initializers.normal` +- Adds :func:`~mygrad.nnet.initializers.uniform` +- Adds :func:`~mygrad.nnet.losses.focal_loss` +- Adds :func:`~mygrad.nnet.losses.negative_log_likelihood` + +Big thanks to David Mascharka! + +Improvements: + +The interfaces to :func:`~mygrad.reshape` and :func:`~mygrad.Tensor.reshape` were adjusted to match exactly the interfaces to their NumPy counterparts. +I.e. :func:`~mygrad.reshape` now requires ``newshape`` to be a sequence, whereas :func:`~mygrad.Tensor.reshape` can accept an unpacked sequence for its +``newshape``. + +:func:`~mygrad.Tensor.shape` is now settable - triggering an in-place reshape of a tensor, matching the corresponding behavior in NumPy. + +Internal changes: + +The logic for writing an in-place operation has been consolidated into a convenient wrapper: :func:`~mygrad.Tensor._in_place_op`. + + +.. _v1.6.0: + +------------------ +1.6.0 - 2020-06-21 +------------------ + +New features: + +- Adds :func:`~mygrad.nnet.activations.elu` +- Adds :func:`~mygrad.nnet.activations.glu` +- Adds :func:`~mygrad.nnet.activations.leaky_relu` +- Adds :func:`~mygrad.nnet.activations.selu` +- Adds :func:`~mygrad.nnet.activations.soft_sign` + +Big thanks to David Mascharka! + + +.. _v1.5.0: + +------------------- +1.5.0 - 2020-02-16 +------------------- + +New features: + +- Adds :func:`~mygrad.Tensor.astype` method. +- Adds :func:`~mygrad.nnet.activations.hard_tanh` +- ``y_true`` can now be passed as a ``Tensor`` to :func:`~mygrad.nnet.losses.softmax_crossentropy` + + +This update also includes various improvements to the library's test suite. + +.. _v1.4.1: + +------------------- +1.4.1 - 2020-01-09 +------------------- + +This release performs an internal refactor in the ``nnet`` module of the library, as well as +an analogous refactor in the test suite. This also fixes a docstring in the ``multiclass_hinge`` +loss to properly show a description in the readthedocs page. + +.. _v1.4.0: + +------------------- +1.4.0 - 2019-12-19 +------------------- + +This release adds the :func:`~mygrad.repeat` operation. It also includes some minor +improvements to mygrad's test suite. + + +.. _v1.3.0: + +------------------- +1.3.0 - 2019-11-30 +------------------- + +This release adds :func:`~mygrad.clip` and :func:`~mygrad.where`. + +It also includes a major fix to the graph-traversal mechanism for null-gradients and clear-graph, +eliminating an exponentially-scaling runtime. + +``+x`` will now invoke ``mygrad.positive``, mirroring the numpy behavior + +There are improvements to user-facing error messages and input validation in addition to major +improvements to mygrad's test suite. There is now a 100% line-coverage gate in mygrad's CI system. + + +.. _v1.2.0: + +------------------- +1.2.0 - 2019-08-03 +------------------- + +We're finally keeping a formal changelog! 
+
+This release makes substantial improvements to MyGrad's error-checking and handling, in order to make the process of debugging buggy custom operations much simpler. Specifically, :func:`~mygrad.operation_base.Operation.backward` now checks for invalid gradients on each call of :func:`~mygrad.operation_base.Operation.backward_var`, and raises a descriptive error.
+
+``mygrad.errors`` was introduced to provide descriptive, MyGrad-specific exceptions. For example, we no longer raise bare exceptions for scenarios like invalid backprop through a scalar-only graph; rather, we now raise a descriptive ``InvalidBackprop`` exception.
+
+MyGrad's testing framework received wide-ranging improvements, yielding complete test coverage and fewer flaky tests. Coverage checks were added to the project's CI process.
+
+:func:`~mygrad.maximum` and :func:`~mygrad.minimum` were patched to permit backpropagation through scalar inputs.
+
+Internal implementation details of :func:`~mygrad.einsum` were adjusted to remove redundant code in its backpropagation machinery.
+
+:func:`~mygrad.Tensor.null_gradients` was refactored to ensure that only a single traversal of the computational graph is performed to null all of the tensors' gradients. Furthermore, `Tensor.null_gradients(clear_graph=True)` now only performs a single graph traversal, instead of two.
+
+In keeping with NumPy's behavior, performing `+x` (where `x` is a mygrad-tensor) no longer returns a reference to `x`, but returns `mygrad.positive(x)`.
+
+Backpropagation through :func:`~mygrad.max` and :func:`~mygrad.min` now works for 0D tensors.
+
+Input validation was added to :func:`mygrad.nnet.layers.utils.sliding_window_view`.
+
+Fixed backpropagation through basic indexing, `x[ind] = b`, in which broadcasting occurred and `b` possessed "excess" leading singleton dimensions.
+
diff --git a/docs/_sources/generated/mygrad.Tensor.T.rst.txt b/docs/_sources/generated/mygrad.Tensor.T.rst.txt
new file mode 100644
index 00000000..362494ad
--- /dev/null
+++ b/docs/_sources/generated/mygrad.Tensor.T.rst.txt
@@ -0,0 +1,6 @@
+mygrad.Tensor.T
+===============
+
+.. currentmodule:: mygrad
+
+.. autoproperty:: Tensor.T
\ No newline at end of file
diff --git a/docs/_sources/generated/mygrad.Tensor.astype.rst.txt b/docs/_sources/generated/mygrad.Tensor.astype.rst.txt
new file mode 100644
index 00000000..75dc0340
--- /dev/null
+++ b/docs/_sources/generated/mygrad.Tensor.astype.rst.txt
@@ -0,0 +1,6 @@
+mygrad.Tensor.astype
+====================
+
+.. currentmodule:: mygrad
+
+.. automethod:: Tensor.astype
\ No newline at end of file
diff --git a/docs/_sources/generated/mygrad.Tensor.backward.rst.txt b/docs/_sources/generated/mygrad.Tensor.backward.rst.txt
new file mode 100644
index 00000000..c8535e27
--- /dev/null
+++ b/docs/_sources/generated/mygrad.Tensor.backward.rst.txt
@@ -0,0 +1,6 @@
+mygrad.Tensor.backward
+======================
+
+.. currentmodule:: mygrad
+
+.. automethod:: Tensor.backward
\ No newline at end of file
diff --git a/docs/_sources/generated/mygrad.Tensor.base.rst.txt b/docs/_sources/generated/mygrad.Tensor.base.rst.txt
new file mode 100644
index 00000000..604f1b5a
--- /dev/null
+++ b/docs/_sources/generated/mygrad.Tensor.base.rst.txt
@@ -0,0 +1,6 @@
+mygrad.Tensor.base
+==================
+
+.. currentmodule:: mygrad
+
+..
autoproperty:: Tensor.base \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.clear_graph.rst.txt b/docs/_sources/generated/mygrad.Tensor.clear_graph.rst.txt new file mode 100644 index 00000000..2367e0b9 --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.clear_graph.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.clear\_graph +========================== + +.. currentmodule:: mygrad + +.. automethod:: Tensor.clear_graph \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.constant.rst.txt b/docs/_sources/generated/mygrad.Tensor.constant.rst.txt new file mode 100644 index 00000000..f209bb2d --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.constant.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.constant +====================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.constant \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.copy.rst.txt b/docs/_sources/generated/mygrad.Tensor.copy.rst.txt new file mode 100644 index 00000000..94258d4f --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.copy.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.copy +================== + +.. currentmodule:: mygrad + +.. automethod:: Tensor.copy \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.creator.rst.txt b/docs/_sources/generated/mygrad.Tensor.creator.rst.txt new file mode 100644 index 00000000..e70f5560 --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.creator.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.creator +===================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.creator \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.dtype.rst.txt b/docs/_sources/generated/mygrad.Tensor.dtype.rst.txt new file mode 100644 index 00000000..310ee1d2 --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.dtype.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.dtype +=================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.dtype \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.flatten.rst.txt b/docs/_sources/generated/mygrad.Tensor.flatten.rst.txt new file mode 100644 index 00000000..cea6a5dd --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.flatten.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.flatten +===================== + +.. currentmodule:: mygrad + +.. automethod:: Tensor.flatten \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.grad.rst.txt b/docs/_sources/generated/mygrad.Tensor.grad.rst.txt new file mode 100644 index 00000000..53d386ef --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.grad.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.grad +================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.grad \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.item.rst.txt b/docs/_sources/generated/mygrad.Tensor.item.rst.txt new file mode 100644 index 00000000..e217f16e --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.item.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.item +================== + +.. currentmodule:: mygrad + +.. automethod:: Tensor.item \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.ndim.rst.txt b/docs/_sources/generated/mygrad.Tensor.ndim.rst.txt new file mode 100644 index 00000000..1082359e --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.ndim.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.ndim +================== + +.. currentmodule:: mygrad + +.. 
autoproperty:: Tensor.ndim \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.null_grad.rst.txt b/docs/_sources/generated/mygrad.Tensor.null_grad.rst.txt new file mode 100644 index 00000000..f26b2dba --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.null_grad.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.null\_grad +======================== + +.. currentmodule:: mygrad + +.. automethod:: Tensor.null_grad \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.null_gradients.rst.txt b/docs/_sources/generated/mygrad.Tensor.null_gradients.rst.txt new file mode 100644 index 00000000..7f3bda22 --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.null_gradients.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.null\_gradients +============================= + +.. currentmodule:: mygrad + +.. automethod:: Tensor.null_gradients \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.shape.rst.txt b/docs/_sources/generated/mygrad.Tensor.shape.rst.txt new file mode 100644 index 00000000..85000718 --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.shape.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.shape +=================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.shape \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.Tensor.size.rst.txt b/docs/_sources/generated/mygrad.Tensor.size.rst.txt new file mode 100644 index 00000000..60a0dd0d --- /dev/null +++ b/docs/_sources/generated/mygrad.Tensor.size.rst.txt @@ -0,0 +1,6 @@ +mygrad.Tensor.size +================== + +.. currentmodule:: mygrad + +.. autoproperty:: Tensor.size \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.absolute.rst.txt b/docs/_sources/generated/mygrad.absolute.rst.txt new file mode 100644 index 00000000..605b6e1a --- /dev/null +++ b/docs/_sources/generated/mygrad.absolute.rst.txt @@ -0,0 +1,47 @@ +mygrad.absolute +=============== + +.. currentmodule:: mygrad + +.. autoclass:: absolute + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~absolute.__init__ + ~absolute.accumulate + ~absolute.at + ~absolute.outer + ~absolute.reduce + ~absolute.reduceat + ~absolute.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~absolute.identity + ~absolute.nargs + ~absolute.nin + ~absolute.nout + ~absolute.ntypes + ~absolute.signature + ~absolute.types + ~absolute.x + ~absolute.out + ~absolute.where + ~absolute.dtype + ~absolute.constant + ~absolute.nan_to_num + ~absolute.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.add.rst.txt b/docs/_sources/generated/mygrad.add.rst.txt new file mode 100644 index 00000000..ba0dda1f --- /dev/null +++ b/docs/_sources/generated/mygrad.add.rst.txt @@ -0,0 +1,47 @@ +mygrad.add +========== + +.. currentmodule:: mygrad + +.. autoclass:: add + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~add.__init__ + ~add.accumulate + ~add.at + ~add.outer + ~add.reduce + ~add.reduceat + ~add.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~add.identity + ~add.nargs + ~add.nin + ~add.nout + ~add.ntypes + ~add.signature + ~add.types + ~add.x1 + ~add.x2 + ~add.out + ~add.where + ~add.dtype + ~add.constant + ~add.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.add_sequence.rst.txt b/docs/_sources/generated/mygrad.add_sequence.rst.txt new file mode 100644 index 00000000..b7692348 --- /dev/null +++ b/docs/_sources/generated/mygrad.add_sequence.rst.txt @@ -0,0 +1,6 @@ +mygrad.add\_sequence +==================== + +.. currentmodule:: mygrad + +.. autofunction:: add_sequence \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.amax.rst.txt b/docs/_sources/generated/mygrad.amax.rst.txt new file mode 100644 index 00000000..5f8758d6 --- /dev/null +++ b/docs/_sources/generated/mygrad.amax.rst.txt @@ -0,0 +1,6 @@ +mygrad.amax +=========== + +.. currentmodule:: mygrad + +.. autofunction:: amax \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.amin.rst.txt b/docs/_sources/generated/mygrad.amin.rst.txt new file mode 100644 index 00000000..44044313 --- /dev/null +++ b/docs/_sources/generated/mygrad.amin.rst.txt @@ -0,0 +1,6 @@ +mygrad.amin +=========== + +.. currentmodule:: mygrad + +.. autofunction:: amin \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arange.rst.txt b/docs/_sources/generated/mygrad.arange.rst.txt new file mode 100644 index 00000000..e4dc963a --- /dev/null +++ b/docs/_sources/generated/mygrad.arange.rst.txt @@ -0,0 +1,6 @@ +mygrad.arange +============= + +.. currentmodule:: mygrad + +.. autofunction:: arange \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arccos.rst.txt b/docs/_sources/generated/mygrad.arccos.rst.txt new file mode 100644 index 00000000..db48b327 --- /dev/null +++ b/docs/_sources/generated/mygrad.arccos.rst.txt @@ -0,0 +1,46 @@ +mygrad.arccos +============= + +.. currentmodule:: mygrad + +.. autoclass:: arccos + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arccos.__init__ + ~arccos.accumulate + ~arccos.at + ~arccos.outer + ~arccos.reduce + ~arccos.reduceat + ~arccos.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~arccos.identity + ~arccos.nargs + ~arccos.nin + ~arccos.nout + ~arccos.ntypes + ~arccos.signature + ~arccos.types + ~arccos.x + ~arccos.out + ~arccos.where + ~arccos.dtype + ~arccos.constant + ~arccos.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arccosh.rst.txt b/docs/_sources/generated/mygrad.arccosh.rst.txt new file mode 100644 index 00000000..25357922 --- /dev/null +++ b/docs/_sources/generated/mygrad.arccosh.rst.txt @@ -0,0 +1,46 @@ +mygrad.arccosh +============== + +.. currentmodule:: mygrad + +.. autoclass:: arccosh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arccosh.__init__ + ~arccosh.accumulate + ~arccosh.at + ~arccosh.outer + ~arccosh.reduce + ~arccosh.reduceat + ~arccosh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~arccosh.identity + ~arccosh.nargs + ~arccosh.nin + ~arccosh.nout + ~arccosh.ntypes + ~arccosh.signature + ~arccosh.types + ~arccosh.x + ~arccosh.out + ~arccosh.where + ~arccosh.dtype + ~arccosh.constant + ~arccosh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arcsin.rst.txt b/docs/_sources/generated/mygrad.arcsin.rst.txt new file mode 100644 index 00000000..ba9f044e --- /dev/null +++ b/docs/_sources/generated/mygrad.arcsin.rst.txt @@ -0,0 +1,46 @@ +mygrad.arcsin +============= + +.. currentmodule:: mygrad + +.. autoclass:: arcsin + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arcsin.__init__ + ~arcsin.accumulate + ~arcsin.at + ~arcsin.outer + ~arcsin.reduce + ~arcsin.reduceat + ~arcsin.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~arcsin.identity + ~arcsin.nargs + ~arcsin.nin + ~arcsin.nout + ~arcsin.ntypes + ~arcsin.signature + ~arcsin.types + ~arcsin.x + ~arcsin.out + ~arcsin.where + ~arcsin.dtype + ~arcsin.constant + ~arcsin.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arcsinh.rst.txt b/docs/_sources/generated/mygrad.arcsinh.rst.txt new file mode 100644 index 00000000..c646c73a --- /dev/null +++ b/docs/_sources/generated/mygrad.arcsinh.rst.txt @@ -0,0 +1,46 @@ +mygrad.arcsinh +============== + +.. currentmodule:: mygrad + +.. autoclass:: arcsinh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arcsinh.__init__ + ~arcsinh.accumulate + ~arcsinh.at + ~arcsinh.outer + ~arcsinh.reduce + ~arcsinh.reduceat + ~arcsinh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~arcsinh.identity + ~arcsinh.nargs + ~arcsinh.nin + ~arcsinh.nout + ~arcsinh.ntypes + ~arcsinh.signature + ~arcsinh.types + ~arcsinh.x + ~arcsinh.out + ~arcsinh.where + ~arcsinh.dtype + ~arcsinh.constant + ~arcsinh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arctan.rst.txt b/docs/_sources/generated/mygrad.arctan.rst.txt new file mode 100644 index 00000000..8db75964 --- /dev/null +++ b/docs/_sources/generated/mygrad.arctan.rst.txt @@ -0,0 +1,46 @@ +mygrad.arctan +============= + +.. currentmodule:: mygrad + +.. autoclass:: arctan + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arctan.__init__ + ~arctan.accumulate + ~arctan.at + ~arctan.outer + ~arctan.reduce + ~arctan.reduceat + ~arctan.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~arctan.identity + ~arctan.nargs + ~arctan.nin + ~arctan.nout + ~arctan.ntypes + ~arctan.signature + ~arctan.types + ~arctan.x + ~arctan.out + ~arctan.where + ~arctan.dtype + ~arctan.constant + ~arctan.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arctan2.rst.txt b/docs/_sources/generated/mygrad.arctan2.rst.txt new file mode 100644 index 00000000..d9c6ed38 --- /dev/null +++ b/docs/_sources/generated/mygrad.arctan2.rst.txt @@ -0,0 +1,47 @@ +mygrad.arctan2 +============== + +.. currentmodule:: mygrad + +.. autoclass:: arctan2 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arctan2.__init__ + ~arctan2.accumulate + ~arctan2.at + ~arctan2.outer + ~arctan2.reduce + ~arctan2.reduceat + ~arctan2.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~arctan2.identity + ~arctan2.nargs + ~arctan2.nin + ~arctan2.nout + ~arctan2.ntypes + ~arctan2.signature + ~arctan2.types + ~arctan2.x1 + ~arctan2.x2 + ~arctan2.out + ~arctan2.where + ~arctan2.dtype + ~arctan2.constant + ~arctan2.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.arctanh.rst.txt b/docs/_sources/generated/mygrad.arctanh.rst.txt new file mode 100644 index 00000000..38ff7903 --- /dev/null +++ b/docs/_sources/generated/mygrad.arctanh.rst.txt @@ -0,0 +1,46 @@ +mygrad.arctanh +============== + +.. currentmodule:: mygrad + +.. autoclass:: arctanh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~arctanh.__init__ + ~arctanh.accumulate + ~arctanh.at + ~arctanh.outer + ~arctanh.reduce + ~arctanh.reduceat + ~arctanh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~arctanh.identity + ~arctanh.nargs + ~arctanh.nin + ~arctanh.nout + ~arctanh.ntypes + ~arctanh.signature + ~arctanh.types + ~arctanh.x + ~arctanh.out + ~arctanh.where + ~arctanh.dtype + ~arctanh.constant + ~arctanh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.asarray.rst.txt b/docs/_sources/generated/mygrad.asarray.rst.txt new file mode 100644 index 00000000..45bd689e --- /dev/null +++ b/docs/_sources/generated/mygrad.asarray.rst.txt @@ -0,0 +1,6 @@ +mygrad.asarray +============== + +.. currentmodule:: mygrad + +.. autofunction:: asarray \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.astensor.rst.txt b/docs/_sources/generated/mygrad.astensor.rst.txt new file mode 100644 index 00000000..f621a016 --- /dev/null +++ b/docs/_sources/generated/mygrad.astensor.rst.txt @@ -0,0 +1,6 @@ +mygrad.astensor +=============== + +.. currentmodule:: mygrad + +.. autofunction:: astensor \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.atleast_1d.rst.txt b/docs/_sources/generated/mygrad.atleast_1d.rst.txt new file mode 100644 index 00000000..85560bf8 --- /dev/null +++ b/docs/_sources/generated/mygrad.atleast_1d.rst.txt @@ -0,0 +1,6 @@ +mygrad.atleast\_1d +================== + +.. currentmodule:: mygrad + +.. autofunction:: atleast_1d \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.atleast_2d.rst.txt b/docs/_sources/generated/mygrad.atleast_2d.rst.txt new file mode 100644 index 00000000..353e7c5e --- /dev/null +++ b/docs/_sources/generated/mygrad.atleast_2d.rst.txt @@ -0,0 +1,6 @@ +mygrad.atleast\_2d +================== + +.. currentmodule:: mygrad + +.. autofunction:: atleast_2d \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.atleast_3d.rst.txt b/docs/_sources/generated/mygrad.atleast_3d.rst.txt new file mode 100644 index 00000000..b8d273b1 --- /dev/null +++ b/docs/_sources/generated/mygrad.atleast_3d.rst.txt @@ -0,0 +1,6 @@ +mygrad.atleast\_3d +================== + +.. currentmodule:: mygrad + +.. autofunction:: atleast_3d \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.broadcast_to.rst.txt b/docs/_sources/generated/mygrad.broadcast_to.rst.txt new file mode 100644 index 00000000..41c78c34 --- /dev/null +++ b/docs/_sources/generated/mygrad.broadcast_to.rst.txt @@ -0,0 +1,6 @@ +mygrad.broadcast\_to +==================== + +.. currentmodule:: mygrad + +.. 
autofunction:: broadcast_to \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.cbrt.rst.txt b/docs/_sources/generated/mygrad.cbrt.rst.txt new file mode 100644 index 00000000..d1b66d0e --- /dev/null +++ b/docs/_sources/generated/mygrad.cbrt.rst.txt @@ -0,0 +1,46 @@ +mygrad.cbrt +=========== + +.. currentmodule:: mygrad + +.. autoclass:: cbrt + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~cbrt.__init__ + ~cbrt.accumulate + ~cbrt.at + ~cbrt.outer + ~cbrt.reduce + ~cbrt.reduceat + ~cbrt.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~cbrt.identity + ~cbrt.nargs + ~cbrt.nin + ~cbrt.nout + ~cbrt.ntypes + ~cbrt.signature + ~cbrt.types + ~cbrt.x + ~cbrt.out + ~cbrt.where + ~cbrt.dtype + ~cbrt.constant + ~cbrt.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.clip.rst.txt b/docs/_sources/generated/mygrad.clip.rst.txt new file mode 100644 index 00000000..af8289ce --- /dev/null +++ b/docs/_sources/generated/mygrad.clip.rst.txt @@ -0,0 +1,6 @@ +mygrad.clip +=========== + +.. currentmodule:: mygrad + +.. autofunction:: clip \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.computational_graph.build_graph.rst.txt b/docs/_sources/generated/mygrad.computational_graph.build_graph.rst.txt new file mode 100644 index 00000000..a00b8a06 --- /dev/null +++ b/docs/_sources/generated/mygrad.computational_graph.build_graph.rst.txt @@ -0,0 +1,6 @@ +mygrad.computational\_graph.build\_graph +======================================== + +.. currentmodule:: mygrad.computational_graph + +.. autofunction:: build_graph \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.concatenate.rst.txt b/docs/_sources/generated/mygrad.concatenate.rst.txt new file mode 100644 index 00000000..8863b702 --- /dev/null +++ b/docs/_sources/generated/mygrad.concatenate.rst.txt @@ -0,0 +1,6 @@ +mygrad.concatenate +================== + +.. currentmodule:: mygrad + +.. autofunction:: concatenate \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.cos.rst.txt b/docs/_sources/generated/mygrad.cos.rst.txt new file mode 100644 index 00000000..7b094f88 --- /dev/null +++ b/docs/_sources/generated/mygrad.cos.rst.txt @@ -0,0 +1,46 @@ +mygrad.cos +========== + +.. currentmodule:: mygrad + +.. autoclass:: cos + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~cos.__init__ + ~cos.accumulate + ~cos.at + ~cos.outer + ~cos.reduce + ~cos.reduceat + ~cos.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~cos.identity + ~cos.nargs + ~cos.nin + ~cos.nout + ~cos.ntypes + ~cos.signature + ~cos.types + ~cos.x + ~cos.out + ~cos.where + ~cos.dtype + ~cos.constant + ~cos.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.cosh.rst.txt b/docs/_sources/generated/mygrad.cosh.rst.txt new file mode 100644 index 00000000..fc5f0d46 --- /dev/null +++ b/docs/_sources/generated/mygrad.cosh.rst.txt @@ -0,0 +1,46 @@ +mygrad.cosh +=========== + +.. currentmodule:: mygrad + +.. autoclass:: cosh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~cosh.__init__ + ~cosh.accumulate + ~cosh.at + ~cosh.outer + ~cosh.reduce + ~cosh.reduceat + ~cosh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~cosh.identity + ~cosh.nargs + ~cosh.nin + ~cosh.nout + ~cosh.ntypes + ~cosh.signature + ~cosh.types + ~cosh.x + ~cosh.out + ~cosh.where + ~cosh.dtype + ~cosh.constant + ~cosh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.cumprod.rst.txt b/docs/_sources/generated/mygrad.cumprod.rst.txt new file mode 100644 index 00000000..59ff8120 --- /dev/null +++ b/docs/_sources/generated/mygrad.cumprod.rst.txt @@ -0,0 +1,6 @@ +mygrad.cumprod +============== + +.. currentmodule:: mygrad + +.. autofunction:: cumprod \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.cumsum.rst.txt b/docs/_sources/generated/mygrad.cumsum.rst.txt new file mode 100644 index 00000000..ac407dc5 --- /dev/null +++ b/docs/_sources/generated/mygrad.cumsum.rst.txt @@ -0,0 +1,6 @@ +mygrad.cumsum +============= + +.. currentmodule:: mygrad + +.. autofunction:: cumsum \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.divide.rst.txt b/docs/_sources/generated/mygrad.divide.rst.txt new file mode 100644 index 00000000..eb43ebb8 --- /dev/null +++ b/docs/_sources/generated/mygrad.divide.rst.txt @@ -0,0 +1,47 @@ +mygrad.divide +============= + +.. currentmodule:: mygrad + +.. autoclass:: divide + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~divide.__init__ + ~divide.accumulate + ~divide.at + ~divide.outer + ~divide.reduce + ~divide.reduceat + ~divide.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~divide.identity + ~divide.nargs + ~divide.nin + ~divide.nout + ~divide.ntypes + ~divide.signature + ~divide.types + ~divide.x1 + ~divide.x2 + ~divide.out + ~divide.where + ~divide.dtype + ~divide.constant + ~divide.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.einsum.rst.txt b/docs/_sources/generated/mygrad.einsum.rst.txt new file mode 100644 index 00000000..050092fe --- /dev/null +++ b/docs/_sources/generated/mygrad.einsum.rst.txt @@ -0,0 +1,6 @@ +mygrad.einsum +============= + +.. currentmodule:: mygrad + +.. autofunction:: einsum \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.empty.rst.txt b/docs/_sources/generated/mygrad.empty.rst.txt new file mode 100644 index 00000000..cb404ff3 --- /dev/null +++ b/docs/_sources/generated/mygrad.empty.rst.txt @@ -0,0 +1,6 @@ +mygrad.empty +============ + +.. currentmodule:: mygrad + +.. autofunction:: empty \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.empty_like.rst.txt b/docs/_sources/generated/mygrad.empty_like.rst.txt new file mode 100644 index 00000000..32dd193c --- /dev/null +++ b/docs/_sources/generated/mygrad.empty_like.rst.txt @@ -0,0 +1,6 @@ +mygrad.empty\_like +================== + +.. currentmodule:: mygrad + +.. autofunction:: empty_like \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.exp.rst.txt b/docs/_sources/generated/mygrad.exp.rst.txt new file mode 100644 index 00000000..43fdc92a --- /dev/null +++ b/docs/_sources/generated/mygrad.exp.rst.txt @@ -0,0 +1,46 @@ +mygrad.exp +========== + +.. currentmodule:: mygrad + +.. autoclass:: exp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~exp.__init__ + ~exp.accumulate + ~exp.at + ~exp.outer + ~exp.reduce + ~exp.reduceat + ~exp.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~exp.identity + ~exp.nargs + ~exp.nin + ~exp.nout + ~exp.ntypes + ~exp.signature + ~exp.types + ~exp.x1 + ~exp.out + ~exp.where + ~exp.dtype + ~exp.constant + ~exp.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.exp2.rst.txt b/docs/_sources/generated/mygrad.exp2.rst.txt new file mode 100644 index 00000000..c0e90877 --- /dev/null +++ b/docs/_sources/generated/mygrad.exp2.rst.txt @@ -0,0 +1,46 @@ +mygrad.exp2 +=========== + +.. currentmodule:: mygrad + +.. autoclass:: exp2 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~exp2.__init__ + ~exp2.accumulate + ~exp2.at + ~exp2.outer + ~exp2.reduce + ~exp2.reduceat + ~exp2.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~exp2.identity + ~exp2.nargs + ~exp2.nin + ~exp2.nout + ~exp2.ntypes + ~exp2.signature + ~exp2.types + ~exp2.x1 + ~exp2.out + ~exp2.where + ~exp2.dtype + ~exp2.constant + ~exp2.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.expand_dims.rst.txt b/docs/_sources/generated/mygrad.expand_dims.rst.txt new file mode 100644 index 00000000..10f1aa6e --- /dev/null +++ b/docs/_sources/generated/mygrad.expand_dims.rst.txt @@ -0,0 +1,6 @@ +mygrad.expand\_dims +=================== + +.. currentmodule:: mygrad + +.. autofunction:: expand_dims \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.expm1.rst.txt b/docs/_sources/generated/mygrad.expm1.rst.txt new file mode 100644 index 00000000..ef39667e --- /dev/null +++ b/docs/_sources/generated/mygrad.expm1.rst.txt @@ -0,0 +1,46 @@ +mygrad.expm1 +============ + +.. currentmodule:: mygrad + +.. autoclass:: expm1 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~expm1.__init__ + ~expm1.accumulate + ~expm1.at + ~expm1.outer + ~expm1.reduce + ~expm1.reduceat + ~expm1.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~expm1.identity + ~expm1.nargs + ~expm1.nin + ~expm1.nout + ~expm1.ntypes + ~expm1.signature + ~expm1.types + ~expm1.x1 + ~expm1.out + ~expm1.where + ~expm1.dtype + ~expm1.constant + ~expm1.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.eye.rst.txt b/docs/_sources/generated/mygrad.eye.rst.txt new file mode 100644 index 00000000..35234862 --- /dev/null +++ b/docs/_sources/generated/mygrad.eye.rst.txt @@ -0,0 +1,6 @@ +mygrad.eye +========== + +.. currentmodule:: mygrad + +.. autofunction:: eye \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.full.rst.txt b/docs/_sources/generated/mygrad.full.rst.txt new file mode 100644 index 00000000..a4124460 --- /dev/null +++ b/docs/_sources/generated/mygrad.full.rst.txt @@ -0,0 +1,6 @@ +mygrad.full +=========== + +.. currentmodule:: mygrad + +.. autofunction:: full \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.full_like.rst.txt b/docs/_sources/generated/mygrad.full_like.rst.txt new file mode 100644 index 00000000..1dc0588f --- /dev/null +++ b/docs/_sources/generated/mygrad.full_like.rst.txt @@ -0,0 +1,6 @@ +mygrad.full\_like +================= + +.. currentmodule:: mygrad + +.. autofunction:: full_like \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.geomspace.rst.txt b/docs/_sources/generated/mygrad.geomspace.rst.txt new file mode 100644 index 00000000..e93f85f6 --- /dev/null +++ b/docs/_sources/generated/mygrad.geomspace.rst.txt @@ -0,0 +1,6 @@ +mygrad.geomspace +================ + +.. currentmodule:: mygrad + +.. 
autofunction:: geomspace \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.identity.rst.txt b/docs/_sources/generated/mygrad.identity.rst.txt new file mode 100644 index 00000000..6988d8f9 --- /dev/null +++ b/docs/_sources/generated/mygrad.identity.rst.txt @@ -0,0 +1,6 @@ +mygrad.identity +=============== + +.. currentmodule:: mygrad + +.. autofunction:: identity \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.linalg.norm.rst.txt b/docs/_sources/generated/mygrad.linalg.norm.rst.txt new file mode 100644 index 00000000..5f9c4e1a --- /dev/null +++ b/docs/_sources/generated/mygrad.linalg.norm.rst.txt @@ -0,0 +1,6 @@ +mygrad.linalg.norm +================== + +.. currentmodule:: mygrad.linalg + +.. autofunction:: norm \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.linspace.rst.txt b/docs/_sources/generated/mygrad.linspace.rst.txt new file mode 100644 index 00000000..402306b2 --- /dev/null +++ b/docs/_sources/generated/mygrad.linspace.rst.txt @@ -0,0 +1,6 @@ +mygrad.linspace +=============== + +.. currentmodule:: mygrad + +.. autofunction:: linspace \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.load.rst.txt b/docs/_sources/generated/mygrad.load.rst.txt new file mode 100644 index 00000000..706e3ed1 --- /dev/null +++ b/docs/_sources/generated/mygrad.load.rst.txt @@ -0,0 +1,6 @@ +mygrad.load +=========== + +.. currentmodule:: mygrad + +.. autofunction:: load \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.log.rst.txt b/docs/_sources/generated/mygrad.log.rst.txt new file mode 100644 index 00000000..d3cc4558 --- /dev/null +++ b/docs/_sources/generated/mygrad.log.rst.txt @@ -0,0 +1,46 @@ +mygrad.log +========== + +.. currentmodule:: mygrad + +.. autoclass:: log + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~log.__init__ + ~log.accumulate + ~log.at + ~log.outer + ~log.reduce + ~log.reduceat + ~log.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~log.identity + ~log.nargs + ~log.nin + ~log.nout + ~log.ntypes + ~log.signature + ~log.types + ~log.x1 + ~log.out + ~log.where + ~log.dtype + ~log.constant + ~log.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.log10.rst.txt b/docs/_sources/generated/mygrad.log10.rst.txt new file mode 100644 index 00000000..551d41c6 --- /dev/null +++ b/docs/_sources/generated/mygrad.log10.rst.txt @@ -0,0 +1,46 @@ +mygrad.log10 +============ + +.. currentmodule:: mygrad + +.. autoclass:: log10 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~log10.__init__ + ~log10.accumulate + ~log10.at + ~log10.outer + ~log10.reduce + ~log10.reduceat + ~log10.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~log10.identity + ~log10.nargs + ~log10.nin + ~log10.nout + ~log10.ntypes + ~log10.signature + ~log10.types + ~log10.x1 + ~log10.out + ~log10.where + ~log10.dtype + ~log10.constant + ~log10.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.log1p.rst.txt b/docs/_sources/generated/mygrad.log1p.rst.txt new file mode 100644 index 00000000..1bdf4a96 --- /dev/null +++ b/docs/_sources/generated/mygrad.log1p.rst.txt @@ -0,0 +1,46 @@ +mygrad.log1p +============ + +.. currentmodule:: mygrad + +.. autoclass:: log1p + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. 
autosummary:: + + ~log1p.__init__ + ~log1p.accumulate + ~log1p.at + ~log1p.outer + ~log1p.reduce + ~log1p.reduceat + ~log1p.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~log1p.identity + ~log1p.nargs + ~log1p.nin + ~log1p.nout + ~log1p.ntypes + ~log1p.signature + ~log1p.types + ~log1p.x1 + ~log1p.out + ~log1p.where + ~log1p.dtype + ~log1p.constant + ~log1p.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.log2.rst.txt b/docs/_sources/generated/mygrad.log2.rst.txt new file mode 100644 index 00000000..2a6713c5 --- /dev/null +++ b/docs/_sources/generated/mygrad.log2.rst.txt @@ -0,0 +1,46 @@ +mygrad.log2 +=========== + +.. currentmodule:: mygrad + +.. autoclass:: log2 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~log2.__init__ + ~log2.accumulate + ~log2.at + ~log2.outer + ~log2.reduce + ~log2.reduceat + ~log2.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~log2.identity + ~log2.nargs + ~log2.nin + ~log2.nout + ~log2.ntypes + ~log2.signature + ~log2.types + ~log2.x1 + ~log2.out + ~log2.where + ~log2.dtype + ~log2.constant + ~log2.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.logaddexp.rst.txt b/docs/_sources/generated/mygrad.logaddexp.rst.txt new file mode 100644 index 00000000..1b8ee14d --- /dev/null +++ b/docs/_sources/generated/mygrad.logaddexp.rst.txt @@ -0,0 +1,47 @@ +mygrad.logaddexp +================ + +.. currentmodule:: mygrad + +.. autoclass:: logaddexp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~logaddexp.__init__ + ~logaddexp.accumulate + ~logaddexp.at + ~logaddexp.outer + ~logaddexp.reduce + ~logaddexp.reduceat + ~logaddexp.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~logaddexp.identity + ~logaddexp.nargs + ~logaddexp.nin + ~logaddexp.nout + ~logaddexp.ntypes + ~logaddexp.signature + ~logaddexp.types + ~logaddexp.x1 + ~logaddexp.x2 + ~logaddexp.out + ~logaddexp.where + ~logaddexp.dtype + ~logaddexp.constant + ~logaddexp.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.logaddexp2.rst.txt b/docs/_sources/generated/mygrad.logaddexp2.rst.txt new file mode 100644 index 00000000..08049556 --- /dev/null +++ b/docs/_sources/generated/mygrad.logaddexp2.rst.txt @@ -0,0 +1,47 @@ +mygrad.logaddexp2 +================= + +.. currentmodule:: mygrad + +.. autoclass:: logaddexp2 + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~logaddexp2.__init__ + ~logaddexp2.accumulate + ~logaddexp2.at + ~logaddexp2.outer + ~logaddexp2.reduce + ~logaddexp2.reduceat + ~logaddexp2.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~logaddexp2.identity + ~logaddexp2.nargs + ~logaddexp2.nin + ~logaddexp2.nout + ~logaddexp2.ntypes + ~logaddexp2.signature + ~logaddexp2.types + ~logaddexp2.x1 + ~logaddexp2.x2 + ~logaddexp2.out + ~logaddexp2.where + ~logaddexp2.dtype + ~logaddexp2.constant + ~logaddexp2.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.logspace.rst.txt b/docs/_sources/generated/mygrad.logspace.rst.txt new file mode 100644 index 00000000..5824d7d4 --- /dev/null +++ b/docs/_sources/generated/mygrad.logspace.rst.txt @@ -0,0 +1,6 @@ +mygrad.logspace +=============== + +.. currentmodule:: mygrad + +.. 
autofunction:: logspace \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.matmul.rst.txt b/docs/_sources/generated/mygrad.matmul.rst.txt new file mode 100644 index 00000000..bd496a46 --- /dev/null +++ b/docs/_sources/generated/mygrad.matmul.rst.txt @@ -0,0 +1,46 @@ +mygrad.matmul +============= + +.. currentmodule:: mygrad + +.. autoclass:: matmul + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~matmul.__init__ + ~matmul.accumulate + ~matmul.at + ~matmul.outer + ~matmul.reduce + ~matmul.reduceat + ~matmul.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~matmul.identity + ~matmul.nargs + ~matmul.nin + ~matmul.nout + ~matmul.ntypes + ~matmul.signature + ~matmul.types + ~matmul.x1 + ~matmul.x2 + ~matmul.out + ~matmul.dtype + ~matmul.constant + ~matmul.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.max.rst.txt b/docs/_sources/generated/mygrad.max.rst.txt new file mode 100644 index 00000000..11e17efb --- /dev/null +++ b/docs/_sources/generated/mygrad.max.rst.txt @@ -0,0 +1,6 @@ +mygrad.max +========== + +.. currentmodule:: mygrad + +.. autofunction:: max \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.maximum.rst.txt b/docs/_sources/generated/mygrad.maximum.rst.txt new file mode 100644 index 00000000..ffa6cddf --- /dev/null +++ b/docs/_sources/generated/mygrad.maximum.rst.txt @@ -0,0 +1,47 @@ +mygrad.maximum +============== + +.. currentmodule:: mygrad + +.. autoclass:: maximum + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~maximum.__init__ + ~maximum.accumulate + ~maximum.at + ~maximum.outer + ~maximum.reduce + ~maximum.reduceat + ~maximum.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~maximum.identity + ~maximum.nargs + ~maximum.nin + ~maximum.nout + ~maximum.ntypes + ~maximum.signature + ~maximum.types + ~maximum.x1 + ~maximum.x2 + ~maximum.out + ~maximum.where + ~maximum.dtype + ~maximum.constant + ~maximum.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.mean.rst.txt b/docs/_sources/generated/mygrad.mean.rst.txt new file mode 100644 index 00000000..5a958760 --- /dev/null +++ b/docs/_sources/generated/mygrad.mean.rst.txt @@ -0,0 +1,6 @@ +mygrad.mean +=========== + +.. currentmodule:: mygrad + +.. autofunction:: mean \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.mem_guard_off.rst.txt b/docs/_sources/generated/mygrad.mem_guard_off.rst.txt new file mode 100644 index 00000000..bc9ec7af --- /dev/null +++ b/docs/_sources/generated/mygrad.mem_guard_off.rst.txt @@ -0,0 +1,6 @@ +mygrad.mem\_guard\_off +====================== + +.. currentmodule:: mygrad + +.. autodata:: mem_guard_off \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.mem_guard_on.rst.txt b/docs/_sources/generated/mygrad.mem_guard_on.rst.txt new file mode 100644 index 00000000..d8cb4eee --- /dev/null +++ b/docs/_sources/generated/mygrad.mem_guard_on.rst.txt @@ -0,0 +1,6 @@ +mygrad.mem\_guard\_on +===================== + +.. currentmodule:: mygrad + +.. autodata:: mem_guard_on \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.min.rst.txt b/docs/_sources/generated/mygrad.min.rst.txt new file mode 100644 index 00000000..cf899cc9 --- /dev/null +++ b/docs/_sources/generated/mygrad.min.rst.txt @@ -0,0 +1,6 @@ +mygrad.min +========== + +.. currentmodule:: mygrad + +.. 
autofunction:: min \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.minimum.rst.txt b/docs/_sources/generated/mygrad.minimum.rst.txt new file mode 100644 index 00000000..fe6b63e5 --- /dev/null +++ b/docs/_sources/generated/mygrad.minimum.rst.txt @@ -0,0 +1,47 @@ +mygrad.minimum +============== + +.. currentmodule:: mygrad + +.. autoclass:: minimum + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~minimum.__init__ + ~minimum.accumulate + ~minimum.at + ~minimum.outer + ~minimum.reduce + ~minimum.reduceat + ~minimum.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~minimum.identity + ~minimum.nargs + ~minimum.nin + ~minimum.nout + ~minimum.ntypes + ~minimum.signature + ~minimum.types + ~minimum.x1 + ~minimum.x2 + ~minimum.out + ~minimum.where + ~minimum.dtype + ~minimum.constant + ~minimum.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.moveaxis.rst.txt b/docs/_sources/generated/mygrad.moveaxis.rst.txt new file mode 100644 index 00000000..779aeedb --- /dev/null +++ b/docs/_sources/generated/mygrad.moveaxis.rst.txt @@ -0,0 +1,6 @@ +mygrad.moveaxis +=============== + +.. currentmodule:: mygrad + +.. autofunction:: moveaxis \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.multi_matmul.rst.txt b/docs/_sources/generated/mygrad.multi_matmul.rst.txt new file mode 100644 index 00000000..34212fca --- /dev/null +++ b/docs/_sources/generated/mygrad.multi_matmul.rst.txt @@ -0,0 +1,6 @@ +mygrad.multi\_matmul +==================== + +.. currentmodule:: mygrad + +.. autofunction:: multi_matmul \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.multiply.rst.txt b/docs/_sources/generated/mygrad.multiply.rst.txt new file mode 100644 index 00000000..d63905ec --- /dev/null +++ b/docs/_sources/generated/mygrad.multiply.rst.txt @@ -0,0 +1,47 @@ +mygrad.multiply +=============== + +.. currentmodule:: mygrad + +.. autoclass:: multiply + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~multiply.__init__ + ~multiply.accumulate + ~multiply.at + ~multiply.outer + ~multiply.reduce + ~multiply.reduceat + ~multiply.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~multiply.identity + ~multiply.nargs + ~multiply.nin + ~multiply.nout + ~multiply.ntypes + ~multiply.signature + ~multiply.types + ~multiply.x1 + ~multiply.x2 + ~multiply.out + ~multiply.where + ~multiply.dtype + ~multiply.constant + ~multiply.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.multiply_sequence.rst.txt b/docs/_sources/generated/mygrad.multiply_sequence.rst.txt new file mode 100644 index 00000000..a8efdc7e --- /dev/null +++ b/docs/_sources/generated/mygrad.multiply_sequence.rst.txt @@ -0,0 +1,6 @@ +mygrad.multiply\_sequence +========================= + +.. currentmodule:: mygrad + +.. autofunction:: multiply_sequence \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.negative.rst.txt b/docs/_sources/generated/mygrad.negative.rst.txt new file mode 100644 index 00000000..05a6dc90 --- /dev/null +++ b/docs/_sources/generated/mygrad.negative.rst.txt @@ -0,0 +1,46 @@ +mygrad.negative +=============== + +.. currentmodule:: mygrad + +.. autoclass:: negative + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. 
autosummary:: + + ~negative.__init__ + ~negative.accumulate + ~negative.at + ~negative.outer + ~negative.reduce + ~negative.reduceat + ~negative.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~negative.identity + ~negative.nargs + ~negative.nin + ~negative.nout + ~negative.ntypes + ~negative.signature + ~negative.types + ~negative.x + ~negative.out + ~negative.where + ~negative.dtype + ~negative.constant + ~negative.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.elu.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.elu.rst.txt new file mode 100644 index 00000000..f0e53601 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.elu.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.elu +=========================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: elu \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.glu.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.glu.rst.txt new file mode 100644 index 00000000..8d8c7fa4 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.glu.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.glu +=========================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: glu \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.hard_tanh.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.hard_tanh.rst.txt new file mode 100644 index 00000000..d4bf4d24 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.hard_tanh.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.hard\_tanh +================================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: hard_tanh \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.leaky_relu.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.leaky_relu.rst.txt new file mode 100644 index 00000000..6890bc61 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.leaky_relu.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.leaky\_relu +=================================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: leaky_relu \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.logsoftmax.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.logsoftmax.rst.txt new file mode 100644 index 00000000..909004f6 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.logsoftmax.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.logsoftmax +================================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: logsoftmax \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.relu.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.relu.rst.txt new file mode 100644 index 00000000..fd6a5ec5 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.relu.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.relu +============================ + +.. currentmodule:: mygrad.nnet.activations + +.. 
autofunction:: relu \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.selu.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.selu.rst.txt new file mode 100644 index 00000000..56687163 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.selu.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.selu +============================ + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: selu \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.sigmoid.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.sigmoid.rst.txt new file mode 100644 index 00000000..1076aa17 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.sigmoid.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.sigmoid +=============================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: sigmoid \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.soft_sign.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.soft_sign.rst.txt new file mode 100644 index 00000000..77650220 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.soft_sign.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.soft\_sign +================================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: soft_sign \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.softmax.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.softmax.rst.txt new file mode 100644 index 00000000..68f58789 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.softmax.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.activations.softmax +=============================== + +.. currentmodule:: mygrad.nnet.activations + +.. autofunction:: softmax \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.activations.tanh.rst.txt b/docs/_sources/generated/mygrad.nnet.activations.tanh.rst.txt new file mode 100644 index 00000000..20589e59 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.activations.tanh.rst.txt @@ -0,0 +1,46 @@ +mygrad.nnet.activations.tanh +============================ + +.. currentmodule:: mygrad.nnet.activations + +.. autoclass:: tanh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~tanh.__init__ + ~tanh.accumulate + ~tanh.at + ~tanh.outer + ~tanh.reduce + ~tanh.reduceat + ~tanh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~tanh.identity + ~tanh.nargs + ~tanh.nin + ~tanh.nout + ~tanh.ntypes + ~tanh.signature + ~tanh.types + ~tanh.x + ~tanh.out + ~tanh.where + ~tanh.dtype + ~tanh.constant + ~tanh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.dirac.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.dirac.rst.txt new file mode 100644 index 00000000..2370aa3c --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.dirac.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.dirac +============================== + +.. currentmodule:: mygrad.nnet.initializers + +.. 
autofunction:: dirac \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.glorot_normal.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.glorot_normal.rst.txt new file mode 100644 index 00000000..66da2372 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.glorot_normal.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.glorot\_normal +======================================= + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: glorot_normal \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.glorot_uniform.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.glorot_uniform.rst.txt new file mode 100644 index 00000000..e43b8dd6 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.glorot_uniform.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.glorot\_uniform +======================================== + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: glorot_uniform \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.he_normal.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.he_normal.rst.txt new file mode 100644 index 00000000..f0adb50f --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.he_normal.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.he\_normal +=================================== + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: he_normal \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.he_uniform.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.he_uniform.rst.txt new file mode 100644 index 00000000..b5691f6d --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.he_uniform.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.he\_uniform +==================================== + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: he_uniform \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.normal.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.normal.rst.txt new file mode 100644 index 00000000..a675d825 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.normal.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.normal +=============================== + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: normal \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.initializers.uniform.rst.txt b/docs/_sources/generated/mygrad.nnet.initializers.uniform.rst.txt new file mode 100644 index 00000000..0fb53d9b --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.initializers.uniform.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.initializers.uniform +================================ + +.. currentmodule:: mygrad.nnet.initializers + +.. autofunction:: uniform \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.layers.batchnorm.rst.txt b/docs/_sources/generated/mygrad.nnet.layers.batchnorm.rst.txt new file mode 100644 index 00000000..477e6df0 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.layers.batchnorm.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.layers.batchnorm +============================ + +.. currentmodule:: mygrad.nnet.layers + +.. 
autofunction:: batchnorm \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.layers.conv_nd.rst.txt b/docs/_sources/generated/mygrad.nnet.layers.conv_nd.rst.txt new file mode 100644 index 00000000..bc8ecd3d --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.layers.conv_nd.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.layers.conv\_nd +=========================== + +.. currentmodule:: mygrad.nnet.layers + +.. autofunction:: conv_nd \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.layers.gru.rst.txt b/docs/_sources/generated/mygrad.nnet.layers.gru.rst.txt new file mode 100644 index 00000000..ed405122 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.layers.gru.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.layers.gru +====================== + +.. currentmodule:: mygrad.nnet.layers + +.. autofunction:: gru \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.layers.max_pool.rst.txt b/docs/_sources/generated/mygrad.nnet.layers.max_pool.rst.txt new file mode 100644 index 00000000..de21a80b --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.layers.max_pool.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.layers.max\_pool +============================ + +.. currentmodule:: mygrad.nnet.layers + +.. autofunction:: max_pool \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.focal_loss.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.focal_loss.rst.txt new file mode 100644 index 00000000..2285c302 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.focal_loss.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.focal\_loss +============================== + +.. currentmodule:: mygrad.nnet.losses + +.. autofunction:: focal_loss \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.margin_ranking_loss.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.margin_ranking_loss.rst.txt new file mode 100644 index 00000000..86f9a9d7 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.margin_ranking_loss.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.margin\_ranking\_loss +======================================== + +.. currentmodule:: mygrad.nnet.losses + +.. autofunction:: margin_ranking_loss \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.multiclass_hinge.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.multiclass_hinge.rst.txt new file mode 100644 index 00000000..dee95e6e --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.multiclass_hinge.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.multiclass\_hinge +==================================== + +.. currentmodule:: mygrad.nnet.losses + +.. autofunction:: multiclass_hinge \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.negative_log_likelihood.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.negative_log_likelihood.rst.txt new file mode 100644 index 00000000..3ed24f9b --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.negative_log_likelihood.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.negative\_log\_likelihood +============================================ + +.. currentmodule:: mygrad.nnet.losses + +.. 
autofunction:: negative_log_likelihood \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.softmax_crossentropy.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.softmax_crossentropy.rst.txt new file mode 100644 index 00000000..cf1b8888 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.softmax_crossentropy.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.softmax\_crossentropy +======================================== + +.. currentmodule:: mygrad.nnet.losses + +.. autofunction:: softmax_crossentropy \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.nnet.losses.softmax_focal_loss.rst.txt b/docs/_sources/generated/mygrad.nnet.losses.softmax_focal_loss.rst.txt new file mode 100644 index 00000000..f04b9936 --- /dev/null +++ b/docs/_sources/generated/mygrad.nnet.losses.softmax_focal_loss.rst.txt @@ -0,0 +1,6 @@ +mygrad.nnet.losses.softmax\_focal\_loss +======================================= + +.. currentmodule:: mygrad.nnet.losses + +.. autofunction:: softmax_focal_loss \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.no_autodiff.rst.txt b/docs/_sources/generated/mygrad.no_autodiff.rst.txt new file mode 100644 index 00000000..51aa8736 --- /dev/null +++ b/docs/_sources/generated/mygrad.no_autodiff.rst.txt @@ -0,0 +1,6 @@ +mygrad.no\_autodiff +=================== + +.. currentmodule:: mygrad + +.. autodata:: no_autodiff \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.ones.rst.txt b/docs/_sources/generated/mygrad.ones.rst.txt new file mode 100644 index 00000000..4c27d9d3 --- /dev/null +++ b/docs/_sources/generated/mygrad.ones.rst.txt @@ -0,0 +1,6 @@ +mygrad.ones +=========== + +.. currentmodule:: mygrad + +.. autofunction:: ones \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.ones_like.rst.txt b/docs/_sources/generated/mygrad.ones_like.rst.txt new file mode 100644 index 00000000..004b9a47 --- /dev/null +++ b/docs/_sources/generated/mygrad.ones_like.rst.txt @@ -0,0 +1,6 @@ +mygrad.ones\_like +================= + +.. currentmodule:: mygrad + +.. autofunction:: ones_like \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.operation_base.Operation.backward.rst.txt b/docs/_sources/generated/mygrad.operation_base.Operation.backward.rst.txt new file mode 100644 index 00000000..0c156365 --- /dev/null +++ b/docs/_sources/generated/mygrad.operation_base.Operation.backward.rst.txt @@ -0,0 +1,6 @@ +mygrad.operation\_base.Operation.backward +========================================= + +.. currentmodule:: mygrad.operation_base + +.. automethod:: Operation.backward \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.operation_base.Operation.backward_var.rst.txt b/docs/_sources/generated/mygrad.operation_base.Operation.backward_var.rst.txt new file mode 100644 index 00000000..afd9daf9 --- /dev/null +++ b/docs/_sources/generated/mygrad.operation_base.Operation.backward_var.rst.txt @@ -0,0 +1,6 @@ +mygrad.operation\_base.Operation.backward\_var +============================================== + +.. currentmodule:: mygrad.operation_base + +.. 
automethod:: Operation.backward_var \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.operation_base.Operation.rst.txt b/docs/_sources/generated/mygrad.operation_base.Operation.rst.txt new file mode 100644 index 00000000..400274c5 --- /dev/null +++ b/docs/_sources/generated/mygrad.operation_base.Operation.rst.txt @@ -0,0 +1,32 @@ +mygrad.operation\_base.Operation +================================ + +.. currentmodule:: mygrad.operation_base + +.. autoclass:: Operation + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Operation.__init__ + ~Operation.backward + ~Operation.backward_var + ~Operation.grad_post_process_fn + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Operation.can_return_view + ~Operation.variables + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.positive.rst.txt b/docs/_sources/generated/mygrad.positive.rst.txt new file mode 100644 index 00000000..334ec41c --- /dev/null +++ b/docs/_sources/generated/mygrad.positive.rst.txt @@ -0,0 +1,46 @@ +mygrad.positive +=============== + +.. currentmodule:: mygrad + +.. autoclass:: positive + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~positive.__init__ + ~positive.accumulate + ~positive.at + ~positive.outer + ~positive.reduce + ~positive.reduceat + ~positive.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~positive.identity + ~positive.nargs + ~positive.nin + ~positive.nout + ~positive.ntypes + ~positive.signature + ~positive.types + ~positive.x + ~positive.out + ~positive.where + ~positive.dtype + ~positive.constant + ~positive.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.power.rst.txt b/docs/_sources/generated/mygrad.power.rst.txt new file mode 100644 index 00000000..37d8e32d --- /dev/null +++ b/docs/_sources/generated/mygrad.power.rst.txt @@ -0,0 +1,47 @@ +mygrad.power +============ + +.. currentmodule:: mygrad + +.. autoclass:: power + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~power.__init__ + ~power.accumulate + ~power.at + ~power.outer + ~power.reduce + ~power.reduceat + ~power.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~power.identity + ~power.nargs + ~power.nin + ~power.nout + ~power.ntypes + ~power.signature + ~power.types + ~power.x1 + ~power.x2 + ~power.out + ~power.where + ~power.dtype + ~power.constant + ~power.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.prod.rst.txt b/docs/_sources/generated/mygrad.prod.rst.txt new file mode 100644 index 00000000..33effd08 --- /dev/null +++ b/docs/_sources/generated/mygrad.prod.rst.txt @@ -0,0 +1,6 @@ +mygrad.prod +=========== + +.. currentmodule:: mygrad + +.. autofunction:: prod \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.rand.rst.txt b/docs/_sources/generated/mygrad.random.rand.rst.txt new file mode 100644 index 00000000..7d4a9d14 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.rand.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.rand +================== + +.. currentmodule:: mygrad.random + +.. autofunction:: rand \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.randint.rst.txt b/docs/_sources/generated/mygrad.random.randint.rst.txt new file mode 100644 index 00000000..9872a2b4 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.randint.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.randint +===================== + +.. 
currentmodule:: mygrad.random + +.. autofunction:: randint \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.randn.rst.txt b/docs/_sources/generated/mygrad.random.randn.rst.txt new file mode 100644 index 00000000..c6affe68 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.randn.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.randn +=================== + +.. currentmodule:: mygrad.random + +.. autofunction:: randn \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.random.rst.txt b/docs/_sources/generated/mygrad.random.random.rst.txt new file mode 100644 index 00000000..e793a702 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.random.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.random +==================== + +.. currentmodule:: mygrad.random + +.. autofunction:: random \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.random_sample.rst.txt b/docs/_sources/generated/mygrad.random.random_sample.rst.txt new file mode 100644 index 00000000..4d49cd15 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.random_sample.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.random\_sample +============================ + +.. currentmodule:: mygrad.random + +.. autofunction:: random_sample \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.ranf.rst.txt b/docs/_sources/generated/mygrad.random.ranf.rst.txt new file mode 100644 index 00000000..2310a6ad --- /dev/null +++ b/docs/_sources/generated/mygrad.random.ranf.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.ranf +================== + +.. currentmodule:: mygrad.random + +.. autofunction:: ranf \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.sample.rst.txt b/docs/_sources/generated/mygrad.random.sample.rst.txt new file mode 100644 index 00000000..d89116c2 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.sample.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.sample +==================== + +.. currentmodule:: mygrad.random + +.. autofunction:: sample \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.random.seed.rst.txt b/docs/_sources/generated/mygrad.random.seed.rst.txt new file mode 100644 index 00000000..fb36c632 --- /dev/null +++ b/docs/_sources/generated/mygrad.random.seed.rst.txt @@ -0,0 +1,6 @@ +mygrad.random.seed +================== + +.. currentmodule:: mygrad.random + +.. autofunction:: seed \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.ravel.rst.txt b/docs/_sources/generated/mygrad.ravel.rst.txt new file mode 100644 index 00000000..129453f5 --- /dev/null +++ b/docs/_sources/generated/mygrad.ravel.rst.txt @@ -0,0 +1,6 @@ +mygrad.ravel +============ + +.. currentmodule:: mygrad + +.. autofunction:: ravel \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.reciprocal.rst.txt b/docs/_sources/generated/mygrad.reciprocal.rst.txt new file mode 100644 index 00000000..8ed9fa1a --- /dev/null +++ b/docs/_sources/generated/mygrad.reciprocal.rst.txt @@ -0,0 +1,46 @@ +mygrad.reciprocal +================= + +.. currentmodule:: mygrad + +.. autoclass:: reciprocal + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~reciprocal.__init__ + ~reciprocal.accumulate + ~reciprocal.at + ~reciprocal.outer + ~reciprocal.reduce + ~reciprocal.reduceat + ~reciprocal.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~reciprocal.identity + ~reciprocal.nargs + ~reciprocal.nin + ~reciprocal.nout + ~reciprocal.ntypes + ~reciprocal.signature + ~reciprocal.types + ~reciprocal.x + ~reciprocal.out + ~reciprocal.where + ~reciprocal.dtype + ~reciprocal.constant + ~reciprocal.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.repeat.rst.txt b/docs/_sources/generated/mygrad.repeat.rst.txt new file mode 100644 index 00000000..0d4d76c4 --- /dev/null +++ b/docs/_sources/generated/mygrad.repeat.rst.txt @@ -0,0 +1,6 @@ +mygrad.repeat +============= + +.. currentmodule:: mygrad + +.. autofunction:: repeat \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.reshape.rst.txt b/docs/_sources/generated/mygrad.reshape.rst.txt new file mode 100644 index 00000000..c605fa8d --- /dev/null +++ b/docs/_sources/generated/mygrad.reshape.rst.txt @@ -0,0 +1,6 @@ +mygrad.reshape +============== + +.. currentmodule:: mygrad + +.. autofunction:: reshape \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.roll.rst.txt b/docs/_sources/generated/mygrad.roll.rst.txt new file mode 100644 index 00000000..bf0fb5fd --- /dev/null +++ b/docs/_sources/generated/mygrad.roll.rst.txt @@ -0,0 +1,6 @@ +mygrad.roll +=========== + +.. currentmodule:: mygrad + +.. autofunction:: roll \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.save.rst.txt b/docs/_sources/generated/mygrad.save.rst.txt new file mode 100644 index 00000000..94eba1be --- /dev/null +++ b/docs/_sources/generated/mygrad.save.rst.txt @@ -0,0 +1,6 @@ +mygrad.save +=========== + +.. currentmodule:: mygrad + +.. autofunction:: save \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sin.rst.txt b/docs/_sources/generated/mygrad.sin.rst.txt new file mode 100644 index 00000000..b75b1715 --- /dev/null +++ b/docs/_sources/generated/mygrad.sin.rst.txt @@ -0,0 +1,46 @@ +mygrad.sin +========== + +.. currentmodule:: mygrad + +.. autoclass:: sin + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~sin.__init__ + ~sin.accumulate + ~sin.at + ~sin.outer + ~sin.reduce + ~sin.reduceat + ~sin.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~sin.identity + ~sin.nargs + ~sin.nin + ~sin.nout + ~sin.ntypes + ~sin.signature + ~sin.types + ~sin.x + ~sin.out + ~sin.where + ~sin.dtype + ~sin.constant + ~sin.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sinc.rst.txt b/docs/_sources/generated/mygrad.sinc.rst.txt new file mode 100644 index 00000000..241b2104 --- /dev/null +++ b/docs/_sources/generated/mygrad.sinc.rst.txt @@ -0,0 +1,6 @@ +mygrad.sinc +=========== + +.. currentmodule:: mygrad + +.. autofunction:: sinc \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sinh.rst.txt b/docs/_sources/generated/mygrad.sinh.rst.txt new file mode 100644 index 00000000..1609d2a0 --- /dev/null +++ b/docs/_sources/generated/mygrad.sinh.rst.txt @@ -0,0 +1,46 @@ +mygrad.sinh +=========== + +.. currentmodule:: mygrad + +.. autoclass:: sinh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~sinh.__init__ + ~sinh.accumulate + ~sinh.at + ~sinh.outer + ~sinh.reduce + ~sinh.reduceat + ~sinh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~sinh.identity + ~sinh.nargs + ~sinh.nin + ~sinh.nout + ~sinh.ntypes + ~sinh.signature + ~sinh.types + ~sinh.x + ~sinh.out + ~sinh.where + ~sinh.dtype + ~sinh.constant + ~sinh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sliding_window_view.rst.txt b/docs/_sources/generated/mygrad.sliding_window_view.rst.txt new file mode 100644 index 00000000..316314bd --- /dev/null +++ b/docs/_sources/generated/mygrad.sliding_window_view.rst.txt @@ -0,0 +1,6 @@ +mygrad.sliding\_window\_view +============================ + +.. currentmodule:: mygrad + +.. autofunction:: sliding_window_view \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sqrt.rst.txt b/docs/_sources/generated/mygrad.sqrt.rst.txt new file mode 100644 index 00000000..c9b636a4 --- /dev/null +++ b/docs/_sources/generated/mygrad.sqrt.rst.txt @@ -0,0 +1,46 @@ +mygrad.sqrt +=========== + +.. currentmodule:: mygrad + +.. autoclass:: sqrt + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~sqrt.__init__ + ~sqrt.accumulate + ~sqrt.at + ~sqrt.outer + ~sqrt.reduce + ~sqrt.reduceat + ~sqrt.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~sqrt.identity + ~sqrt.nargs + ~sqrt.nin + ~sqrt.nout + ~sqrt.ntypes + ~sqrt.signature + ~sqrt.types + ~sqrt.x + ~sqrt.out + ~sqrt.where + ~sqrt.dtype + ~sqrt.constant + ~sqrt.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.square.rst.txt b/docs/_sources/generated/mygrad.square.rst.txt new file mode 100644 index 00000000..d720443d --- /dev/null +++ b/docs/_sources/generated/mygrad.square.rst.txt @@ -0,0 +1,46 @@ +mygrad.square +============= + +.. currentmodule:: mygrad + +.. autoclass:: square + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~square.__init__ + ~square.accumulate + ~square.at + ~square.outer + ~square.reduce + ~square.reduceat + ~square.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~square.identity + ~square.nargs + ~square.nin + ~square.nout + ~square.ntypes + ~square.signature + ~square.types + ~square.x + ~square.out + ~square.where + ~square.dtype + ~square.constant + ~square.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.squeeze.rst.txt b/docs/_sources/generated/mygrad.squeeze.rst.txt new file mode 100644 index 00000000..79c24b17 --- /dev/null +++ b/docs/_sources/generated/mygrad.squeeze.rst.txt @@ -0,0 +1,6 @@ +mygrad.squeeze +============== + +.. currentmodule:: mygrad + +.. autofunction:: squeeze \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.stack.rst.txt b/docs/_sources/generated/mygrad.stack.rst.txt new file mode 100644 index 00000000..dc793959 --- /dev/null +++ b/docs/_sources/generated/mygrad.stack.rst.txt @@ -0,0 +1,6 @@ +mygrad.stack +============ + +.. currentmodule:: mygrad + +.. autofunction:: stack \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.std.rst.txt b/docs/_sources/generated/mygrad.std.rst.txt new file mode 100644 index 00000000..f8844aac --- /dev/null +++ b/docs/_sources/generated/mygrad.std.rst.txt @@ -0,0 +1,6 @@ +mygrad.std +========== + +.. currentmodule:: mygrad + +.. 
autofunction:: std \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.subtract.rst.txt b/docs/_sources/generated/mygrad.subtract.rst.txt new file mode 100644 index 00000000..eda5ca1c --- /dev/null +++ b/docs/_sources/generated/mygrad.subtract.rst.txt @@ -0,0 +1,47 @@ +mygrad.subtract +=============== + +.. currentmodule:: mygrad + +.. autoclass:: subtract + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~subtract.__init__ + ~subtract.accumulate + ~subtract.at + ~subtract.outer + ~subtract.reduce + ~subtract.reduceat + ~subtract.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~subtract.identity + ~subtract.nargs + ~subtract.nin + ~subtract.nout + ~subtract.ntypes + ~subtract.signature + ~subtract.types + ~subtract.x1 + ~subtract.x2 + ~subtract.out + ~subtract.where + ~subtract.dtype + ~subtract.constant + ~subtract.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.sum.rst.txt b/docs/_sources/generated/mygrad.sum.rst.txt new file mode 100644 index 00000000..02bbe838 --- /dev/null +++ b/docs/_sources/generated/mygrad.sum.rst.txt @@ -0,0 +1,6 @@ +mygrad.sum +========== + +.. currentmodule:: mygrad + +.. autofunction:: sum \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.swapaxes.rst.txt b/docs/_sources/generated/mygrad.swapaxes.rst.txt new file mode 100644 index 00000000..4e878c51 --- /dev/null +++ b/docs/_sources/generated/mygrad.swapaxes.rst.txt @@ -0,0 +1,6 @@ +mygrad.swapaxes +=============== + +.. currentmodule:: mygrad + +.. autofunction:: swapaxes \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.tan.rst.txt b/docs/_sources/generated/mygrad.tan.rst.txt new file mode 100644 index 00000000..816d26e1 --- /dev/null +++ b/docs/_sources/generated/mygrad.tan.rst.txt @@ -0,0 +1,46 @@ +mygrad.tan +========== + +.. currentmodule:: mygrad + +.. autoclass:: tan + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~tan.__init__ + ~tan.accumulate + ~tan.at + ~tan.outer + ~tan.reduce + ~tan.reduceat + ~tan.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~tan.identity + ~tan.nargs + ~tan.nin + ~tan.nout + ~tan.ntypes + ~tan.signature + ~tan.types + ~tan.x + ~tan.out + ~tan.where + ~tan.dtype + ~tan.constant + ~tan.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.tanh.rst.txt b/docs/_sources/generated/mygrad.tanh.rst.txt new file mode 100644 index 00000000..fb94adea --- /dev/null +++ b/docs/_sources/generated/mygrad.tanh.rst.txt @@ -0,0 +1,46 @@ +mygrad.tanh +=========== + +.. currentmodule:: mygrad + +.. autoclass:: tanh + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~tanh.__init__ + ~tanh.accumulate + ~tanh.at + ~tanh.outer + ~tanh.reduce + ~tanh.reduceat + ~tanh.resolve_dtypes + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~tanh.identity + ~tanh.nargs + ~tanh.nin + ~tanh.nout + ~tanh.ntypes + ~tanh.signature + ~tanh.types + ~tanh.x + ~tanh.out + ~tanh.where + ~tanh.dtype + ~tanh.constant + ~tanh.return + + \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.tensor.rst.txt b/docs/_sources/generated/mygrad.tensor.rst.txt new file mode 100644 index 00000000..f448382a --- /dev/null +++ b/docs/_sources/generated/mygrad.tensor.rst.txt @@ -0,0 +1,6 @@ +mygrad.tensor +============= + +.. currentmodule:: mygrad + +.. 
autofunction:: tensor \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.transpose.rst.txt b/docs/_sources/generated/mygrad.transpose.rst.txt new file mode 100644 index 00000000..0b6bc16c --- /dev/null +++ b/docs/_sources/generated/mygrad.transpose.rst.txt @@ -0,0 +1,6 @@ +mygrad.transpose +================ + +.. currentmodule:: mygrad + +.. autofunction:: transpose \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.turn_memory_guarding_off.rst.txt b/docs/_sources/generated/mygrad.turn_memory_guarding_off.rst.txt new file mode 100644 index 00000000..f387fac1 --- /dev/null +++ b/docs/_sources/generated/mygrad.turn_memory_guarding_off.rst.txt @@ -0,0 +1,6 @@ +mygrad.turn\_memory\_guarding\_off +================================== + +.. currentmodule:: mygrad + +.. autofunction:: turn_memory_guarding_off \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.var.rst.txt b/docs/_sources/generated/mygrad.var.rst.txt new file mode 100644 index 00000000..55905ed0 --- /dev/null +++ b/docs/_sources/generated/mygrad.var.rst.txt @@ -0,0 +1,6 @@ +mygrad.var +========== + +.. currentmodule:: mygrad + +.. autofunction:: var \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.where.rst.txt b/docs/_sources/generated/mygrad.where.rst.txt new file mode 100644 index 00000000..1c445e21 --- /dev/null +++ b/docs/_sources/generated/mygrad.where.rst.txt @@ -0,0 +1,6 @@ +mygrad.where +============ + +.. currentmodule:: mygrad + +.. autofunction:: where \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.zeros.rst.txt b/docs/_sources/generated/mygrad.zeros.rst.txt new file mode 100644 index 00000000..0c19d3cf --- /dev/null +++ b/docs/_sources/generated/mygrad.zeros.rst.txt @@ -0,0 +1,6 @@ +mygrad.zeros +============ + +.. currentmodule:: mygrad + +.. autofunction:: zeros \ No newline at end of file diff --git a/docs/_sources/generated/mygrad.zeros_like.rst.txt b/docs/_sources/generated/mygrad.zeros_like.rst.txt new file mode 100644 index 00000000..a4c05220 --- /dev/null +++ b/docs/_sources/generated/mygrad.zeros_like.rst.txt @@ -0,0 +1,6 @@ +mygrad.zeros\_like +================== + +.. currentmodule:: mygrad + +.. autofunction:: zeros_like \ No newline at end of file diff --git a/docs/_sources/graph_viz.rst.txt b/docs/_sources/graph_viz.rst.txt new file mode 100644 index 00000000..d270a272 --- /dev/null +++ b/docs/_sources/graph_viz.rst.txt @@ -0,0 +1,10 @@ +Computational graph visualization(:mod:`mygrad.computational_graph`) +******************************************************************** + +.. currentmodule:: mygrad.computational_graph +.. autosummary:: + :toctree: generated/ + + build_graph + + diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt new file mode 100644 index 00000000..eb084a93 --- /dev/null +++ b/docs/_sources/index.rst.txt @@ -0,0 +1,156 @@ +.. MyGrad documentation master file, created by + sphinx-quickstart on Sun Oct 21 09:57:03 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +====== +MyGrad +====== +MyGrad is a lightweight library that adds automatic differentiation to NumPy – its only +dependency is NumPy. Simply "drop in" a MyGrad tensor into your NumPy-based code, and +start differentiating! + +.. 
code-block:: pycon + + >>> import mygrad as mg + >>> import numpy as np + + >>> x = mg.tensor([1., 2., 3.]) # like numpy.array, but supports backprop + >>> f = np.sum(x * x) # tensors can be passed directly to native numpy functions! + >>> f.backward() # triggers automatic differentiation + >>> x.grad # stores [df/dx0, df/dx1, df/dx2] + array([2., 4., 6.]) + + +MyGrad's primary goal is to make automatic differentiation accessible and easy to use across the Python/NumPy ecosystem. +As such, it strives to behave and feel exactly like NumPy so that users need not learn yet another array-based math library. + +Of the various modes and flavors of auto-diff, MyGrad currently only supports back-propagation from a scalar quantity. + + +"Drop in" automatic differentiation? +==================================== +What we mean by drop in automatic differentiation is that you can take a third party function, which is written in NumPy, and pass MyGrad tensors as its inputs – this will coerce it into using MyGrad functions internally so that we can differentiate the function. + +.. code-block:: python + :caption: What we mean by drop in autodiff + + from third_party_lib import some_numpy_func + + import mygrad as mg + + arr1 = mg.tensor(...) # some MyGrad Tensor (instead of a NumPy array) + arr2 = mg.tensor(...) # some MyGrad Tensor (instead of a NumPy array) + + output = some_numpy_func(arr1, arr2) # "drop in" the MyGrad tensors + + output.backward() # output is a MyGrad tensor, not a NumPy array! + + arr1.grad # stores d(some_numpy_func) / d(arr1) + arr2.grad # stores d(some_numpy_func) / d(arr2) + + +MyGrad aims for parity with NumPy's major features +================================================== +NumPy's ufuncs are richly supported. We can even differentiate through an operation that occur in-place on a tensor and applies a boolean mask to +the results: + +.. code-block:: pycon + + >>> x = mg.tensor([1., 2., 3.]) + >>> y = mg.zeros_like(x) + >>> np.multiply(x, x, where=[True, False, True], out=y) + >>> y.backward() + >>> x.grad + array([2., 0., 6.]) + + +NumPy's `view semantics `_ are also mirrored to a high fidelity: performing basic +indexing and similar operations on tensors will produce a "view" of that tensor's data, thus a tensor and its view share memory. +This relationship will also manifest between the derivatives stored by a tensor and its views! + +.. code-block:: pycon + + >>> x = mg.arange(9.).reshape(3, 3) + >>> diag_view = np.einsum("ii->i", x) # returns a view of the diagonal elements of `x` + >>> x, diag_view + (Tensor([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]), + Tensor([0., 4., 8.])) + + # views share memory + >>> np.shares_memory(x, diag_view) + True + + # mutating a view affects its base (and all other views) + >>> diag_view *= -1 # mutates x in-place + >>> x + Tensor([[-0., 1., 2.], + [ 3., -4., 5.], + [ 6., 7., -8.]]) + + >>> (x ** 2).backward() + >>> x.grad, diag_view.grad + (array([[ -0., 2., 4.], + [ 6., -8., 10.], + [ 12., 14., -16.]]), + array([ -0., -8., -16.])) + + # the gradients have the same view relationship! + >>> np.shares_memory(x.grad, diag_view.grad) + True + + +Basic and advanced indexing is fully supported + +.. code-block:: pycon + + >>> (x[x < 4] ** 2).backward() + >>> x.grad + array([[0., 2., 4.], + [6., 0., 0.], + [0., 0., 0.]]) + + +NumPy arrays and other array-likes play nicely with MyGrad's tensor. These behave like constants +during automatic differentiation + +.. 
code-block:: pycon + + >>> x = mg.tensor([1., 2., 3.]) + >>> constant = [-1., 0., 10] # can be a numpy array, list, or any other array-like + >>> (x * constant).backward() # all array-likes are treated as constants + >>> x.grad + array([-1., 0., 10.]) + + +What About JAX? +=============== +Doesn't JAX already provide drop in automatic differentiation? Not quite; JAX provides *swap-out* automatic differentiation: you must swap out the version of NumPy you are using *before* you write your code. Thus you cannot simply differentiate some third party function by passing it a JAX array. + +"Is MyGrad a competitor to JAX? Should I stop using JAX and start using MyGrad?" + +**Goodness gracious, no!** MyGrad is *not* meant to compete with the likes of JAX, which offers far more functionality in the way of computing higher-order derivatives, Jacobian-vector products, just-in-time compilation, and much more. +MyGrad is meant to be a simple and highly accessible way to provide basic automatic differentiation capabilities to the NumPy ecosystem. Anyone who knows how to use NumPy can very easily learn to use MyGrad. It is especially great for teaching. But once your auto-diff needs extend beyond derivatives of scalars, it is time to graduate to JAX. + + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + install + intro + tensor + views + performance_tips + operation + tensor_creation + tensor_manipulation + linalg + math + indexing + nnet + io + graph_viz + changes diff --git a/docs/_sources/indexing.rst.txt b/docs/_sources/indexing.rst.txt new file mode 100644 index 00000000..019d04e4 --- /dev/null +++ b/docs/_sources/indexing.rst.txt @@ -0,0 +1,12 @@ +.. _routines.indexing: +Indexing Routines (:mod:`mygrad.indexing_routines`) +*************************************************** + +.. currentmodule:: mygrad + +Generating index tensors +------------------------ +.. autosummary:: + :toctree: generated/ + + where \ No newline at end of file diff --git a/docs/_sources/install.rst.txt b/docs/_sources/install.rst.txt new file mode 100644 index 00000000..8606657b --- /dev/null +++ b/docs/_sources/install.rst.txt @@ -0,0 +1,34 @@ +.. MyGrad documentation master file, created by + sphinx-quickstart on Sun Oct 21 09:57:03 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Installing MyGrad +================= +MyGrad requires numpy. It is highly recommended that you utilize numpy built with MKL for access to optimized math +routines (e.g. install numpy via anaconda). You can install MyGrad using pip: + +.. code-block:: shell + + pip install mygrad + + +You can instead install MyGrad from its source code. Clone `this repository `_ and +navigate to the MyGrad directory, then run: + +.. code-block:: shell + + pip install . + + +Support for Python and NumPy +---------------------------- +MyGrad abides by the `NEP 29 `_ recommendation, and adopts +a common “time window-based” policy for support of NumPy versions. Accordingly, MyGrad's drop schedule for NumPy versions can be found `here `_. + +Note, however, that MyGrad will maintain a wider window of support for minor Python +versions than is specified by NEP 29. Because our only dependency is NumPy, and because +we strive to remain an exceptionally lightweight and flexible dependency to our users, +we will support minor versions of Python until their end of life, *or* until our lowest +supported version of NumPy drops support for that version of Python -- whichever occurs +first.
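+ +Regardless of how you install MyGrad, you can quickly verify the installation by computing a simple gradient (a minimal sanity check, mirroring the example from the introduction): + +.. code-block:: pycon + + >>> import mygrad as mg + >>> x = mg.tensor([1., 2., 3.]) + >>> (x * x).sum().backward() # sum(x**2); triggers automatic differentiation + >>> x.grad # d(sum(x**2))/dx = 2x + array([2., 4., 6.])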
\ No newline at end of file diff --git a/docs/_sources/intro.rst.txt b/docs/_sources/intro.rst.txt new file mode 100644 index 00000000..22bc62f1 --- /dev/null +++ b/docs/_sources/intro.rst.txt @@ -0,0 +1,189 @@ +################## +Introducing MyGrad +################## + +MyGrad is a lightweight library that adds automatic differentiation to NumPy – its only dependency is NumPy! + +.. code:: python + + >>> import mygrad as mg + >>> import numpy as np + + >>> x = mg.tensor([1., 2., 3.]) # like numpy.array, but supports backprop! + >>> f = np.sum(x * x) # tensors work with numpy functions! + >>> f.backward() # triggers automatic differentiation + >>> x.grad # stores [df/dx0, df/dx1, df/dx2] + array([2., 4., 6.]) + + +Its primary goal is to make automatic differentiation accessible and easy to use across the Python/NumPy ecosystem. +As such, it strives to behave and feel exactly like NumPy so that users need not learn yet another array-based math library. +You can pass MyGrad's :class:`~mygrad.Tensor` to NumPy's functions in order to make them differentiable! +Of the various modes and flavors of auto-diff, MyGrad currently only supports backpropagation from a scalar quantity. + + +A Simple Application +#################### + +Let's use ``mygrad`` to compute the derivative of +:math:`f(x) = x^2` evaluated at :math:`x = 3` (which is :math:`\frac{df}{dx}\rvert_{x=3} = 2\times 3`). + +:class:`~mygrad.Tensor` behaves nearly identically to NumPy's ndarray, in addition to having the machinery needed to +compute the analytic derivatives of functions. Suppose we want to compute this derivative at ``x = 3``. We can create a +0-dimensional tensor (a scalar) for ``x`` and compute ``f(x)``: + +.. code:: pycon + + >>> import mygrad as mg + >>> import numpy as np + >>> x = mg.tensor(3.0) + >>> f = np.square(x) # mygrad's tensors can be passed into NumPy functions + >>> f + Tensor(9.0) + + +Invoking :meth:`~mygrad.Tensor.backward` on ``f`` instructs ``mygrad`` to trace through the computational graph that produced ``f`` and compute the +derivatives of ``f`` with respect to all of its independent variables. Thus, executing ``f.backward()`` will compute :math:`\frac{df}{dx} = 2x` at :math:`x=3`, and will store the resulting value in ``x.grad``: + +.. code:: pycon + + >>> f.backward() # triggers computation of ``df/dx`` + >>> x.grad # df/dx = 2x = 6.0 + array(6.0) + + +This is the absolute tip of the iceberg. ``mygrad`` can compute derivatives of multivariable composite +functions of tensor-valued variables! + +Gradient Descent with MyGrad +############################ + +Performing gradient descent on :math:`\mathscr{L}(w) = w ^ 2` + +.. code:: python + + w = mg.tensor(10.0) + learning_rate = 0.3 + num_steps = 10 + print(w) + + for step_cnt in range(num_steps): + ℒ = w ** 2 # compute L(w) (this also "nulls" any derivatives) + ℒ.backward() # compute derivative of L + + # Update w via gradient-step. + # We do an augmented update on the underlying numpy-array + # stored by `w` + w.data -= learning_rate * w.grad + print(w) + +The following steps are printed out; note that gradient descent leads us towards +the minimum of :math:`w = 0`: + +.. code:: pycon + + Tensor(10.) + Tensor(4.) 
+ Tensor(1.6) + Tensor(0.64) + Tensor(0.256) + Tensor(0.1024) + Tensor(0.04096) + Tensor(0.016384) + Tensor(0.0065536) + Tensor(0.00262144) + Tensor(0.00104858) + + +Some Bells and Whistles +####################### + +``mygrad`` supports all of NumPy's essential features, including: + + - `N-dimensional tensors `_ that can be reshaped and have their axes transposed + - creating and operating on `views of tensors `_ + - `in-place operations on tensors `_ + - `vectorization `_ + - `broadcasting `_ + - `basic and advanced indexing `_ (including all varieties of mixed indexing schemes) for both getting and setting items. + - fully-fledged support for `einsum `_ (including broadcasting and traces) + + :class:`~mygrad.Tensor` plays nicely with NumPy-arrays, which behave as constants when they are used in computational graphs: + +.. code:: pycon + + >>> import numpy as np + >>> x = mg.tensor([2.0, 2.0, 2.0]) + >>> y = np.array([1.0, 2.0, 3.0]) + >>> f = x ** y # (2 ** 1, 2 ** 2, 2 ** 3) + >>> f.backward() + >>> x.grad + array([ 1., 4., 12.]) + + +:mod:`~mygrad.nnet` supplies essential functions for machine learning, including: + +- `N-dimensional convolutions (with striding, dilation, and padding) `_ +- N-dimensional pooling +- A `gated recurrent unit `_ for sequence-learning (with input-level + dropout and variational hidden-hidden dropout) + +It leverages a nice `sliding window +view `_ +function, which produces convolution-style windowed views of arrays/tensors without making copies of them, to +intuitively (and quite efficiently) perform the neural network-style convolutions and pooling. + + +Advanced Example +################ + +The following is an example of using ``mygrad`` to compute the `hinge loss `_ of classification scores and to "back-propagate" through (compute the gradient of) this loss. This example demonstrates some of mygrad's ability to perform back-propagation through broadcasted operations, basic indexing, advanced indexing, and in-place assignments. + +.. code:: pycon + + >>> from mygrad import Tensor + >>> import numpy as np + >>> class_scores = Tensor(10 * np.random.rand(100, 10)) # 100 samples, 10 possible classes for each + >>> class_labels = np.random.randint(low=0, high=10, size=100) # correct label for each datum + >>> class_labels = (range(len(class_labels)), class_labels) + >>> correct_class_scores = class_scores[class_labels] + + >>> Lij = class_scores - correct_class_scores[:, np.newaxis] + 1. 0 # 100x10 margins + >>> Lij[Lij <= 0] = 0 # scores within the hinge incur no loss + >>> Lij[class_labels] = 0 # the score corresponding to the correct label incurs no loss + + >>> loss = Lij.sum() / class_scores.shape[0] # compute mean hinge loss + >>> loss.backward() # compute gradient of loss w.r.t all dependent tensors + >>> class_scores.grad # d(loss)/d(class_scores) + array([[ 0. , 0.01, 0. , -0.04, 0. , 0. , 0.01, 0. , 0.01, 0.01], ...]) + +Computational Graph Visualization +################################# + +MyGrad provides the capability to visually render diagrams of your computational graphs: + +.. code:: python + + import mygrad as mg + from mygrad.computational_graph import build_graph + x = mg.tensor(2) + y = mg.tensor(3) + f = x * y + g = f + x - 2 + + build_graph(g, names=locals()) + +.. image:: _static/example_graph.svg + + +`mygrad` uses `Graphviz `_ and a `Python interface for Graphviz `_ to render the computational graphs built using tensors. 
These graphs can be rendered in Jupyter notebooks, allowing for quick checks of graph structure, or can be saved to file for later reference. + +The dependencies can be installed with: + +.. code:: shell + + conda install graphviz + conda install python-graphviz + + +Big thanks to `Petar Griggs `_ for implementing these fantastic viz capabilities! diff --git a/docs/_sources/io.rst.txt b/docs/_sources/io.rst.txt new file mode 100644 index 00000000..89f0c643 --- /dev/null +++ b/docs/_sources/io.rst.txt @@ -0,0 +1,12 @@ +Input and Output +**************** + +.. currentmodule:: mygrad + +NumPy binary files (NPY, NPZ) +----------------------------- +.. autosummary:: + :toctree: generated/ + + save + load \ No newline at end of file diff --git a/docs/_sources/linalg.rst.txt b/docs/_sources/linalg.rst.txt new file mode 100644 index 00000000..4bf273be --- /dev/null +++ b/docs/_sources/linalg.rst.txt @@ -0,0 +1,27 @@ +.. _routines.linalg: + +Linear algebra (:mod:`mygrad.linalg`) +************************************* + +.. currentmodule:: mygrad + +Matrix and vector products +-------------------------- +.. autosummary:: + :toctree: generated/ + + matmul + multi_matmul + einsum + + +Norms and other numbers +----------------------- + +.. currentmodule:: mygrad + + +.. autosummary:: + :toctree: generated/ + + linalg.norm diff --git a/docs/_sources/math.rst.txt b/docs/_sources/math.rst.txt new file mode 100644 index 00000000..b06e3d07 --- /dev/null +++ b/docs/_sources/math.rst.txt @@ -0,0 +1,101 @@ +Mathematical functions (:mod:`mygrad.math`) +******************************************* + +.. currentmodule:: mygrad + +Trigonometric functions +----------------------- +.. autosummary:: + :toctree: generated/ + + sin + cos + tan + arcsin + arccos + arctan + arctan2 + + +Hyperbolic functions +-------------------- +.. autosummary:: + :toctree: generated/ + + sinh + cosh + tanh + arcsinh + arccosh + arctanh + +Sums, products, differences +--------------------------- +.. autosummary:: + :toctree: generated/ + + prod + sum + cumprod + cumsum + mean + var + std + amax + amin + max + min + + +Exponents and logarithms +------------------------ +.. autosummary:: + :toctree: generated/ + + exp + expm1 + exp2 + log + log10 + log2 + log1p + logaddexp + logaddexp2 + +Other special functions +----------------------- +.. autosummary:: + :toctree: generated/ + + add_sequence + multiply_sequence + sinc + +Arithmetic operations +--------------------- +.. autosummary:: + :toctree: generated/ + + add + reciprocal + positive + negative + multiply + divide + power + subtract + + +Miscellaneous +------------- +.. autosummary:: + :toctree: generated/ + + clip + sqrt + cbrt + square + absolute + maximum + minimum + diff --git a/docs/_sources/nnet.rst.txt b/docs/_sources/nnet.rst.txt new file mode 100644 index 00000000..e7a05a75 --- /dev/null +++ b/docs/_sources/nnet.rst.txt @@ -0,0 +1,76 @@ +Neural network operations (:mod:`mygrad.nnet`) +********************************************** + +.. currentmodule:: mygrad.nnet.layers + + +Layer operations +---------------- +.. autosummary:: + :toctree: generated/ + + batchnorm + conv_nd + max_pool + gru + +.. currentmodule:: mygrad.nnet.losses + +Losses +------ +.. autosummary:: + :toctree: generated/ + + focal_loss + margin_ranking_loss + multiclass_hinge + negative_log_likelihood + softmax_crossentropy + softmax_focal_loss + + +.. currentmodule:: mygrad.nnet.activations + +Activations +----------- +.. 
autosummary:: + :toctree: generated/ + + + elu + glu + hard_tanh + leaky_relu + logsoftmax + selu + sigmoid + softmax + soft_sign + relu + tanh + + +.. currentmodule:: mygrad.nnet.initializers + +Initializers +------------ +.. autosummary:: + :toctree: generated/ + + + dirac + glorot_normal + glorot_uniform + he_normal + he_uniform + normal + uniform + +.. currentmodule:: mygrad + +Sliding Window View Utility +--------------------------- +.. autosummary:: + :toctree: generated/ + + sliding_window_view diff --git a/docs/_sources/operation.rst.txt b/docs/_sources/operation.rst.txt new file mode 100644 index 00000000..055c24be --- /dev/null +++ b/docs/_sources/operation.rst.txt @@ -0,0 +1,103 @@ +Writing Your Own Operations +*************************** + +Let's write our own "multiply" operation. There are two components to doing this: + - Defining an operation class (a subclass of :class:`~mygrad.operation_base.Operation`) + - Writing a function that ultimately calls ``mygrad.execute_op(YourOp, ...)`` + +.. code:: python + + import numpy as np + + import mygrad as mg + from mygrad import execute_op + from mygrad.operation_base import Operation + from mygrad.typing import ArrayLike + + # All operations should inherit from Operation, or one of its subclasses + class CustomMultiply(Operation): + """ Performs f(x, y) = x * y """ + + def __call__(self, x: mg.Tensor, y: mg.Tensor) -> np.ndarray: + # This method defines the "forward pass" of the operation. + # It must bind the variable tensors to the op and compute + # the output of the operation as a numpy array + + # All tensors must be bound as a tuple to the `variables` + # instance variable. + self.variables = (x, y) + + # The forward pass should be performed using numpy arrays, + # not the tensors themselves. + x_arr = x.data + y_arr = y.data + return x_arr * y_arr + + def backward_var(self, grad, index, **kwargs): + """Given ``grad = dℒ/df``, computes ``∂ℒ/∂x`` and ``∂ℒ/∂y`` + + ``ℒ`` is assumed to be the terminal node from which ``ℒ.backward()`` was + called. + + Parameters + ---------- + grad : numpy.ndarray + The back-propagated total derivative with respect to the present + operation: dℒ/df. This will have the same shape as f, the result + of the forward pass. + + index : Literal[0, 1] + The index-location of ``var`` in ``self.variables`` + + Returns + ------- + numpy.ndarray + ∂ℒ/∂x_{i} + + Raises + ------ + SkipGradient""" + x, y = self.variables + x_arr = x.data + y_arr = y.data + + # The operation need not incorporate specialized logic for + # broadcasting. The appropriate sum-reductions will be performed + # by MyGrad's autodiff system. + if index == 0: # backprop through x + return grad * y.data # ∂ℒ/∂x = (∂ℒ/∂f)(∂f/∂x) + elif index == 1: # backprop through y + return grad * x.data # ∂ℒ/∂y = (∂ℒ/∂f)(∂f/∂y) + + + # Our function stitches together our operation class with the + # operation arguments via `mygrad.execute_op` + def custom_multiply(x: ArrayLike, y: ArrayLike, constant=None) -> mg.Tensor: + # `execute_op` will take care of: + # - casting `x` and `y` to tensors if they are instead array-likes + # - propagating 'constant' status to the resulting output based on the inputs + # - handling in-place operations (specified via the `out` parameter) + return execute_op(CustomMultiply, x, y, constant=constant) + +We can now use our differentiable function! + +.. 
 + +Documentation for mygrad.Operation +---------------------------------- + +.. currentmodule:: mygrad.operation_base + +.. autosummary:: + :toctree: generated/ + + Operation + Operation.backward + Operation.backward_var diff --git a/docs/_sources/performance_tips.rst.txt b/docs/_sources/performance_tips.rst.txt new file mode 100644 index 00000000..c2925c99 --- /dev/null +++ b/docs/_sources/performance_tips.rst.txt @@ -0,0 +1,127 @@ +.. _performance-tips: + +Performance Tips +**************** + +The following functions provide users with controls for optimizing MyGrad code +by either suspending its memory-guarding behavior or by disabling automatic differentiation +altogether. These are important utilities for speeding up your code. + +Beyond the points made below, general performance tips for NumPy – e.g. leveraging +`vectorized operations `_, +heeding NumPy's `row-major memory layout for arrays `_ +when constructing tensors, and using `basic indexing `_ +to create views of arrays instead of copies – apply equally to MyGrad and its tensors. +After all, MyGrad operates almost entirely on NumPy arrays and NumPy functions under the hood. + + +.. currentmodule:: mygrad + +Suspending Graph-Tracking for Automatic Differentiation +------------------------------------------------------- +.. autosummary:: + :toctree: generated/ + + no_autodiff + +If you want to run a computation involving MyGrad tensors, but you don't need to access +their gradients (e.g. when measuring the "test-time" performance of a model that you are training), then +you can use the provided decorator/context-manager for suspending all of MyGrad's +"graph-tracking" features. + +.. code-block:: python + + >>> import mygrad as mg + >>> with mg.no_autodiff: + ... # any mygrad code in this context will run faster + ... # but will not produce any gradients + + +Note that this also suspends all memory-guarding (see below), since MyGrad doesn't need to ensure the +preservation of any state. + +Suspending all graph-tracking features can speed up code involving many small tensors substantially - about +a 3x speedup.
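The preceding snippet only sketches the context manager itself; as a more complete illustration (an editorial sketch with made-up shapes and "weights", not taken from the MyGrad docs), here is a test-time forward pass wrapped in ``no_autodiff``:

.. code-block:: python

    import numpy as np
    import mygrad as mg

    w = mg.tensor(np.random.rand(3, 2))  # stand-in for trained model weights
    x_test = np.random.rand(100, 3)      # held-out data; a plain NumPy array

    with mg.no_autodiff:
        # no computational graph is recorded inside this block, so this
        # forward pass runs faster and cannot produce any gradients
        predictions = mg.matmul(x_test, w)

    print(predictions.shape)  # (100, 2)

Because ``no_autodiff`` doubles as a decorator, an entire evaluation function can be wrapped in the same way.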
 + + +Controlling Memory-Guarding Behavior +------------------------------------ +.. autosummary:: + :toctree: generated/ + + mem_guard_off + mem_guard_on + turn_memory_guarding_off + turn_memory_guarding_on + + +By default, MyGrad tracks and locks the writeability of all of the NumPy arrays that are involved in computational graphs +involving tensors. + +These stateful graphs are how MyGrad is able to perform backpropagation and +compute the gradients of tensors involved in a given calculation. +Because of the stateful nature of a computational graph, mutating a NumPy array in-place could +corrupt the state of the computational graph - i.e. the derivatives computed would not accurately +reflect the values that were used during the "forward pass". +Read the following code to see such a mutation rear its head. + +.. code-block:: python + + >>> import mygrad as mg + >>> import numpy as np + >>> mg.turn_memory_guarding_off() # speeds up calculations, but with risks involved.. + >>> x = np.arange(3.) + >>> y = mg.ones_like(x) + >>> z = x * y + >>> x[:] = 0 # mutates x, corrupting state associated with z + >>> z.backward() + >>> y.grad # would be array([0., 1., 2.]) if graph wasn't corrupted + array([0., 0., 0.]) + + +Note that, were ``x`` an instance of :class:`~mygrad.Tensor`, there would not be any issue with the +above calculation, since MyGrad can track the in-place update on a tensor. MyGrad cannot, on the other hand, +track such operations involving only NumPy arrays. + +Thus MyGrad prohibits such mutations with its aforementioned "memory guarding" behavior; however, it is +smart about restoring the writeability of all arrays once they are no longer participating in a computational +graph (e.g. once backpropagation has been performed through the graph). + +.. code-block:: python + + >>> import mygrad as mg + >>> import numpy as np + >>> x = np.arange(3.) + >>> y = mg.ones_like(x) + >>> z = x * y + >>> try: + ... x[:] = 0 # raises because `x` is made read-only + ... except ValueError: + ... pass + >>> z.backward() + >>> y.grad # correct gradient is computed + array([0., 1., 2.]) + >>> x[:] = 0 # the writeability of `x` is restored once backprop is complete + +This memory-guarding behavior comes at a cost: for computations involving many small tensors (e.g. in a handmade RNN) +this can lead to slowdowns of ~50%. Thus MyGrad provides various mechanisms for disabling all such memory-guards. +Note, however, that for computations involving large tensors (e.g. for typical dense and convolutional neural networks), the +overhead associated with the memory-guarding feature is likely negligible compared to the core numerical computations +at play. + +If you want to enjoy the optimizations associated with removing memory guarding, it is recommended that you first test +your code with the default memory guarding enabled; once you have verified that MyGrad does not raise any errors, you can +then proceed to run your code "at scale" with memory-guarding disabled. + + +Make Use of Views but Avoid Involving Them in In-Place Operations +----------------------------------------------------------------- + +Please refer to the section on views and in-place operations for more details. +The upshot is: views of tensors are efficient to create, as they do not involve copying any memory, but performing +an in-place operation on a tensor will copy that tensor. Furthermore, performing an in-place operation on a view +will lead to the creation of a copy of its associated base tensor. + +If you are relying on this mutation propagating to many views, then this can still be a net-gain in performance +compared to updating all of them "manually". But, generally, in-place updates on tensors do not have the same performance +benefits as do augmentations on NumPy arrays.
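To make the guidance above concrete, here is a brief editorial sketch (not part of the original docs) contrasting cheap view creation with a comparatively costly in-place update through that view:

.. code-block:: python

    import mygrad as mg

    x = mg.tensor([1., 2., 3., 4.])

    view = x[:2]  # cheap: basic indexing creates a view; no data is copied

    # not a bargain: MyGrad must stash the pre-mutation state of `x` (the
    # view's base) so that backpropagation through earlier uses stays correct
    view *= -1

    print(x)  # Tensor([-1., -2., 3., 4.]) - the mutation propagates to the base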
 \ No newline at end of file diff --git a/docs/_sources/random.rst.txt b/docs/_sources/random.rst.txt new file mode 100644 index 00000000..b59ba1b0 --- /dev/null +++ b/docs/_sources/random.rst.txt @@ -0,0 +1,29 @@ +Drawing from distributions (:mod:`mygrad.random`) +************************************************* + +.. currentmodule:: mygrad.random + + +Simple random data +------------------ +.. autosummary:: + :toctree: generated/ + + rand + randint + randn + random + random_sample + ranf + sample + + +Random generator +---------------- + +MyGrad simply mirrors NumPy's system for managing random number generation. + +.. autosummary:: + :toctree: generated/ + + seed diff --git a/docs/_sources/reference.rst.txt b/docs/_sources/reference.rst.txt new file mode 100644 index 00000000..072c9ceb --- /dev/null +++ b/docs/_sources/reference.rst.txt @@ -0,0 +1,16 @@ +Welcome to MyGrad's documentation! +================================== + + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + intro + tensor + tensor_creation + tensor_manipulation + linalg + math + nnet + graph_viz \ No newline at end of file diff --git a/docs/_sources/tensor.rst.txt b/docs/_sources/tensor.rst.txt new file mode 100644 index 00000000..265dde99 --- /dev/null +++ b/docs/_sources/tensor.rst.txt @@ -0,0 +1,172 @@ +MyGrad's Tensor +*************** +:class:`~mygrad.Tensor` is the most critical piece of MyGrad. It is a +numpy-array-like object capable of serving as a node in a computational +graph that supports back-propagation of derivatives via the chain rule. + +You can effectively do a drop-in replacement of a numpy array with a :class:`~mygrad.Tensor` +for all basic mathematical operations. This includes `basic and advanced indexing `_, +`broadcasting `_, sums `over axes `_, etc.; it will simply work. + +>>> import mygrad as mg # note that we replace numpy with mygrad here +>>> x = mg.arange(9).reshape(3, 3) +>>> x +Tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) +>>> y = x[x == 4] ** 2 +>>> y +Tensor([16], dtype=int32) + +Thus MyGrad users can spend their time mastering `numpy `_ +and their skills will transfer seamlessly when using this autograd library. + +Creating a Tensor +----------------- +:class:`~mygrad.Tensor` can be passed any "array-like" object of numerical data. +This includes numbers, sequences (e.g. lists), nested sequences, numpy-ndarrays, +and other mygrad-tensors. MyGrad also provides familiar numpy-style tensor-creation +functions (e.g. :func:`~mygrad.arange`, :func:`~mygrad.linspace`, etc.) + +>>> import mygrad as mg +>>> import numpy as np +>>> mg.tensor(2.3) # creating a 0-dimensional tensor +Tensor(2.3) +>>> mg.tensor(np.array([1.2, 3.0])) # casting a numpy-array to a tensor +Tensor([1.2, 3.0]) +>>> mg.tensor([[1, 2], [3, 4]]) # creating a 2-dimensional tensor from lists +Tensor([[1, 2], + [3, 4]]) +>>> mg.arange(4) # using numpy-style tensor creation functions +Tensor([0, 1, 2, 3]) + +Integer-valued tensors are treated as constants + +>>> mg.astensor(1, dtype=np.int8).constant +True + +By default, float-valued tensors are not treated as constants + +>>> mg.astensor(1, dtype=np.float32).constant +False + +Forward and Back-Propagation +---------------------------- +Let's construct a computational graph consisting of two zero-dimensional +tensors, ``x`` and ``y``, which are used to compute an output tensor, +``ℒ``. This is a "forward pass imperative" style for creating a computational +graph - the graph is constructed as we carry out the forward-pass computation. + +>>> x = mg.Tensor(3.0) +>>> y = mg.Tensor(2.0) +>>> ℒ = 2 * x + y ** 2 + +Invoking ``ℒ.backward()`` signals the computational graph to +compute the total-derivative of ``ℒ`` with respect to each one of its dependent +variables. I.e. ``x.grad`` will store ``dℒ/dx`` and ``y.grad`` will store +``dℒ/dy``. Thus we have back-propagated a gradient from ``ℒ`` through our graph. + +Each tensor of derivatives is computed elementwise.
That is, if ``x = Tensor(x0, x1, x2)``, +then ``dℒ/dx`` represents ``[dℒ/d(x0), dℒ/d(x1), dℒ/d(x2)]`` + +>>> ℒ.backward() # computes dℒ/dx and dℒ/dy +>>> x.grad # dℒ/dx +array(6.0) +>>> y.grad # dℒ/dy +array(4.0) +>>> ℒ.grad +array(1.0) # dℒ/dℒ + +Once the gradients are computed, the computational graph containing ``x``, +``y``, and ``ℒ`` is cleared automatically. Additionally, involving any +of these tensors in a new computational graph will automatically null +their gradients. + +>>> 2 * x +>>> x.grad is None +True + +Or, you can use the :func:`~mygrad.Tensor.null_grad` method to manually clear a +tensor's gradient + +>>> y.null_grad() +Tensor(2.) +>>> y.grad is None +True + + +Accessing the Underlying NumPy Array +------------------------------------ +:class:`~mygrad.Tensor` is a thin wrapper on ``numpy.ndarray``. A tensor's +underlying numpy-array can be accessed via ``.data``. This returns +a direct reference to the numpy array. + +>>> x = mg.tensor([1, 2]) +>>> x.data +array([1, 2]) + +>>> import numpy as np +>>> np.asarray(x) +array([1, 2]) + +Producing a "View" of a Tensor +------------------------------ +MyGrad's tensors exhibit the same view semantics and memory-sharing relationships +as NumPy arrays. I.e. any (non-scalar) tensor produced via basic indexing will share +memory with its parent. + +>>> x = mg.tensor([1., 2., 3., 4.]) +>>> y = x[:2] # the view: Tensor([1., 2.]) +>>> y.base is x +True +>>> np.shares_memory(x, y) +True + +Mutating shared data will propagate through views: + +>>> y *= -1 +>>> x +Tensor([-1., -2., 3., 4.]) +>>> y +Tensor([-1., -2.]) + +And this view relationship will also manifest between the tensors' gradients + +>>> (x ** 2).backward() +>>> x.grad +array([-2., -4., 6., 8.]) +>>> y.grad +array([-2., -4.]) + +Documentation for mygrad.Tensor +------------------------------- + +.. toctree:: + :maxdepth: 1 + :caption: Contents: + + generated/TensorClass + +.. currentmodule:: mygrad + +.. autosummary:: + :toctree: generated/ + + Tensor.astype + Tensor.backward + Tensor.base + Tensor.clear_graph + Tensor.constant + Tensor.copy + Tensor.creator + Tensor.dtype + Tensor.grad + Tensor.item + Tensor.ndim + Tensor.null_grad + Tensor.null_gradients + Tensor.shape + Tensor.size + Tensor.T + + + diff --git a/docs/_sources/tensor_creation.rst.txt b/docs/_sources/tensor_creation.rst.txt new file mode 100644 index 00000000..2b3e406d --- /dev/null +++ b/docs/_sources/tensor_creation.rst.txt @@ -0,0 +1,41 @@ +Tensor creation routines (:mod:`mygrad.tensor_creation`) +******************************************************** + +.. currentmodule:: mygrad + +Array-Like +---------- +.. autosummary:: + :toctree: generated/ + + tensor + asarray + astensor + + +Ones and zeros +-------------- +.. autosummary:: + :toctree: generated/ + + ones + ones_like + zeros + zeros_like + eye + identity + full + full_like + empty + empty_like + + +Numerical ranges +---------------- +.. autosummary:: + :toctree: generated/ + + arange + linspace + logspace + geomspace \ No newline at end of file diff --git a/docs/_sources/tensor_manipulation.rst.txt b/docs/_sources/tensor_manipulation.rst.txt new file mode 100644 index 00000000..35818239 --- /dev/null +++ b/docs/_sources/tensor_manipulation.rst.txt @@ -0,0 +1,55 @@ +Tensor manipulation routines (:mod:`mygrad.tensor_manip`) +********************************************************* + +.. currentmodule:: mygrad + +Changing array shape +-------------------- +.. 
autosummary:: + :toctree: generated/ + + ravel + reshape + Tensor.flatten + + +Transpose-like operations +------------------------- +.. autosummary:: + :toctree: generated/ + + moveaxis + roll + swapaxes + Tensor.T + transpose + + +Changing number of dimensions +----------------------------- +.. autosummary:: + :toctree: generated/ + + atleast_1d + atleast_2d + atleast_3d + broadcast_to + expand_dims + squeeze + + +Joining tensors +--------------- +.. autosummary:: + :toctree: generated/ + + concatenate + stack + + +Tiling tensors +-------------- +.. autosummary:: + :toctree: generated/ + + repeat diff --git a/docs/_sources/views.rst.txt b/docs/_sources/views.rst.txt new file mode 100644 index 00000000..aec84a9d --- /dev/null +++ b/docs/_sources/views.rst.txt @@ -0,0 +1,55 @@ +############################# +Views and In-Place Operations +############################# + +Producing a "View" of a Tensor +============================== + +MyGrad's tensors exhibit the same view semantics and memory-sharing relationships +as NumPy arrays. I.e. any (non-scalar) tensor produced via basic indexing will share +memory with its parent. + +>>> x = mg.tensor([1., 2., 3., 4.]) +>>> y = x[:2] # the view: Tensor([1., 2.]) +>>> y.base is x +True +>>> np.shares_memory(x, y) +True + +Mutating shared data will propagate through views: + +>>> y *= -1 +>>> x +Tensor([-1., -2., 3., 4.]) +>>> y +Tensor([-1., -2.]) + +And this view relationship will also manifest between the tensors' gradients + +>>> (x ** 2).backward() +>>> x.grad +array([-2., -4., 6., 8.]) +>>> y.grad +array([-2., -4.]) + +In-Place Operations are not Efficient +===================================== +It is important to note that although MyGrad's view semantics promote a rich parity +with NumPy, certain aspects should be avoided in the interest of optimized performance. +Namely, performing in-place operations on tensors is generally not more efficient than +their non-mutating counterparts. + +This is because MyGrad has to track the state of tensors that are involved in a computational +graph. Thus a mutated tensor must have its pre-augmented state stored for future reference; this +defeats the performance benefit of writing to an array's memory in-place. This is especially +inefficient if you are mutating a tensor involved with multiple views of the same memory +(by contrast, producing a view of a tensor *is* efficient, as one would expect). + +Thus these NumPy-like in-place semantics are supported by MyGrad not for performance +purposes, but to support convenient and familiar code-patterns and to enable one to +port NumPy code to MyGrad (or, in the future, inject MyGrad tensors into NumPy!!) and get +the exact same behavior. + +A final note: MyGrad's in-place operations, when run under :func:`~mygrad.no_autodiff` mode, +do not incur the extra costs noted above, and thus your code will enjoy the performance +benefits of in-place operations. \ No newline at end of file diff --git a/docs/_static/basic.css b/docs/_static/basic.css new file mode 100644 index 00000000..61572969 --- /dev/null +++ b/docs/_static/basic.css @@ -0,0 +1,903 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details.
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + 
border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; 
+ word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + 
+span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js new file mode 100644 index 00000000..d06a71d7 --- /dev/null +++ b/docs/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js new file mode 100644 index 00000000..7afbfc01 --- /dev/null +++ b/docs/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '2.3.0.post1.dev6', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: true, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/_static/example_graph.svg b/docs/_static/example_graph.svg new file mode 100644 index 00000000..a042b348 --- /dev/null +++ b/docs/_static/example_graph.svg @@ -0,0 +1,103 @@ + + + + + + +%3 + + +3102162992712 + +g + + +3102162990528 + +Subtract + + 
+3102162990528->3102162992712 + + + + +3102162990472 + +8 + + +3102162990472->3102162990528 + + + + +3102137770952 + +Add + + +3102137770952->3102162990472 + + + + +3102137770728 + +f + + +3102137770728->3102137770952 + + + + +3102137771176 + +Multiply + + +3102137771176->3102137770728 + + + + +3102137771064 + +x + + +3102137771064->3102137770952 + + + + +3102137771064->3102137771176 + + + + +3102137771288 + +y + + +3102137771288->3102137771176 + + + + +3102162990696 + +2 + + +3102162990696->3102162990528 + + + + + diff --git a/docs/_static/file.png b/docs/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/docs/_static/file.png differ diff --git a/docs/_static/language_data.js b/docs/_static/language_data.js new file mode 100644 index 00000000..250f5665 --- /dev/null +++ b/docs/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/_static/meerkat.png b/docs/_static/meerkat.png new file mode 100644 index 00000000..1b3f0c27 Binary files /dev/null and b/docs/_static/meerkat.png differ diff --git a/docs/_static/minus.png b/docs/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/docs/_static/minus.png differ diff --git a/docs/_static/my_theme.css b/docs/_static/my_theme.css new file mode 100644 index 00000000..d0c03e60 --- /dev/null +++ b/docs/_static/my_theme.css @@ -0,0 +1,3 @@ +.wy-nav-content { +max-width: 1000px !important; +} diff --git a/docs/_static/plot_directive.css b/docs/_static/plot_directive.css new file mode 100644 index 00000000..d45593c9 --- /dev/null +++ b/docs/_static/plot_directive.css @@ -0,0 +1,16 @@ +/* + * plot_directive.css + * ~~~~~~~~~~~~ + * + * Stylesheet controlling images created using the `plot` directive within + * 
Sphinx. + * + * :copyright: Copyright 2020-* by the Matplotlib development team. + * :license: Matplotlib, see LICENSE for details. + * + */ + +img.plot-directive { + border: 0; + max-width: 100%; +} diff --git a/docs/_static/plus.png b/docs/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/docs/_static/plus.png differ diff --git a/docs/_static/pygments.css b/docs/_static/pygments.css new file mode 100644 index 00000000..012e6a00 --- /dev/null +++ b/docs/_static/pygments.css @@ -0,0 +1,152 @@ +html[data-theme="light"] .highlight pre { line-height: 125%; } +html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight .hll { background-color: #fae4c2 } +html[data-theme="light"] .highlight { background: #fefefe; color: #080808 } +html[data-theme="light"] .highlight .c { color: #515151 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #a12236 } /* Error */ +html[data-theme="light"] .highlight .k { color: #6730c5 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #7f4707 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #080808 } /* Name */ +html[data-theme="light"] .highlight .o { color: #00622f } /* Operator */ +html[data-theme="light"] .highlight .p { color: #080808 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #515151 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #515151 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #515151 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #515151 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #515151 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #005b82 } /* Generic.Deleted */ +html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="light"] .highlight .gh { color: #005b82 } /* Generic.Heading */ +html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="light"] .highlight .gu { color: #005b82 } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #6730c5 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #6730c5 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #6730c5 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #6730c5 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #6730c5 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #7f4707 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #7f4707 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #7f4707 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #00622f } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #912583 } /* 
Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #7f4707 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #005b82 } /* Name.Class */ +html[data-theme="light"] .highlight .no { color: #005b82 } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #7f4707 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #00622f } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #6730c5 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { color: #005b82 } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #7f4707 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #080808 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #080808 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #005b82 } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #005b82 } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #a12236 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #6730c5 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #080808 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #080808 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #7f4707 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #7f4707 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #7f4707 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #7f4707 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #7f4707 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #00622f } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #00622f } /* Literal.String.Backtick */ +html[data-theme="light"] .highlight .sc { color: #00622f } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #00622f } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #00622f } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #00622f } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #00622f } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #00622f } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si { color: #00622f } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: #00622f } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #a12236 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #00622f } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #005b82 } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #7f4707 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #005b82 } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #a12236 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #a12236 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #a12236 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #7f4707 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #7f4707 } /* Literal.Number.Integer.Long */ +html[data-theme="dark"] .highlight pre { line-height: 125%; } 
+html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e } +html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 } +html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */ +html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */ +html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */ +html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */ +html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */ +html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */ +html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */ +html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */ +html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */ +html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */ +html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */ +html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */ +html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */ +html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */ +html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */ +html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */ +html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */ +html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */ +html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */ +html[data-theme="dark"] .highlight .kt { color: #ffd900 } /* Keyword.Type */ +html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */ +html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */ +html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */ +html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */ +html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */ +html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */ +html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */ +html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */ +html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */ +html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */ +html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */ +html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* 
Name.Label */ +html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */ +html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */ +html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */ +html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */ +html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */ +html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */ +html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */ +html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */ +html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */ +html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */ +html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */ +html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */ +html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */ +html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */ +html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */ +html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */ +html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */ +html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */ +html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */ +html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */ +html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */ +html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */ +html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */ +html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */ +html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */ +html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */ +html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */ +html[data-theme="dark"] .highlight .vi { color: #ffa07a } /* Name.Variable.Instance */ +html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */ +html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/_static/scripts/bootstrap.js b/docs/_static/scripts/bootstrap.js new file mode 100644 index 00000000..766173ab --- /dev/null +++ b/docs/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! 
For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>w,afterRead:()=>b,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>G,auto:()=>r,basePlacements:()=>a,beforeMain:()=>v,beforeRead:()=>m,beforeWrite:()=>A,bottom:()=>n,clippingParents:()=>h,computeStyles:()=>et,createPopper:()=>Dt,createPopperBase:()=>Lt,createPopperLite:()=>$t,detectOverflow:()=>mt,end:()=>c,eventListeners:()=>nt,flip:()=>_t,hide:()=>yt,left:()=>o,main:()=>y,modifierPhases:()=>T,offset:()=>wt,placements:()=>g,popper:()=>d,popperGenerator:()=>kt,popperOffsets:()=>At,preventOverflow:()=>Et,read:()=>_,reference:()=>f,right:()=>s,start:()=>l,top:()=>i,variationPlacements:()=>p,viewport:()=>u,write:()=>E});var i="top",n="bottom",s="right",o="left",r="auto",a=[i,n,s,o],l="start",c="end",h="clippingParents",u="viewport",d="popper",f="reference",p=a.reduce((function(t,e){return t.concat([e+"-"+l,e+"-"+c])}),[]),g=[].concat(a,[r]).reduce((function(t,e){return t.concat([e,e+"-"+l,e+"-"+c])}),[]),m="beforeRead",_="read",b="afterRead",v="beforeMain",y="main",w="afterMain",A="beforeWrite",E="write",C="afterWrite",T=[m,_,b,v,y,w,A,E,C];function O(t){return t?(t.nodeName||"").toLowerCase():null}function x(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function k(t){return t instanceof x(t).Element||t instanceof Element}function L(t){return t instanceof x(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof x(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];L(s)&&O(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});L(n)&&O(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function S(t){return t.split("-")[0]}var I=Math.max,N=Math.min,P=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function M(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&L(t)&&(s=t.offsetWidth>0&&P(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&P(n.height)/t.offsetHeight||1);var 
r=(k(t)?x(t):window).visualViewport,a=!M()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,u=n.height/o;return{width:h,height:u,top:c,right:l+h,bottom:c+u,left:l,x:l,y:c}}function W(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function F(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function B(t){return x(t).getComputedStyle(t)}function z(t){return["table","td","th"].indexOf(O(t))>=0}function q(t){return((k(t)?t.ownerDocument:t.document)||window.document).documentElement}function R(t){return"html"===O(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function V(t){return L(t)&&"fixed"!==B(t).position?t.offsetParent:null}function K(t){for(var e=x(t),i=V(t);i&&z(i)&&"static"===B(i).position;)i=V(i);return i&&("html"===O(i)||"body"===O(i)&&"static"===B(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&L(t)&&"fixed"===B(t).position)return null;var i=R(t);for(D(i)&&(i=i.host);L(i)&&["html","body"].indexOf(O(i))<0;){var n=B(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Q(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function X(t,e,i){return I(t,N(e,i))}function Y(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function U(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const G={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,r=t.state,l=t.name,c=t.options,h=r.elements.arrow,u=r.modifiersData.popperOffsets,d=S(r.placement),f=Q(d),p=[o,s].indexOf(d)>=0?"height":"width";if(h&&u){var g=function(t,e){return Y("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:U(t,a))}(c.padding,r),m=W(h),_="y"===f?i:o,b="y"===f?n:s,v=r.rects.reference[p]+r.rects.reference[f]-u[f]-r.rects.popper[p],y=u[f]-r.rects.reference[f],w=K(h),A=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,E=v/2-y/2,C=g[_],T=A-m[p]-g[b],O=A/2-m[p]/2+E,x=X(C,O,T),k=f;r.modifiersData[l]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&F(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function J(t){return t.split("-")[1]}var Z={top:"auto",right:"auto",bottom:"auto",left:"auto"};function tt(t){var e,r=t.popper,a=t.popperRect,l=t.placement,h=t.variation,u=t.offsets,d=t.position,f=t.gpuAcceleration,p=t.adaptive,g=t.roundOffsets,m=t.isFixed,_=u.x,b=void 0===_?0:_,v=u.y,y=void 0===v?0:v,w="function"==typeof g?g({x:b,y}):{x:b,y};b=w.x,y=w.y;var A=u.hasOwnProperty("x"),E=u.hasOwnProperty("y"),C=o,T=i,O=window;if(p){var k=K(r),L="clientHeight",D="clientWidth";k===x(r)&&"static"!==B(k=q(r)).position&&"absolute"===d&&(L="scrollHeight",D="scrollWidth"),(l===i||(l===o||l===s)&&h===c)&&(T=n,y-=(m&&k===O&&O.visualViewport?O.visualViewport.height:k[L])-a.height,y*=f?1:-1),l!==o&&(l!==i&&l!==n||h!==c)||(C=s,b-=(m&&k===O&&O.visualViewport?O.visualViewport.width:k[D])-a.width,b*=f?1:-1)}var $,S=Object.assign({position:d},p&&Z),I=!0===g?function(t,e){var 
i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:P(i*s)/s||0,y:P(n*s)/s||0}}({x:b,y},x(r)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},S,(($={})[T]=E?"0":"",$[C]=A?"0":"",$.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",$)):Object.assign({},S,((e={})[T]=E?y+"px":"",e[C]=A?b+"px":"",e.transform="",e))}const et={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:S(e.placement),variation:J(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,tt(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,tt(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var it={passive:!0};const nt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=x(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,it)})),a&&l.addEventListener("resize",i.update,it),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,it)})),a&&l.removeEventListener("resize",i.update,it)}},data:{}};var st={left:"right",right:"left",bottom:"top",top:"bottom"};function ot(t){return t.replace(/left|right|bottom|top/g,(function(t){return st[t]}))}var rt={start:"end",end:"start"};function at(t){return t.replace(/start|end/g,(function(t){return rt[t]}))}function lt(t){var e=x(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ct(t){return H(q(t)).left+lt(t).scrollLeft}function ht(t){var e=B(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ut(t){return["html","body","#document"].indexOf(O(t))>=0?t.ownerDocument.body:L(t)&&ht(t)?t:ut(R(t))}function dt(t,e){var i;void 0===e&&(e=[]);var n=ut(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=x(n),r=s?[o].concat(o.visualViewport||[],ht(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(dt(R(r)))}function ft(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function pt(t,e,i){return e===u?ft(function(t,e){var i=x(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=M();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ct(t),y:l}}(t,i)):k(e)?function(t,e){var i=H(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):ft(function(t){var e,i=q(t),n=lt(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=I(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=I(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ct(t),l=-n.scrollTop;return"rtl"===B(s||i).direction&&(a+=I(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function 
gt(t){var e,r=t.reference,a=t.element,h=t.placement,u=h?S(h):null,d=h?J(h):null,f=r.x+r.width/2-a.width/2,p=r.y+r.height/2-a.height/2;switch(u){case i:e={x:f,y:r.y-a.height};break;case n:e={x:f,y:r.y+r.height};break;case s:e={x:r.x+r.width,y:p};break;case o:e={x:r.x-a.width,y:p};break;default:e={x:r.x,y:r.y}}var g=u?Q(u):null;if(null!=g){var m="y"===g?"height":"width";switch(d){case l:e[g]=e[g]-(r[m]/2-a[m]/2);break;case c:e[g]=e[g]+(r[m]/2-a[m]/2)}}return e}function mt(t,e){void 0===e&&(e={});var o=e,r=o.placement,l=void 0===r?t.placement:r,c=o.strategy,p=void 0===c?t.strategy:c,g=o.boundary,m=void 0===g?h:g,_=o.rootBoundary,b=void 0===_?u:_,v=o.elementContext,y=void 0===v?d:v,w=o.altBoundary,A=void 0!==w&&w,E=o.padding,C=void 0===E?0:E,T=Y("number"!=typeof C?C:U(C,a)),x=y===d?f:d,D=t.rects.popper,$=t.elements[A?x:y],S=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=dt(R(t)),i=["absolute","fixed"].indexOf(B(t).position)>=0&&L(t)?K(t):t;return k(i)?e.filter((function(t){return k(t)&&F(t,i)&&"body"!==O(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=pt(t,i,n);return e.top=I(s.top,e.top),e.right=N(s.right,e.right),e.bottom=N(s.bottom,e.bottom),e.left=I(s.left,e.left),e}),pt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(k($)?$:$.contextElement||q(t.elements.popper),m,b,p),P=H(t.elements.reference),j=gt({reference:P,element:D,strategy:"absolute",placement:l}),M=ft(Object.assign({},D,j)),W=y===d?M:P,z={top:S.top-W.top+T.top,bottom:W.bottom-S.bottom+T.bottom,left:S.left-W.left+T.left,right:W.right-S.right+T.right},V=t.modifiersData.offset;if(y===d&&V){var Q=V[l];Object.keys(z).forEach((function(t){var e=[s,n].indexOf(t)>=0?1:-1,o=[i,n].indexOf(t)>=0?"y":"x";z[t]+=Q[o]*e}))}return z}const _t={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,c=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var u=c.mainAxis,d=void 0===u||u,f=c.altAxis,m=void 0===f||f,_=c.fallbackPlacements,b=c.padding,v=c.boundary,y=c.rootBoundary,w=c.altBoundary,A=c.flipVariations,E=void 0===A||A,C=c.allowedAutoPlacements,T=e.options.placement,O=S(T),x=_||(O!==T&&E?function(t){if(S(t)===r)return[];var e=ot(t);return[at(t),e,at(e)]}(T):[ot(T)]),k=[T].concat(x).reduce((function(t,i){return t.concat(S(i)===r?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,l=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?g:c,u=J(n),d=u?l?p:p.filter((function(t){return J(t)===u})):a,f=d.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=d);var m=f.reduce((function(e,i){return e[i]=mt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[S(i)],e}),{});return Object.keys(m).sort((function(t,e){return m[t]-m[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:E,allowedAutoPlacements:C}):i)}),[]),L=e.rects.reference,D=e.rects.popper,$=new Map,I=!0,N=k[0],P=0;P=0,F=W?"width":"height",B=mt(e,{placement:j,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=W?H?s:o:H?n:i;L[F]>D[F]&&(z=ot(z));var q=ot(z),R=[];if(d&&R.push(B[M]<=0),m&&R.push(B[z]<=0,B[q]<=0),R.every((function(t){return t}))){N=j,I=!1;break}$.set(j,R)}if(I)for(var V=function(t){var e=k.find((function(e){var i=$.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},K=E?3:1;K>0&&"break"!==V(K);K--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function bt(t,e,i){return void 
0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function vt(t){return[i,s,n,o].some((function(e){return t[e]>=0}))}const yt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=mt(e,{elementContext:"reference"}),a=mt(e,{altBoundary:!0}),l=bt(r,n),c=bt(a,s,o),h=vt(l),u=vt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:u},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":u})}},wt={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,n=t.options,r=t.name,a=n.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,n){return t[n]=function(t,e,n){var r=S(t),a=[o,i].indexOf(r)>=0?-1:1,l="function"==typeof n?n(Object.assign({},e,{placement:t})):n,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[o,s].indexOf(r)>=0?{x:h,y:c}:{x:c,y:h}}(n,e.rects,l),t}),{}),h=c[e.placement],u=h.x,d=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=u,e.modifiersData.popperOffsets.y+=d),e.modifiersData[r]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Et={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,r=t.options,a=t.name,c=r.mainAxis,h=void 0===c||c,u=r.altAxis,d=void 0!==u&&u,f=r.boundary,p=r.rootBoundary,g=r.altBoundary,m=r.padding,_=r.tether,b=void 0===_||_,v=r.tetherOffset,y=void 0===v?0:v,w=mt(e,{boundary:f,rootBoundary:p,padding:m,altBoundary:g}),A=S(e.placement),E=J(e.placement),C=!E,T=Q(A),O="x"===T?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,D="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,$="number"==typeof D?{mainAxis:D,altAxis:D}:Object.assign({mainAxis:0,altAxis:0},D),P=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,j={x:0,y:0};if(x){if(h){var M,H="y"===T?i:o,F="y"===T?n:s,B="y"===T?"height":"width",z=x[T],q=z+w[H],R=z-w[F],V=b?-L[B]/2:0,Y=E===l?k[B]:L[B],U=E===l?-L[B]:-k[B],G=e.elements.arrow,Z=b&&G?W(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[H],it=tt[F],nt=X(0,k[B],Z[B]),st=C?k[B]/2-V-nt-et-$.mainAxis:Y-nt-et-$.mainAxis,ot=C?-k[B]/2+V+nt+it+$.mainAxis:U+nt+it+$.mainAxis,rt=e.elements.arrow&&K(e.elements.arrow),at=rt?"y"===T?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(M=null==P?void 0:P[T])?M:0,ct=z+ot-lt,ht=X(b?N(q,z+st-lt-at):q,z,b?I(R,ct):R);x[T]=ht,j[T]=ht-z}if(d){var ut,dt="x"===T?i:o,ft="x"===T?n:s,pt=x[O],gt="y"===O?"height":"width",_t=pt+w[dt],bt=pt-w[ft],vt=-1!==[i,o].indexOf(A),yt=null!=(ut=null==P?void 0:P[O])?ut:0,wt=vt?_t:pt-k[gt]-L[gt]-yt+$.altAxis,At=vt?pt+k[gt]+L[gt]-yt-$.altAxis:bt,Et=b&&vt?function(t,e,i){var n=X(t,e,i);return n>i?i:n}(wt,pt,At):X(b?wt:_t,pt,b?At:bt);x[O]=Et,j[O]=Et-pt}e.modifiersData[a]=j}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=L(e),r=L(e)&&function(t){var e=t.getBoundingClientRect(),i=P(e.width)/t.offsetWidth||1,n=P(e.height)/t.offsetHeight||1;return 
1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==O(e)||ht(a))&&(c=(n=e)!==x(n)&&L(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:lt(n)),L(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ct(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Tt(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var Ot={placement:"bottom",modifiers:[],strategy:"absolute"};function xt(){for(var t=arguments.length,e=new Array(t),i=0;i{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e},Nt=t=>{const e=It(t);return e&&document.querySelector(e)?e:null},Pt=t=>{const e=It(t);return e?document.querySelector(e):null},jt=t=>{t.dispatchEvent(new Event(St))},Mt=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Mt(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(t):null,Wt=t=>{if(!Mt(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Ft=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),Bt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?Bt(t.parentNode):null},zt=()=>{},qt=t=>{t.offsetHeight},Rt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Vt=[],Kt=()=>"rtl"===document.documentElement.dir,Qt=t=>{var e;e=()=>{const e=Rt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Vt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Vt)t()})),Vt.push(e)):e()},Xt=t=>{"function"==typeof t&&t()},Yt=(t,e,i=!0)=>{if(!i)return void Xt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(St,o),Xt(t))};e.addEventListener(St,o),setTimeout((()=>{s||jt(e)}),n)},Ut=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Gt=/[^.]*(?=\..*)\.|.*/,Jt=/\..*/,Zt=/::\d+$/,te={};let ee=1;const ie={mouseenter:"mouseover",mouseleave:"mouseout"},ne=new 
Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function se(t,e){return e&&`${e}::${ee++}`||t.uidEvent||ee++}function oe(t){const e=se(t);return t.uidEvent=e,te[e]=te[e]||{},te[e]}function re(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function ae(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=ue(t);return ne.has(o)||(o=t),[n,s,o]}function le(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=ae(e,i,n);if(e in ie){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=oe(t),c=l[a]||(l[a]={}),h=re(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const u=se(r,e.replace(Gt,"")),d=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return fe(s,{delegateTarget:r}),n.oneOff&&de.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return fe(n,{delegateTarget:t}),i.oneOff&&de.off(t,n.type,e),e.apply(t,[n])}}(t,r);d.delegationSelector=o?i:null,d.callable=r,d.oneOff=s,d.uidEvent=u,c[u]=d,t.addEventListener(a,d,o)}function ce(t,e,i,n,s){const o=re(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function he(t,e,i,n){const s=e[i]||{};for(const o of Object.keys(s))if(o.includes(n)){const n=s[o];ce(t,e,i,n.callable,n.delegationSelector)}}function ue(t){return t=t.replace(Jt,""),ie[t]||t}const de={on(t,e,i,n){le(t,e,i,n,!1)},one(t,e,i,n){le(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=ae(e,i,n),a=r!==e,l=oe(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))he(t,l,i,e.slice(1));for(const i of Object.keys(c)){const n=i.replace(Zt,"");if(!a||e.includes(n)){const e=c[i];ce(t,l,r,e.callable,e.delegationSelector)}}}else{if(!Object.keys(c).length)return;ce(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Rt();let s=null,o=!0,r=!0,a=!1;e!==ue(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());let l=new Event(e,{bubbles:o,cancelable:!0});return l=fe(l,i),a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function fe(t,e){for(const[i,n]of Object.entries(e||{}))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}const pe=new Map,ge={set(t,e,i){pe.has(t)||pe.set(t,new Map);const n=pe.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>pe.has(t)&&pe.get(t).get(e)||null,remove(t,e){if(!pe.has(t))return;const i=pe.get(t);i.delete(e),0===i.size&&pe.delete(t)}};function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function _e(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const be={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${_e(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${_e(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${_e(e)}`))};class ve{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Mt(e)?be.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Mt(e)?be.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const n of Object.keys(e)){const s=e[n],o=t[n],r=Mt(o)?"element":null==(i=o)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(r))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${r}" but expected type "${s}".`)}var i}}class ye extends ve{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),ge.set(this._element,this.constructor.DATA_KEY,this))}dispose(){ge.remove(this._element,this.constructor.DATA_KEY),de.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Yt(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return ge.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.2.3"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const we=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;de.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Ft(this))return;const s=Pt(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Ee=`close${Ae}`,Ce=`closed${Ae}`;class Te extends ye{static get NAME(){return"alert"}close(){if(de.trigger(this._element,Ee).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),de.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Te.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named 
"${t}"`);e[t](this)}}))}}we(Te,"close"),Qt(Te);const Oe='[data-bs-toggle="button"]';class xe extends ye{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=xe.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}de.on(document,"click.bs.button.data-api",Oe,(t=>{t.preventDefault();const e=t.target.closest(Oe);xe.getOrCreateInstance(e).toggle()})),Qt(xe);const ke={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Ft(t)&&Wt(t)))}},Le=".bs.swipe",De=`touchstart${Le}`,$e=`touchmove${Le}`,Se=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},je={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class Me extends ve{constructor(t,e){super(),this._element=t,t&&Me.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return je}static get NAME(){return"swipe"}dispose(){de.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Xt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Xt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(de.on(this._element,Ie,(t=>this._start(t))),de.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(de.on(this._element,De,(t=>this._start(t))),de.on(this._element,$e,(t=>this._move(t))),de.on(this._element,Se,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const He=".bs.carousel",We=".data-api",Fe="next",Be="prev",ze="left",qe="right",Re=`slide${He}`,Ve=`slid${He}`,Ke=`keydown${He}`,Qe=`mouseenter${He}`,Xe=`mouseleave${He}`,Ye=`dragstart${He}`,Ue=`load${He}${We}`,Ge=`click${He}${We}`,Je="carousel",Ze="active",ti=".active",ei=".carousel-item",ii=ti+ei,ni={ArrowLeft:qe,ArrowRight:ze},si={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},oi={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class ri extends 
ye{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=ke.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===Je&&this.cycle()}static get Default(){return si}static get DefaultType(){return oi}static get NAME(){return"carousel"}next(){this._slide(Fe)}nextWhenVisible(){!document.hidden&&Wt(this._element)&&this.next()}prev(){this._slide(Be)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?de.one(this._element,Ve,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void de.one(this._element,Ve,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?Fe:Be;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&de.on(this._element,Ke,(t=>this._keydown(t))),"hover"===this._config.pause&&(de.on(this._element,Qe,(()=>this.pause())),de.on(this._element,Xe,(()=>this._maybeEnableCycle()))),this._config.touch&&Me.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of ke.find(".carousel-item img",this._element))de.on(t,Ye,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ze)),rightCallback:()=>this._slide(this._directionToOrder(qe)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new Me(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=ni[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=ke.findOne(ti,this._indicatorsElement);e.classList.remove(Ze),e.removeAttribute("aria-current");const i=ke.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(Ze),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===Fe,s=e||Ut(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>de.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Re).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(Ze),i.classList.remove(Ze,c,l),this._isSliding=!1,r(Ve)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return ke.findOne(ii,this._element)}_getItems(){return 
ke.find(ei,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Kt()?t===ze?Be:Fe:t===ze?Fe:Be}_orderToDirection(t){return Kt()?t===Be?ze:qe:t===Be?qe:ze}static jQueryInterface(t){return this.each((function(){const e=ri.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}de.on(document,Ge,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=Pt(this);if(!e||!e.classList.contains(Je))return;t.preventDefault();const i=ri.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===be.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),de.on(window,Ue,(()=>{const t=ke.find('[data-bs-ride="carousel"]');for(const e of t)ri.getOrCreateInstance(e)})),Qt(ri);const ai=".bs.collapse",li=`show${ai}`,ci=`shown${ai}`,hi=`hide${ai}`,ui=`hidden${ai}`,di=`click${ai}.data-api`,fi="show",pi="collapse",gi="collapsing",mi=`:scope .${pi} .${pi}`,_i='[data-bs-toggle="collapse"]',bi={parent:null,toggle:!0},vi={parent:"(null|element)",toggle:"boolean"};class yi extends ye{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=ke.find(_i);for(const t of i){const e=Nt(t),i=ke.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return bi}static get DefaultType(){return vi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>yi.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(de.trigger(this._element,li).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(pi),this._element.classList.add(gi),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(gi),this._element.classList.add(pi,fi),this._element.style[e]="",de.trigger(this._element,ci)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(de.trigger(this._element,hi).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(gi),this._element.classList.remove(pi,fi);for(const t of this._triggerArray){const e=Pt(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(gi),this._element.classList.add(pi),de.trigger(this._element,ui)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(fi)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return 
this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(_i);for(const e of t){const t=Pt(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=ke.find(mi,this._config.parent);return ke.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=yi.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}de.on(document,di,_i,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();const e=Nt(this),i=ke.find(e);for(const t of i)yi.getOrCreateInstance(t,{toggle:!1}).toggle()})),Qt(yi);const wi="dropdown",Ai=".bs.dropdown",Ei=".data-api",Ci="ArrowUp",Ti="ArrowDown",Oi=`hide${Ai}`,xi=`hidden${Ai}`,ki=`show${Ai}`,Li=`shown${Ai}`,Di=`click${Ai}${Ei}`,$i=`keydown${Ai}${Ei}`,Si=`keyup${Ai}${Ei}`,Ii="show",Ni='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Pi=`${Ni}.${Ii}`,ji=".dropdown-menu",Mi=Kt()?"top-end":"top-start",Hi=Kt()?"top-start":"top-end",Wi=Kt()?"bottom-end":"bottom-start",Fi=Kt()?"bottom-start":"bottom-end",Bi=Kt()?"left-start":"right-start",zi=Kt()?"right-start":"left-start",qi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Ri={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Vi extends ye{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=ke.next(this._element,ji)[0]||ke.prev(this._element,ji)[0]||ke.findOne(ji,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return qi}static get DefaultType(){return Ri}static get NAME(){return wi}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Ft(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!de.trigger(this._element,ki,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))de.on(t,"mouseover",zt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Ii),this._element.classList.add(Ii),de.trigger(this._element,Li,t)}}hide(){if(Ft(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!de.trigger(this._element,Oi,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))de.off(t,"mouseover",zt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Ii),this._element.classList.remove(Ii),this._element.setAttribute("aria-expanded","false"),be.removeDataAttribute(this._menu,"popper"),de.trigger(this._element,xi,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Mt(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new 
TypeError(`${wi.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Mt(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Ii)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Bi;if(t.classList.contains("dropstart"))return zi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Hi:Mi:e?Fi:Wi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(be.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem({key:t,target:e}){const i=ke.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Wt(t)));i.length&&Ut(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Vi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=ke.find(Pi);for(const i of e){const e=Vi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ci,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ni)?this:ke.prev(this,Ni)[0]||ke.next(this,Ni)[0]||ke.findOne(Ni,t.delegateTarget.parentNode),o=Vi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}de.on(document,$i,Ni,Vi.dataApiKeydownHandler),de.on(document,$i,ji,Vi.dataApiKeydownHandler),de.on(document,Di,Vi.clearMenus),de.on(document,Si,Vi.clearMenus),de.on(document,Di,Ni,(function(t){t.preventDefault(),Vi.getOrCreateInstance(this).toggle()})),Qt(Vi);const Ki=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",Qi=".sticky-top",Xi="padding-right",Yi="margin-right";class Ui{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const 
t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,Xi,(e=>e+t)),this._setElementAttributes(Ki,Xi,(e=>e+t)),this._setElementAttributes(Qi,Yi,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,Xi),this._resetElementAttributes(Ki,Xi),this._resetElementAttributes(Qi,Yi)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&be.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=be.getDataAttribute(t,e);null!==i?(be.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Mt(t))e(t);else for(const i of ke.find(t,this._element))e(i)}}const Gi="backdrop",Ji="show",Zi=`mousedown.bs.${Gi}`,tn={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},en={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class nn extends ve{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return tn}static get DefaultType(){return en}static get NAME(){return Gi}show(t){if(!this._config.isVisible)return void Xt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Ji),this._emulateAnimation((()=>{Xt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Ji),this._emulateAnimation((()=>{this.dispose(),Xt(t)}))):Xt(t)}dispose(){this._isAppended&&(de.off(this._element,Zi),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),de.on(t,Zi,(()=>{Xt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Yt(t,this._getElement(),this._config.isAnimated)}}const sn=".bs.focustrap",on=`focusin${sn}`,rn=`keydown.tab${sn}`,an="backward",ln={autofocus:!0,trapElement:null},cn={autofocus:"boolean",trapElement:"element"};class hn extends ve{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return ln}static get DefaultType(){return cn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),de.off(document,sn),de.on(document,on,(t=>this._handleFocusin(t))),de.on(document,rn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,de.off(document,sn))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const 
i=ke.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===an?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?an:"forward")}}const un=".bs.modal",dn=`hide${un}`,fn=`hidePrevented${un}`,pn=`hidden${un}`,gn=`show${un}`,mn=`shown${un}`,_n=`resize${un}`,bn=`click.dismiss${un}`,vn=`mousedown.dismiss${un}`,yn=`keydown.dismiss${un}`,wn=`click${un}.data-api`,An="modal-open",En="show",Cn="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},On={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class xn extends ye{constructor(t,e){super(t,e),this._dialog=ke.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new Ui,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return On}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||de.trigger(this._element,gn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(An),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(de.trigger(this._element,dn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(En),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){for(const t of[window,this._dialog])de.off(t,un);this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new nn({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new hn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=ke.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(En),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,de.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){de.on(this._element,yn,(t=>{if("Escape"===t.key)return this._config.keyboard?(t.preventDefault(),void this.hide()):void this._triggerBackdropTransition()})),de.on(window,_n,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),de.on(this._element,vn,(t=>{de.one(this._element,bn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(An),this._resetAdjustments(),this._scrollBar.reset(),de.trigger(this._element,pn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(de.trigger(this._element,fn).defaultPrevented)return;const 
t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(Cn)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(Cn),this._queueCallback((()=>{this._element.classList.remove(Cn),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Kt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Kt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=xn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}de.on(document,wn,'[data-bs-toggle="modal"]',(function(t){const e=Pt(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),de.one(e,gn,(t=>{t.defaultPrevented||de.one(e,pn,(()=>{Wt(this)&&this.focus()}))}));const i=ke.findOne(".modal.show");i&&xn.getInstance(i).hide(),xn.getOrCreateInstance(e).toggle(this)})),we(xn),Qt(xn);const kn=".bs.offcanvas",Ln=".data-api",Dn=`load${kn}${Ln}`,$n="show",Sn="showing",In="hiding",Nn=".offcanvas.show",Pn=`show${kn}`,jn=`shown${kn}`,Mn=`hide${kn}`,Hn=`hidePrevented${kn}`,Wn=`hidden${kn}`,Fn=`resize${kn}`,Bn=`click${kn}${Ln}`,zn=`keydown.dismiss${kn}`,qn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Vn extends ye{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return qn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||de.trigger(this._element,Pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new Ui).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Sn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add($n),this._element.classList.remove(Sn),de.trigger(this._element,jn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(de.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(In),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove($n,In),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new Ui).reset(),de.trigger(this._element,Wn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new nn({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():de.trigger(this._element,Hn)}:null})}_initializeFocusTrap(){return new hn({trapElement:this._element})}_addEventListeners(){de.on(this._element,zn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():de.trigger(this._element,Hn))}))}static 
jQueryInterface(t){return this.each((function(){const e=Vn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}de.on(document,Bn,'[data-bs-toggle="offcanvas"]',(function(t){const e=Pt(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Ft(this))return;de.one(e,Wn,(()=>{Wt(this)&&this.focus()}));const i=ke.findOne(Nn);i&&i!==e&&Vn.getInstance(i).hide(),Vn.getOrCreateInstance(e).toggle(this)})),de.on(window,Dn,(()=>{for(const t of ke.find(Nn))Vn.getOrCreateInstance(t).show()})),de.on(window,Fn,(()=>{for(const t of ke.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Vn.getOrCreateInstance(t).hide()})),we(Vn),Qt(Vn);const Kn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Qn=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,Xn=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,Yn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Kn.has(i)||Boolean(Qn.test(t.nodeValue)||Xn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Un={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Gn={allowList:Un,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Jn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Zn={entry:"(string|element|function|null)",selector:"(string|element)"};class ts extends ve{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Gn}static get DefaultType(){return Jn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Zn)}_setContent(t,e,i){const n=ke.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Mt(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Yn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return"function"==typeof t?t(this):t}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const es=new Set(["sanitize","allowList","sanitizeFn"]),is="fade",ns="show",ss=".modal",os="hide.bs.modal",rs="hover",as="focus",ls={AUTO:"auto",TOP:"top",RIGHT:Kt()?"left":"right",BOTTOM:"bottom",LEFT:Kt()?"right":"left"},cs={allowList:Un,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,0],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},hs={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class us extends ye{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return cs}static get DefaultType(){return hs}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),de.off(this._element.closest(ss),os,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=de.trigger(this._element,this.constructor.eventName("show")),e=(Bt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),de.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(ns),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))de.on(t,"mouseover",zt);this._queueCallback((()=>{de.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!de.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(ns),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))de.off(t,"mouseover",zt);this._activeTrigger.click=!1,this._activeTrigger[as]=!1,this._activeTrigger[rs]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),de.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(is,ns),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(is),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new ts({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(is)}_isShown(){return this.tip&&this.tip.classList.contains(ns)}_createPopper(t){const e="function"==typeof 
this._config.placement?this._config.placement.call(this,t,this._element):this._config.placement,i=ls[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return"function"==typeof t?t.call(this._element):t}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,..."function"==typeof this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)de.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===rs?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===rs?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");de.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?as:rs]=!0,e._enter()})),de.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?as:rs]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},de.on(this._element.closest(ss),os,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=be.getDataAttributes(this._element);for(const t of Object.keys(e))es.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=us.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named 
"${t}"`);e[t]()}}))}}Qt(us);const ds={...us.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},fs={...us.DefaultType,content:"(null|string|element|function)"};class ps extends us{static get Default(){return ds}static get DefaultType(){return fs}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=ps.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(ps);const gs=".bs.scrollspy",ms=`activate${gs}`,_s=`click${gs}`,bs=`load${gs}.data-api`,vs="active",ys="[href]",ws=".nav-link",As=`${ws}, .nav-item > ${ws}, .list-group-item`,Es={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},Cs={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ts extends ye{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return Es}static get DefaultType(){return Cs}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(de.off(this._config.target,_s),de.on(this._config.target,_s,ys,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=ke.find(ys,this._config.target);for(const e of t){if(!e.hash||Ft(e))continue;const 
t=ke.findOne(e.hash,this._element);Wt(t)&&(this._targetLinks.set(e.hash,e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(vs),this._activateParents(t),de.trigger(this._element,ms,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))ke.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(vs);else for(const e of ke.parents(t,".nav, .list-group"))for(const t of ke.prev(e,As))t.classList.add(vs)}_clearActiveClass(t){t.classList.remove(vs);const e=ke.find(`${ys}.${vs}`,t);for(const t of e)t.classList.remove(vs)}static jQueryInterface(t){return this.each((function(){const e=Ts.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}de.on(window,bs,(()=>{for(const t of ke.find('[data-bs-spy="scroll"]'))Ts.getOrCreateInstance(t)})),Qt(Ts);const Os=".bs.tab",xs=`hide${Os}`,ks=`hidden${Os}`,Ls=`show${Os}`,Ds=`shown${Os}`,$s=`click${Os}`,Ss=`keydown${Os}`,Is=`load${Os}`,Ns="ArrowLeft",Ps="ArrowRight",js="ArrowUp",Ms="ArrowDown",Hs="active",Ws="fade",Fs="show",Bs=":not(.dropdown-toggle)",zs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',qs=`.nav-link${Bs}, .list-group-item${Bs}, [role="tab"]${Bs}, ${zs}`,Rs=`.${Hs}[data-bs-toggle="tab"], .${Hs}[data-bs-toggle="pill"], .${Hs}[data-bs-toggle="list"]`;class Vs extends ye{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),de.on(this._element,Ss,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?de.trigger(e,xs,{relatedTarget:t}):null;de.trigger(t,Ls,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Hs),this._activate(Pt(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),de.trigger(t,Ds,{relatedTarget:e})):t.classList.add(Fs)}),t,t.classList.contains(Ws)))}_deactivate(t,e){t&&(t.classList.remove(Hs),t.blur(),this._deactivate(Pt(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),de.trigger(t,ks,{relatedTarget:e})):t.classList.remove(Fs)}),t,t.classList.contains(Ws)))}_keydown(t){if(![Ns,Ps,js,Ms].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=[Ps,Ms].includes(t.key),i=Ut(this._getChildren().filter((t=>!Ft(t))),t.target,e,!0);i&&(i.focus({preventScroll:!0}),Vs.getOrCreateInstance(i).show())}_getChildren(){return ke.find(qs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const 
e=Pt(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`#${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=ke.findOne(t,i);s&&s.classList.toggle(n,e)};n(".dropdown-toggle",Hs),n(".dropdown-menu",Fs),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Hs)}_getInnerElement(t){return t.matches(qs)?t:ke.findOne(qs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Vs.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}de.on(document,$s,zs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Ft(this)||Vs.getOrCreateInstance(this).show()})),de.on(window,Is,(()=>{for(const t of ke.find(Rs))Vs.getOrCreateInstance(t)})),Qt(Vs);const Ks=".bs.toast",Qs=`mouseover${Ks}`,Xs=`mouseout${Ks}`,Ys=`focusin${Ks}`,Us=`focusout${Ks}`,Gs=`hide${Ks}`,Js=`hidden${Ks}`,Zs=`show${Ks}`,to=`shown${Ks}`,eo="hide",io="show",no="showing",so={animation:"boolean",autohide:"boolean",delay:"number"},oo={animation:!0,autohide:!0,delay:5e3};class ro extends ye{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return oo}static get DefaultType(){return so}static get NAME(){return"toast"}show(){de.trigger(this._element,Zs).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(eo),qt(this._element),this._element.classList.add(io,no),this._queueCallback((()=>{this._element.classList.remove(no),de.trigger(this._element,to),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(de.trigger(this._element,Gs).defaultPrevented||(this._element.classList.add(no),this._queueCallback((()=>{this._element.classList.add(eo),this._element.classList.remove(no,io),de.trigger(this._element,Js)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(io),super.dispose()}isShown(){return this._element.classList.contains(io)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){de.on(this._element,Qs,(t=>this._onInteraction(t,!0))),de.on(this._element,Xs,(t=>this._onInteraction(t,!1))),de.on(this._element,Ys,(t=>this._onInteraction(t,!0))),de.on(this._element,Us,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=ro.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}var ao;we(ro),Qt(ro),ao=function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new 
us(t,{delay:{show:500,hide:100}})}))},"loading"!=document.readyState?ao():document.addEventListener("DOMContentLoaded",ao)})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/docs/_static/scripts/bootstrap.js.LICENSE.txt b/docs/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 00000000..91ad10aa --- /dev/null +++ b/docs/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Bootstrap v5.2.3 (https://getbootstrap.com/) + * Copyright 2011-2022 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/docs/_static/scripts/bootstrap.js.map b/docs/_static/scripts/bootstrap.js.map new file mode 100644 index 00000000..d83e2f7c --- /dev/null +++ b/docs/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,ipBCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,I
AAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CCuFA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GA9EF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,E
AAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EA4CEtF,OA1CF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAahDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAQrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCnGN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,EAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,GAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CAuDA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GAzDF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EAYzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAM
K,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GChLT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD
,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAc
xG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAQtB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDH6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,EAAG,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA
,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,UAAkB,SAAU5L,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAAO,EACPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EA
AKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CClBA,IAEIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,IC/C6B/W,EAC3BgX,ED8CE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO,CAAC,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IEzE4B+X,EAC9B4B,EFwEMN,EDvCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CC8B+ByX,EEzEK7B,EFyEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WExE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B,OAJA0X,EAAOE,EAAQ5
X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MFsGM,OAvCA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IAoJFI,EAAM+W,iBAAiB5W,SAAQ,SAAUqI,GACvC,IAAI7I,EAAO6I,EAAM7I,KACb+X,EAAgBlP,EAAM1H,QACtBA,OAA4B,IAAlB4W,EAA2B,CAAC,EAAIA,EAC1ChX,EAAS8H,EAAM9H,OAEnB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IAjIS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CASAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAGA,IAFA,IAESoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IAUzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAnCb,CAbA,CAmEF,EAGA1N,QClM2BtK,EDkMV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,ECrMG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GD2LIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAK/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGrPnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAAiB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCQtE,MAEMC,GAAiB,gBAsBjBC,GAAc9Z,IAClB,IAAI+Z,EAAW/Z,EAAQga,aAAa,kBAEpC,IAAKD,GAAyB,MAAbA,EAAkB,CACjC,IAAIE,EAAgBja,EAAQga,aAAa,QAKzC,IAAKC,IAAkBA,EAAcC,SAAS,OAASD,EAAcE,WAAW,KAC9E,OAAO,KAILF,EAAcC,SAAS,OAASD,EAAcE,WAAW,OAC3DF,EAAgB,IAAIA,EAActX,MAAM,KAAK,MAG/CoX,EAAWE,GAAmC,MAAlBA,EAAwBA,EAAcG,OAAS,IAC7E,CAEA,OAAOL,CAAQ,EAGXM,GAAyBra,IAC7B,MAAM+Z,EAAWD,GAAY9Z,GAE7B,OAAI+Z,GACKjU,SAAS+C,cAAckR,GAAYA,EAGrC,IAAI,EAGPO,GAAyBta,IAC7B,MAAM+Z,EAAWD,GAAY9Z,GAC7B,OAAO+Z,EAAWjU,SAAS+C,cAAckR,GAAY,IAAI,EA0BrDQ,GAAuBva,IAC3BA,EAAQwa,cAAc,IAAIC,MAAMZ,IAAgB,EAG5C,GAAYa,MACXA,GAA4B,iBAAXA,UAIO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAGgB,IAApBA,EAAOE,UAGjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAGf,iBAAXA,GAAuBA,EAAOvJ,OAAS,EACzCrL,SAAS+C,cAAc6R,GAGzB,KAGHI,GAAY9a,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQ+a,iBAAiB5J,OAClD,OAAO,EAGT,MAAM6J,EAAgF,YAA7DtV,iBAAiB1F,GAASib,iBAAiB,cAE9DC,EAAgBlb,EAAQmb,QAAQ,uBAEtC,IAAKD,EACH,OAAOF,EAGT,GAAIE,IAAkBlb,EAAS,CAC7B,MAAMob,EAAUpb,EAAQmb,QAAQ,WAEhC,GAAIC,GAAWA,EAAQ5V,aAAe0V,EACpC,OAAO,EAGT,GAAgB,OAAZE,EACF,OAAO,CAEX,CAEA,OAAOJ,CAAgB,EAGnBK,GAAarb,IACZA,GAAWA,EAAQ4a,WAAaU,KAAKC,gBAItCvb,EAAQwb,UAAUvW,SAAS,mBAIC,IAArBjF,EAAQyb,SACVzb,EAAQyb,SAGVzb,EAAQ0b,aAAa,aAAoD,UAArC1b,EAAQga,aAAa,aAG5D2B,GAAiB3b,IACrB,IAAK8F,SAASC,gBAAgB6V,aAC5B,OAAO,KAIT,GAAmC,mBAAxB5b,EAAQqF,YAA4B,CAC7C,MAAMwW,EAAO7b,EAAQqF,cACrB,OAAOwW,aAAgB/a,WAAa+a,EAAO,IAC7C,CAEA,OAAI7b,aAAmBc,WACdd,EAIJA,EAAQwF,WAINmW,GAAe3b,EAAQwF,YAHrB,IAGgC,EAGrCsW,GAAO,OAWPC,GAAS/b,IACbA,EAAQuE,YAAY,EAGhByX,GA
AY,IACZ3b,OAAO4b,SAAWnW,SAAS6G,KAAK+O,aAAa,qBACxCrb,OAAO4b,OAGT,KAGHC,GAA4B,GAmB5BC,GAAQ,IAAuC,QAAjCrW,SAASC,gBAAgBqW,IAEvCC,GAAqBC,IAnBAC,QAoBN,KACjB,MAAMC,EAAIR,KAGV,GAAIQ,EAAG,CACL,MAAMzb,EAAOub,EAAOG,KACdC,EAAqBF,EAAEtb,GAAGH,GAChCyb,EAAEtb,GAAGH,GAAQub,EAAOK,gBACpBH,EAAEtb,GAAGH,GAAM6b,YAAcN,EAEzBE,EAAEtb,GAAGH,GAAM8b,WAAa,KACtBL,EAAEtb,GAAGH,GAAQ2b,EACNJ,EAAOK,gBAElB,GAjC0B,YAAxB7W,SAASgX,YAENZ,GAA0B/K,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMgR,KAAYL,GACrBK,GACF,IAIJL,GAA0B7J,KAAKkK,IAE/BA,GAsBA,EAGEQ,GAAUR,IACU,mBAAbA,GACTA,GACF,EAGIS,GAAyB,CAACT,EAAUU,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAH,GAAQR,GAIV,MACMY,EAnMiCnd,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACFod,EAAkB,gBAClBC,GACEhd,OAAOqF,iBAAiB1F,GAC5B,MAAMsd,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAE/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBza,MAAM,KAAK,GACnD0a,EAAkBA,EAAgB1a,MAAM,KAAK,GAjFf,KAkFtB4a,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA+KpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EAEb,MAAMC,EAAU,EACd5Q,aAEIA,IAAWiQ,IAIfU,GAAS,EACTV,EAAkBxR,oBAAoBoO,GAAgB+D,GACtDb,GAAQR,GAAS,EAGnBU,EAAkB1R,iBAAiBsO,GAAgB+D,GACnDC,YAAW,KACJF,GACHpD,GAAqB0C,EACvB,GACCE,EAAiB,EAahBW,GAAuB,CAACjR,EAAMkR,EAAeC,EAAeC,KAChE,MAAMC,EAAarR,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQmY,GAGzB,OAAe,IAAX7E,GACM8E,GAAiBC,EAAiBpR,EAAKqR,EAAa,GAAKrR,EAAK,IAGxEqM,GAAS8E,EAAgB,GAAK,EAE1BC,IACF/E,GAASA,EAAQgF,GAAcA,GAG1BrR,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOgF,EAAa,KAAI,EAarDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EAEvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAI5H,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAK/lB,SAAS6H,GAAa5e,EAAS6e,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBve,EAAQue,UAAYA,IAC/D,CAEA,SAASO,GAAiB9e,GACxB,MAAM6e,EAAMD,GAAa5e,GAGzB,OAFAA,EAAQue,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CA0CA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOzhB,OAAO0hB,OAAOH,GAAQpM,MAAKwM,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CAEA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAAW2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAM7B,OAJKX,GAAavH,IAAIqI,KACpBA,EAAYH,GAGP,CAACE,EAAaP,EAAUQ,EACjC,CAEA,SAASE,GAAW3f,EAASsf,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmCtf,EAC5C,OAGF,IAAKwf,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAGzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EAAe3e,GACZ,SAAUke,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,eAAe9a,SAASma,EAAMU,eAC/G,OAAO5e,EAAGjD,KAAK+hB,KAAMZ,EAEzB,EAGFH,EAAWY,EAAaZ,EAC1B,CAEA,MAAMD,EAASF,GAAiB9e,GAC1BigB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MAEjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAIvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkB1T,QAAQuS,GAAgB,KACvEjd,EAAKse,EAzEb,SAAoCxf,EAAS+Z,EAAU7Y,GACrD,OAAO,SAAS0c,EAAQwB,GACtB,MAAMe,EAAcngB,EAAQogB,iBAAiBrG,GAE7C,IAAK,IAAI,OACP/M,GACEoS,EAAOpS,GAAUA,IAAWgT,KAAMhT,EAASA,EAAOxH,WACpD,IAAK,MAAM6a,KAAcF,EACvB,GAAIE,IAAerT,EAYnB,OARAsT,GAAWlB,EAAO,CAChBW,eAAgB/S,IAGd4Q,EAAQgC,QACVW,GAAaC,IAAIxgB,EAASof,EAAMqB,KAAM1G,EAAU7Y,GAG3CA,EAAGwf,MAAM1T,EAAQ,CAACoS,GAG/B,CACF,CAiD2BuB,CAA2B3gB,EAAS4d,EAASqB,GAvFxE,SAA0Bjf,EAASkB,GACjC,OAAO,SAAS0c,EAAQwB,GAStB,OARAkB,GAAWlB,EAAO,CAChBW,eAAgB/f,IAGd4d,EAAQgC,QACVW,GAAaC,IAAIxgB,EAASof,EAAMqB,KAAMvf,GAGjCA,EAAGwf,MAAM1gB,EAAS,CAACof,GAC5B,CACF,CA2EoFwB,CAAiB5gB,EAASif,GAC5G/d,EAAGge,mBAAqBM,EAAc5B,EAAU,KAChD1c,EAAG+d,SAAWA,EACd/d,EAAG0e,OAASA,EACZ1e,EAAGqd,S
AAWM,EACdoB,EAASpB,GAAO3d,EAChBlB,EAAQuL,iBAAiBkU,EAAWve,EAAIse,EAC1C,CAEA,SAASqB,GAAc7gB,EAASgf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMhe,EAAK6d,GAAYC,EAAOS,GAAY7B,EAASsB,GAE9Che,IAILlB,EAAQyL,oBAAoBgU,EAAWve,EAAI4f,QAAQ5B,WAC5CF,EAAOS,GAAWve,EAAGqd,UAC9B,CAEA,SAASwC,GAAyB/gB,EAASgf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAEhD,IAAK,MAAMyB,KAAczjB,OAAO4D,KAAK4f,GACnC,GAAIC,EAAWhH,SAAS8G,GAAY,CAClC,MAAM5B,EAAQ6B,EAAkBC,GAChCL,GAAc7gB,EAASgf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAClE,CAEJ,CAEA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMxT,QAAQwS,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CAEA,MAAMmB,GAAe,CACnBY,GAAGnhB,EAASof,EAAOxB,EAAS2B,GAC1BI,GAAW3f,EAASof,EAAOxB,EAAS2B,GAAoB,EAC1D,EAEA6B,IAAIphB,EAASof,EAAOxB,EAAS2B,GAC3BI,GAAW3f,EAASof,EAAOxB,EAAS2B,GAAoB,EAC1D,EAEAiB,IAAIxgB,EAASsf,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmCtf,EAC5C,OAGF,MAAOwf,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrF8B,EAAc5B,IAAcH,EAC5BN,EAASF,GAAiB9e,GAC1BihB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C6B,EAAchC,EAAkBnF,WAAW,KAEjD,QAAwB,IAAb8E,EAAX,CAUA,GAAIqC,EACF,IAAK,MAAMC,KAAgB9jB,OAAO4D,KAAK2d,GACrC+B,GAAyB/gB,EAASgf,EAAQuC,EAAcjC,EAAkBzM,MAAM,IAIpF,IAAK,MAAM2O,KAAe/jB,OAAO4D,KAAK4f,GAAoB,CACxD,MAAMC,EAAaM,EAAY5V,QAAQyS,GAAe,IAEtD,IAAKgD,GAAe/B,EAAkBpF,SAASgH,GAAa,CAC1D,MAAM9B,EAAQ6B,EAAkBO,GAChCX,GAAc7gB,EAASgf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAClE,CACF,CAfA,KARA,CAEE,IAAKzhB,OAAO4D,KAAK4f,GAAmB9P,OAClC,OAGF0P,GAAc7gB,EAASgf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAgBF,EAEA6D,QAAQzhB,EAASof,EAAO3H,GACtB,GAAqB,iBAAV2H,IAAuBpf,EAChC,OAAO,KAGT,MAAMwc,EAAIR,KAGV,IAAI0F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJHzC,IADFM,GAAaN,IAOZ5C,IACjBkF,EAAclF,EAAE/B,MAAM2E,EAAO3H,GAC7B+E,EAAExc,GAASyhB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAGjC,IAAIC,EAAM,IAAIxH,MAAM2E,EAAO,CACzBuC,UACAO,YAAY,IAgBd,OAdAD,EAAM3B,GAAW2B,EAAKxK,GAElBoK,GACFI,EAAIE,iBAGFP,GACF5hB,EAAQwa,cAAcyH,GAGpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAGPF,CACT,GAIF,SAAS3B,GAAWziB,EAAKukB,GACvB,IAAK,MAAO7kB,EAAKa,KAAUX,OAAO4kB,QAAQD,GAAQ,CAAC,GACjD,IACEvkB,EAAIN,GAAOa,CACb,CAAE,MAAOkkB,GACP7kB,OAAOC,eAAeG,EAAKN,EAAK,CAC9BglB,cAAc,EAEd3kB,IAAG,IACMQ,GAIb,CAGF,OAAOP,CACT,CAYA,MAAM2kB,GAAa,IAAI7Q,IACjB8Q,GAAO,CACXjQ,IAAIxS,EAASzC,EAAKyN,GACXwX,GAAWpL,IAAIpX,IAClBwiB,GAAWhQ,IAAIxS,EAAS,IAAI2R,KAG9B,MAAM+Q,EAAcF,GAAW5kB,IAAIoC,GAG9B0iB,EAAYtL,IAAI7Z,IAA6B,IAArBmlB,EAAYC,KAMzCD,EAAYlQ,IAAIjV,EAAKyN,GAJnB4X,QAAQC,MAAM,+EAA+Exf,MAAMyf,KAAKJ,EAAYrhB,QAAQ,MAKhI,EAEAzD,IAAG,CAACoC,EAASzC,IACPilB,GAAWpL,IAAIpX,IACVwiB,GAAW5kB,IAAIoC,GAASpC,IAAIL,IAG9B,KAGTwlB,OAAO/iB,EAASzC,GACd,IAAKilB,GAAWpL,IAAIpX,GAClB,OAGF,MAAM0iB,EAAcF,GAAW5kB,IAAIoC,GACnC0iB,EAAYM,OAAOzlB,GAEM,IAArBmlB,EAAYC,MACdH,GAAWQ,OAAOhjB,EAEtB,GAUF,SAASijB,GAAc7kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAGT,GAAc,UAAVA,EACF,OAAO,EAGT,GAAIA,IAAUmf,OAAOnf,GAAOkC,WAC1B,OAAOid,OAAOnf,GAGhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAGT,GAAqB,iBAAVA,EACT,OAAOA,EAGT,IACE,OAAO8kB,KAAKC,MAAMC,mBAAmBhlB,GACvC,CAAE,MAAOkkB,GACP,OAAOlkB,CACT,CACF,CAEA,SAASilB,GAAiB9lB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU0X,GAAO,IAAIA,EAAIpjB,iBAC9C,CAEA,MAAMqjB,GAAc,CAClBC,iBAAiBxjB,EAASzC,EAAKa,GAC7B4B,EAAQ6B,aAAa,WAAWwhB,GAAiB9lB,KAAQa,EAC3D,EAEAqlB,oBAAoBzjB,EAASzC,GAC3ByC,EAAQ4B,gBAAgB,WAAWyhB,GAAiB9lB,KACtD,EAEAmmB,kBAAkB1jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAGV,MAAM0B,EAAa,CAAC,EACdiiB,EAASlmB,OAAO4D,KAAKrB,EAAQ4jB,SAAShd,QAAOrJ,GAAOA,EAAI4c,WAAW,QAAU5c,EAAI4c,WAAW,cAElG,IAAK,MAAM5c,KAAOomB,EAAQ,CACxB,IAAIE,EAAUtmB,EAAIqO,QAAQ,MAAO,IACjCiY,EAAUA,EAAQC,OAAO,GAAG5jB,cAAgB2jB,EAAQhR,MAAM,EAAGgR,EAAQ1S,QACrEzP,EAAWmiB,GAAWZ,GAAcjjB,EAAQ4jB,QAAQrmB,GACtD,CAEA,OAAOmE,CACT,EAEAqiB,iBAAgB,C
AAC/jB,EAASzC,IACjB0lB,GAAcjjB,EAAQga,aAAa,WAAWqJ,GAAiB9lB,QAe1E,MAAMymB,GAEOC,qBACT,MAAO,CAAC,CACV,CAEWC,yBACT,MAAO,CAAC,CACV,CAEWzH,kBACT,MAAM,IAAI0H,MAAM,sEAClB,CAEAC,WAAWC,GAMT,OALAA,EAASrE,KAAKsE,gBAAgBD,GAC9BA,EAASrE,KAAKuE,kBAAkBF,GAEhCrE,KAAKwE,iBAAiBH,GAEfA,CACT,CAEAE,kBAAkBF,GAChB,OAAOA,CACT,CAEAC,gBAAgBD,EAAQrkB,GACtB,MAAMykB,EAAa,GAAUzkB,GAAWujB,GAAYQ,iBAAiB/jB,EAAS,UAAY,CAAC,EAE3F,MAAO,IAAKggB,KAAK0E,YAAYT,WACD,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAUzkB,GAAWujB,GAAYG,kBAAkB1jB,GAAW,CAAC,KAC7C,iBAAXqkB,EAAsBA,EAAS,CAAC,EAE/C,CAEAG,iBAAiBH,EAAQM,EAAc3E,KAAK0E,YAAYR,aACtD,IAAK,MAAM3hB,KAAY9E,OAAO4D,KAAKsjB,GAAc,CAC/C,MAAMC,EAAgBD,EAAYpiB,GAC5BnE,EAAQimB,EAAO9hB,GACfsiB,EAAY,GAAUzmB,GAAS,UA1uBrCsc,OADSA,EA2uB+Ctc,GAzuBnD,GAAGsc,IAGLjd,OAAOM,UAAUuC,SAASrC,KAAKyc,GAAQoK,MAAM,eAAe,GAAG5kB,cAwuBlE,IAAK,IAAI6kB,OAAOH,GAAe9gB,KAAK+gB,GAClC,MAAM,IAAIG,UAAU,GAAGhF,KAAK0E,YAAYjI,KAAKwI,0BAA0B1iB,qBAA4BsiB,yBAAiCD,MAExI,CAhvBWlK,KAivBb,EAmBF,MAAMwK,WAAsBlB,GAC1BU,YAAY1kB,EAASqkB,GACnBc,SACAnlB,EAAU6a,GAAW7a,MAMrBggB,KAAKoF,SAAWplB,EAChBggB,KAAKqF,QAAUrF,KAAKoE,WAAWC,GAC/B5B,GAAKjQ,IAAIwN,KAAKoF,SAAUpF,KAAK0E,YAAYY,SAAUtF,MACrD,CAGAuF,UACE9C,GAAKM,OAAO/C,KAAKoF,SAAUpF,KAAK0E,YAAYY,UAC5C/E,GAAaC,IAAIR,KAAKoF,SAAUpF,KAAK0E,YAAYc,WAEjD,IAAK,MAAMC,KAAgBhoB,OAAOioB,oBAAoB1F,MACpDA,KAAKyF,GAAgB,IAEzB,CAEAE,eAAepJ,EAAUvc,EAAS4lB,GAAa,GAC7C5I,GAAuBT,EAAUvc,EAAS4lB,EAC5C,CAEAxB,WAAWC,GAMT,OALAA,EAASrE,KAAKsE,gBAAgBD,EAAQrE,KAAKoF,UAC3Cf,EAASrE,KAAKuE,kBAAkBF,GAEhCrE,KAAKwE,iBAAiBH,GAEfA,CACT,CAGAwB,mBAAmB7lB,GACjB,OAAOyiB,GAAK7kB,IAAIid,GAAW7a,GAAUggB,KAAKsF,SAC5C,CAEAO,2BAA2B7lB,EAASqkB,EAAS,CAAC,GAC5C,OAAOrE,KAAK8F,YAAY9lB,IAAY,IAAIggB,KAAKhgB,EAA2B,iBAAXqkB,EAAsBA,EAAS,KAC9F,CAEW0B,qBACT,MApDY,OAqDd,CAEWT,sBACT,MAAO,MAAMtF,KAAKvD,MACpB,CAEW+I,uBACT,MAAO,IAAIxF,KAAKsF,UAClB,CAEAO,iBAAiB9kB,GACf,MAAO,GAAGA,IAAOif,KAAKwF,WACxB,EAWF,MAAMQ,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAUT,YACvCzkB,EAAOklB,EAAUxJ,KACvB8D,GAAaY,GAAGrb,SAAUqgB,EAAY,qBAAqBplB,OAAU,SAAUqe,GAK7E,GAJI,CAAC,IAAK,QAAQlF,SAAS8F,KAAKoG,UAC9BhH,EAAM+C,iBAGJ9G,GAAW2E,MACb,OAGF,MAAMhT,EAASsN,GAAuB0F,OAASA,KAAK7E,QAAQ,IAAIpa,KAC/CklB,EAAUI,oBAAoBrZ,GAEtCkZ,IACX,GAAE,EAeEI,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAO9B,MAAMG,WAAcvB,GAEPzI,kBACT,MAdW,OAeb,CAGAiK,QAGE,GAFmBnG,GAAakB,QAAQzB,KAAKoF,SAAUmB,IAExC1E,iBACb,OAGF7B,KAAKoF,SAAS5J,UAAUuH,OAnBF,QAqBtB,MAAM6C,EAAa5F,KAAKoF,SAAS5J,UAAUvW,SAtBrB,QAwBtB+a,KAAK2F,gBAAe,IAAM3F,KAAK2G,mBAAmB3G,KAAKoF,SAAUQ,EACnE,CAGAe,kBACE3G,KAAKoF,SAASrC,SAEdxC,GAAakB,QAAQzB,KAAKoF,SAAUoB,IACpCxG,KAAKuF,SACP,CAGAM,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO2b,GAAMJ,oBAAoBrG,MAEvC,GAAsB,iBAAXqE,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,GAAQrE,KANb,CAOF,GACF,EAQFgG,GAAqBS,GAAO,SAK5BpK,GAAmBoK,IAYnB,MAKMI,GAAyB,4BAM/B,MAAMC,WAAe5B,GAERzI,kBACT,MAdW,QAeb,CAGAsK,SAEE/G,KAAKoF,SAASvjB,aAAa,eAAgBme,KAAKoF,SAAS5J,UAAUuL,OAhB3C,UAiB1B,CAGAlB,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOgc,GAAOT,oBAAoBrG,MAEzB,WAAXqE,GACFvZ,EAAKuZ,IAET,GACF,EAQF9D,GAAaY,GAAGrb,SAlCe,2BAkCmB+gB,IAAwBzH,IACxEA,EAAM+C,iBACN,MAAM6E,EAAS5H,EAAMpS,OAAOmO,QAAQ0L,IACvBC,GAAOT,oBAAoBW,GACnCD,QAAQ,IAMf1K,GAAmByK,IAYnB,MAAMG,GAAiB,CACrBrU,KAAI,CAACmH,EAAU/Z,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAUqiB,iBAAiBniB,KAAK+B,EAAS+Z,IAGvEmN,QAAO,CAACnN,EAAU/Z,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAAS+Z,GAGvDoN,SAAQ,CAACnnB,EAAS+Z,IACT,GAAG3a,UAAUY,EAAQmnB,UAAUvgB,QAAOzB,GAASA,EAAMiiB,QAAQrN,KAGtEsN,QAAQrnB,EAAS+Z,GACf,MAAMsN,EAAU,GAChB,IAAIC,EAAWtnB,EAAQ
wF,WAAW2V,QAAQpB,GAE1C,KAAOuN,GACLD,EAAQhV,KAAKiV,GACbA,EAAWA,EAAS9hB,WAAW2V,QAAQpB,GAGzC,OAAOsN,CACT,EAEAE,KAAKvnB,EAAS+Z,GACZ,IAAIyN,EAAWxnB,EAAQynB,uBAEvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQrN,GACnB,MAAO,CAACyN,GAGVA,EAAWA,EAASC,sBACtB,CAEA,MAAO,EACT,EAGAniB,KAAKtF,EAAS+Z,GACZ,IAAIzU,EAAOtF,EAAQ0nB,mBAEnB,KAAOpiB,GAAM,CACX,GAAIA,EAAK8hB,QAAQrN,GACf,MAAO,CAACzU,GAGVA,EAAOA,EAAKoiB,kBACd,CAEA,MAAO,EACT,EAEAC,kBAAkB3nB,GAChB,MAAM4nB,EAAa,CAAC,IAAK,SAAU,QAAS,WAAY,SAAU,UAAW,aAAc,4BAA4BrkB,KAAIwW,GAAY,GAAGA,2BAAiCpW,KAAK,KAChL,OAAOqc,KAAKpN,KAAKgV,EAAY5nB,GAAS4G,QAAOihB,IAAOxM,GAAWwM,IAAO/M,GAAU+M,IAClF,GAeIC,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAMjB,MAAME,WAAczE,GAClBU,YAAY1kB,EAASqkB,GACnBc,QACAnF,KAAKoF,SAAWplB,EAEXA,GAAYyoB,GAAMC,gBAIvB1I,KAAKqF,QAAUrF,KAAKoE,WAAWC,GAC/BrE,KAAK2I,QAAU,EACf3I,KAAK4I,sBAAwB9H,QAAQzgB,OAAOwoB,cAE5C7I,KAAK8I,cACP,CAGW7E,qBACT,OAAOmE,EACT,CAEWlE,yBACT,OAAOsE,EACT,CAEW/L,kBACT,MAnDW,OAoDb,CAGA8I,UACEhF,GAAaC,IAAIR,KAAKoF,SAAU0C,GAClC,CAGAiB,OAAO3J,GACAY,KAAK4I,sBAKN5I,KAAKgJ,wBAAwB5J,KAC/BY,KAAK2I,QAAUvJ,EAAM6J,SALrBjJ,KAAK2I,QAAUvJ,EAAM8J,QAAQ,GAAGD,OAOpC,CAEAE,KAAK/J,GACCY,KAAKgJ,wBAAwB5J,KAC/BY,KAAK2I,QAAUvJ,EAAM6J,QAAUjJ,KAAK2I,SAGtC3I,KAAKoJ,eAELrM,GAAQiD,KAAKqF,QAAQgD,YACvB,CAEAgB,MAAMjK,GACJY,KAAK2I,QAAUvJ,EAAM8J,SAAW9J,EAAM8J,QAAQ/X,OAAS,EAAI,EAAIiO,EAAM8J,QAAQ,GAAGD,QAAUjJ,KAAK2I,OACjG,CAEAS,eACE,MAAME,EAAY1mB,KAAKoC,IAAIgb,KAAK2I,SAEhC,GAAIW,GA9EgB,GA+ElB,OAGF,MAAMvb,EAAYub,EAAYtJ,KAAK2I,QACnC3I,KAAK2I,QAAU,EAEV5a,GAILgP,GAAQhP,EAAY,EAAIiS,KAAKqF,QAAQkD,cAAgBvI,KAAKqF,QAAQiD,aACpE,CAEAQ,cACM9I,KAAK4I,uBACPrI,GAAaY,GAAGnB,KAAKoF,SAAU8C,IAAmB9I,GAASY,KAAK+I,OAAO3J,KACvEmB,GAAaY,GAAGnB,KAAKoF,SAAU+C,IAAiB/I,GAASY,KAAKmJ,KAAK/J,KAEnEY,KAAKoF,SAAS5J,UAAUtE,IAlGG,mBAoG3BqJ,GAAaY,GAAGnB,KAAKoF,SAAU2C,IAAkB3I,GAASY,KAAK+I,OAAO3J,KACtEmB,GAAaY,GAAGnB,KAAKoF,SAAU4C,IAAiB5I,GAASY,KAAKqJ,MAAMjK,KACpEmB,GAAaY,GAAGnB,KAAKoF,SAAU6C,IAAgB7I,GAASY,KAAKmJ,KAAK/J,KAEtE,CAEA4J,wBAAwB5J,GACtB,OAAOY,KAAK4I,wBA5GS,QA4GiBxJ,EAAMmK,aA7GrB,UA6GyDnK,EAAMmK,YACxF,CAGA1D,qBACE,MAAO,iBAAkB/f,SAASC,iBAAmB7C,UAAUsmB,eAAiB,CAClF,EAcF,MAEMC,GAAc,eACdC,GAAiB,YAKjBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQN,KACtBO,GAAa,OAAOP,KACpBQ,GAAkB,UAAUR,KAC5BS,GAAqB,aAAaT,KAClCU,GAAqB,aAAaV,KAClCW,GAAmB,YAAYX,KAC/BY,GAAwB,OAAOZ,KAAcC,KAC7CY,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,UAAoBd,GACpB,WAAqBD,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAMR,MAAME,WAAiBnG,GACrBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAKsL,UAAY,KACjBtL,KAAKuL,eAAiB,KACtBvL,KAAKwL,YAAa,EAClBxL,KAAKyL,aAAe,KACpBzL,KAAK0L,aAAe,KACpB1L,KAAK2L,mBAAqB1E,GAAeC,QApCjB,uBAoC8ClH,KAAKoF,UAE3EpF,KAAK4L,qBAED5L,KAAKqF,QAAQ4F,OAASV,IACxBvK,KAAK6L,OAET,CAGW5H,qBACT,OAAO4G,EACT,CAEW3G,yBACT,OAAOkH,EACT,CAEW3O,kBACT,MAtFW,UAuFb,CAGAnX,OACE0a,KAAK8L,OAAOnC,GACd,CAEAoC,mBAIOjmB,SAASkmB,QAAUlR,GAAUkF,KAAKoF,WACrCpF,KAAK1a,MAET,CAEAiiB,OACEvH,KAAK8L,OAAOlC,GACd,CAEAoB,QACMhL,KAAKwL,YACPjR,GAAqByF,KAAKoF,UAG5BpF,KAAKiM,gBACP,CAEAJ,QACE7L,KAAKiM,iBAELjM,KAAKkM,kBAELlM,KAAKsL,UAAYa,aAAY,IAAMnM,KAAK+L,mBAAmB/L,KAAKqF,QAAQyF,SAC1E,CAEAsB,oBACOpM,KAAKqF,QAAQ4F,OAIdjL,KAAKwL,WACPjL,GAAaa,IAAIpB,KAAKoF,SAAU4E,IAAY,IAAMhK,KAAK6L,UAIzD7L,KAAK6L,QACP,CAEAQ,GAAGnT,GACD,MAAMoT,EAAQtM,KAAKuM,YAEn
B,GAAIrT,EAAQoT,EAAMnb,OAAS,GAAK+H,EAAQ,EACtC,OAGF,GAAI8G,KAAKwL,WAEP,YADAjL,GAAaa,IAAIpB,KAAKoF,SAAU4E,IAAY,IAAMhK,KAAKqM,GAAGnT,KAI5D,MAAMsT,EAAcxM,KAAKyM,cAAczM,KAAK0M,cAE5C,GAAIF,IAAgBtT,EAClB,OAGF,MAAMtC,EAAQsC,EAAQsT,EAAc7C,GAAaC,GAEjD5J,KAAK8L,OAAOlV,EAAO0V,EAAMpT,GAC3B,CAEAqM,UACMvF,KAAK0L,cACP1L,KAAK0L,aAAanG,UAGpBJ,MAAMI,SACR,CAGAhB,kBAAkBF,GAEhB,OADAA,EAAOsI,gBAAkBtI,EAAOyG,SACzBzG,CACT,CAEAuH,qBACM5L,KAAKqF,QAAQ0F,UACfxK,GAAaY,GAAGnB,KAAKoF,SAAU6E,IAAiB7K,GAASY,KAAK4M,SAASxN,KAG9C,UAAvBY,KAAKqF,QAAQ2F,QACfzK,GAAaY,GAAGnB,KAAKoF,SAAU8E,IAAoB,IAAMlK,KAAKgL,UAC9DzK,GAAaY,GAAGnB,KAAKoF,SAAU+E,IAAoB,IAAMnK,KAAKoM,uBAG5DpM,KAAKqF,QAAQ6F,OAASzC,GAAMC,eAC9B1I,KAAK6M,yBAET,CAEAA,0BACE,IAAK,MAAMC,KAAO7F,GAAerU,KA/JX,qBA+JmCoN,KAAKoF,UAC5D7E,GAAaY,GAAG2L,EAAK1C,IAAkBhL,GAASA,EAAM+C,mBAGxD,MAqBM4K,EAAc,CAClBzE,aAAc,IAAMtI,KAAK8L,OAAO9L,KAAKgN,kBAAkBnD,KACvDtB,cAAe,IAAMvI,KAAK8L,OAAO9L,KAAKgN,kBAAkBlD,KACxDzB,YAxBkB,KACS,UAAvBrI,KAAKqF,QAAQ2F,QAWjBhL,KAAKgL,QAEDhL,KAAKyL,cACPwB,aAAajN,KAAKyL,cAGpBzL,KAAKyL,aAAe5N,YAAW,IAAMmC,KAAKoM,qBA7MjB,IA6M+DpM,KAAKqF,QAAQyF,UAAS,GAQhH9K,KAAK0L,aAAe,IAAIjD,GAAMzI,KAAKoF,SAAU2H,EAC/C,CAEAH,SAASxN,GACP,GAAI,kBAAkBtb,KAAKsb,EAAMpS,OAAOoZ,SACtC,OAGF,MAAMrY,EAAY6c,GAAiBxL,EAAM7hB,KAErCwQ,IACFqR,EAAM+C,iBAENnC,KAAK8L,OAAO9L,KAAKgN,kBAAkBjf,IAEvC,CAEA0e,cAAczsB,GACZ,OAAOggB,KAAKuM,YAAY3mB,QAAQ5F,EAClC,CAEAktB,2BAA2BhU,GACzB,IAAK8G,KAAK2L,mBACR,OAGF,MAAMwB,EAAkBlG,GAAeC,QAAQuD,GAAiBzK,KAAK2L,oBACrEwB,EAAgB3R,UAAUuH,OAAOyH,IACjC2C,EAAgBvrB,gBAAgB,gBAChC,MAAMwrB,EAAqBnG,GAAeC,QAAQ,sBAAsBhO,MAAW8G,KAAK2L,oBAEpFyB,IACFA,EAAmB5R,UAAUtE,IAAIsT,IACjC4C,EAAmBvrB,aAAa,eAAgB,QAEpD,CAEAqqB,kBACE,MAAMlsB,EAAUggB,KAAKuL,gBAAkBvL,KAAK0M,aAE5C,IAAK1sB,EACH,OAGF,MAAMqtB,EAAkB9P,OAAO+P,SAASttB,EAAQga,aAAa,oBAAqB,IAClFgG,KAAKqF,QAAQyF,SAAWuC,GAAmBrN,KAAKqF,QAAQsH,eAC1D,CAEAb,OAAOlV,EAAO5W,EAAU,MACtB,GAAIggB,KAAKwL,WACP,OAGF,MAAMzN,EAAgBiC,KAAK0M,aAErBa,EAAS3W,IAAU+S,GACnB6D,EAAcxtB,GAAW8d,GAAqBkC,KAAKuM,YAAaxO,EAAewP,EAAQvN,KAAKqF,QAAQ8F,MAE1G,GAAIqC,IAAgBzP,EAClB,OAGF,MAAM0P,EAAmBzN,KAAKyM,cAAce,GAEtCE,EAAeC,GACZpN,GAAakB,QAAQzB,KAAKoF,SAAUuI,EAAW,CACpD7N,cAAe0N,EACfzf,UAAWiS,KAAK4N,kBAAkBhX,GAClCkM,KAAM9C,KAAKyM,cAAc1O,GACzBsO,GAAIoB,IAMR,GAFmBC,EAAa3D,IAEjBlI,iBACb,OAGF,IAAK9D,IAAkByP,EAGrB,OAGF,MAAMK,EAAY/M,QAAQd,KAAKsL,WAC/BtL,KAAKgL,QACLhL,KAAKwL,YAAa,EAElBxL,KAAKkN,2BAA2BO,GAEhCzN,KAAKuL,eAAiBiC,EACtB,MAAMM,EAAuBP,EA/RR,sBADF,oBAiSbQ,EAAiBR,EA/RH,qBACA,qBA+RpBC,EAAYhS,UAAUtE,IAAI6W,GAC1BhS,GAAOyR,GACPzP,EAAcvC,UAAUtE,IAAI4W,GAC5BN,EAAYhS,UAAUtE,IAAI4W,GAU1B9N,KAAK2F,gBARoB,KACvB6H,EAAYhS,UAAUuH,OAAO+K,EAAsBC,GACnDP,EAAYhS,UAAUtE,IAAIsT,IAC1BzM,EAAcvC,UAAUuH,OAAOyH,GAAqBuD,EAAgBD,GACpE9N,KAAKwL,YAAa,EAClBkC,EAAa1D,GAAW,GAGYjM,EAAeiC,KAAKgO,eAEtDH,GACF7N,KAAK6L,OAET,CAEAmC,cACE,OAAOhO,KAAKoF,SAAS5J,UAAUvW,SAxTV,QAyTvB,CAEAynB,aACE,OAAOzF,GAAeC,QAAQyD,GAAsB3K,KAAKoF,SAC3D,CAEAmH,YACE,OAAOtF,GAAerU,KAAK8X,GAAe1K,KAAKoF,SACjD,CAEA6G,iBACMjM,KAAKsL,YACP2C,cAAcjO,KAAKsL,WACnBtL,KAAKsL,UAAY,KAErB,CAEA0B,kBAAkBjf,GAChB,OAAIoO,KACKpO,IAAc8b,GAAiBD,GAAaD,GAG9C5b,IAAc8b,GAAiBF,GAAaC,EACrD,CAEAgE,kBAAkBhX,GAChB,OAAIuF,KACKvF,IAAUgT,GAAaC,GAAiBC,GAG1ClT,IAAUgT,GAAaE,GAAkBD,EAClD,CAGAhE,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOugB,GAAShF,oBAAoBrG,KAAMqE,GAEhD,GAAsB,iBAAXA,GAKX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IACP,OAVEvZ,EAAKuhB,GAAGhI,EAWZ,GACF,EAQF9D,GAAaY,GAAGrb,SAAUwkB,GA1WE,uCA0W2C,SAAUlL,GAC/E,MAAMpS,EAASsN,GAAuB0F,MAEtC,IAAKhT,IAAWA,EAAOwO,UAAUvW,SAASslB,IACxC,OAGFnL,EAAM+C,iBACN,
MAAM+L,EAAW7C,GAAShF,oBAAoBrZ,GACxCmhB,EAAanO,KAAKhG,aAAa,oBAErC,OAAImU,GACFD,EAAS7B,GAAG8B,QAEZD,EAAS9B,qBAKyC,SAAhD7I,GAAYQ,iBAAiB/D,KAAM,UACrCkO,EAAS5oB,YAET4oB,EAAS9B,sBAKX8B,EAAS3G,YAET2G,EAAS9B,oBACX,IACA7L,GAAaY,GAAG9gB,OAAQgqB,IAAuB,KAC7C,MAAM+D,EAAYnH,GAAerU,KAzYR,6BA2YzB,IAAK,MAAMsb,KAAYE,EACrB/C,GAAShF,oBAAoB6H,EAC/B,IAMF7R,GAAmBgP,IAYnB,MAEMgD,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChB9pB,OAAQ,KACR6hB,QAAQ,GAEJkI,GAAgB,CACpB/pB,OAAQ,iBACR6hB,OAAQ,WAMV,MAAMmI,WAAiBhK,GACrBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAKmP,kBAAmB,EACxBnP,KAAKoP,cAAgB,GACrB,MAAMC,EAAapI,GAAerU,KAAKmc,IAEvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMtV,EAAWM,GAAuBiV,GAClCC,EAAgBtI,GAAerU,KAAKmH,GAAUnT,QAAO4oB,GAAgBA,IAAiBxP,KAAKoF,WAEhF,OAAbrL,GAAqBwV,EAAcpe,QACrC6O,KAAKoP,cAAc/c,KAAKid,EAE5B,CAEAtP,KAAKyP,sBAEAzP,KAAKqF,QAAQngB,QAChB8a,KAAK0P,0BAA0B1P,KAAKoP,cAAepP,KAAK2P,YAGtD3P,KAAKqF,QAAQ0B,QACf/G,KAAK+G,QAET,CAGW9C,qBACT,OAAO+K,EACT,CAEW9K,yBACT,OAAO+K,EACT,CAEWxS,kBACT,MApEW,UAqEb,CAGAsK,SACM/G,KAAK2P,WACP3P,KAAK4P,OAEL5P,KAAK6P,MAET,CAEAA,OACE,GAAI7P,KAAKmP,kBAAoBnP,KAAK2P,WAChC,OAGF,IAAIG,EAAiB,GAQrB,GANI9P,KAAKqF,QAAQngB,SACf4qB,EAAiB9P,KAAK+P,uBAvEH,wCAuE4CnpB,QAAO5G,GAAWA,IAAYggB,KAAKoF,WAAU7hB,KAAIvD,GAAWkvB,GAAS7I,oBAAoBrmB,EAAS,CAC/J+mB,QAAQ,OAIR+I,EAAe3e,QAAU2e,EAAe,GAAGX,iBAC7C,OAKF,GAFmB5O,GAAakB,QAAQzB,KAAKoF,SAAUkJ,IAExCzM,iBACb,OAGF,IAAK,MAAMmO,KAAkBF,EAC3BE,EAAeJ,OAGjB,MAAMK,EAAYjQ,KAAKkQ,gBAEvBlQ,KAAKoF,SAAS5J,UAAUuH,OAAO6L,IAE/B5O,KAAKoF,SAAS5J,UAAUtE,IAAI2X,IAE5B7O,KAAKoF,SAAS5jB,MAAMyuB,GAAa,EAEjCjQ,KAAK0P,0BAA0B1P,KAAKoP,eAAe,GAEnDpP,KAAKmP,kBAAmB,EAExB,MAYMgB,EAAa,SADUF,EAAU,GAAGhL,cAAgBgL,EAAUpd,MAAM,KAG1EmN,KAAK2F,gBAdY,KACf3F,KAAKmP,kBAAmB,EAExBnP,KAAKoF,SAAS5J,UAAUuH,OAAO8L,IAE/B7O,KAAKoF,SAAS5J,UAAUtE,IAAI0X,GAAqBD,IAEjD3O,KAAKoF,SAAS5jB,MAAMyuB,GAAa,GACjC1P,GAAakB,QAAQzB,KAAKoF,SAAUmJ,GAAc,GAMtBvO,KAAKoF,UAAU,GAE7CpF,KAAKoF,SAAS5jB,MAAMyuB,GAAa,GAAGjQ,KAAKoF,SAAS+K,MACpD,CAEAP,OACE,GAAI5P,KAAKmP,mBAAqBnP,KAAK2P,WACjC,OAKF,GAFmBpP,GAAakB,QAAQzB,KAAKoF,SAAUoJ,IAExC3M,iBACb,OAGF,MAAMoO,EAAYjQ,KAAKkQ,gBAEvBlQ,KAAKoF,SAAS5jB,MAAMyuB,GAAa,GAAGjQ,KAAKoF,SAASrhB,wBAAwBksB,OAC1ElU,GAAOiE,KAAKoF,UAEZpF,KAAKoF,SAAS5J,UAAUtE,IAAI2X,IAE5B7O,KAAKoF,SAAS5J,UAAUuH,OAAO6L,GAAqBD,IAEpD,IAAK,MAAMlN,KAAWzB,KAAKoP,cAAe,CACxC,MAAMpvB,EAAUsa,GAAuBmH,GAEnCzhB,IAAYggB,KAAK2P,SAAS3vB,IAC5BggB,KAAK0P,0BAA0B,CAACjO,IAAU,EAE9C,CAEAzB,KAAKmP,kBAAmB,EAYxBnP,KAAKoF,SAAS5jB,MAAMyuB,GAAa,GAEjCjQ,KAAK2F,gBAZY,KACf3F,KAAKmP,kBAAmB,EAExBnP,KAAKoF,SAAS5J,UAAUuH,OAAO8L,IAE/B7O,KAAKoF,SAAS5J,UAAUtE,IAAI0X,IAE5BrO,GAAakB,QAAQzB,KAAKoF,SAAUqJ,GAAe,GAKvBzO,KAAKoF,UAAU,EAC/C,CAEAuK,SAAS3vB,EAAUggB,KAAKoF,UACtB,OAAOplB,EAAQwb,UAAUvW,SAAS0pB,GACpC,CAGApK,kBAAkBF,GAIhB,OAHAA,EAAO0C,OAASjG,QAAQuD,EAAO0C,QAE/B1C,EAAOnf,OAAS2V,GAAWwJ,EAAOnf,QAC3Bmf,CACT,CAEA6L,gBACE,OAAOlQ,KAAKoF,SAAS5J,UAAUvW,SAtLL,uBAChB,QACC,QAqLb,CAEAwqB,sBACE,IAAKzP,KAAKqF,QAAQngB,OAChB,OAGF,MAAMiiB,EAAWnH,KAAK+P,uBAAuBhB,IAE7C,IAAK,MAAM/uB,KAAWmnB,EAAU,CAC9B,MAAMiJ,EAAW9V,GAAuBta,GAEpCowB,GACFpQ,KAAK0P,0BAA0B,CAAC1vB,GAAUggB,KAAK2P,SAASS,GAE5D,CACF,CAEAL,uBAAuBhW,GACrB,MAAMoN,EAAWF,GAAerU,KAAKkc,GAA4B9O,KAAKqF,QAAQngB,QAE9E,OAAO+hB,GAAerU,KAAKmH,EAAUiG,KAAKqF,QAAQngB,QAAQ0B,QAAO5G,IAAYmnB,EAASjN,SAASla,IACjG,CAEA0vB,0BAA0BW,EAAcC,GACtC,GAAKD,EAAalf,OAIlB,IAAK,MAAMnR,KAAWqwB,EACpBrwB,EAAQwb,UAAUuL,OAvNK,aAuNyBuJ,GAChDtwB,EAAQ6B,aAAa,gBAAiByuB,EAE1C,CAGAzK,uBAAuBxB,GACrB,MAAMgB,EAAU,CAAC,EAMjB,MAJsB,iBAA
XhB,GAAuB,YAAYvgB,KAAKugB,KACjDgB,EAAQ0B,QAAS,GAGZ/G,KAAK4G,MAAK,WACf,MAAM9b,EAAOokB,GAAS7I,oBAAoBrG,KAAMqF,GAEhD,GAAsB,iBAAXhB,EAAqB,CAC9B,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IACP,CACF,GACF,EAQF9D,GAAaY,GAAGrb,SAAU4oB,GAAwBK,IAAwB,SAAU3P,IAErD,MAAzBA,EAAMpS,OAAOoZ,SAAmBhH,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAeqG,UAC/EhH,EAAM+C,iBAGR,MAAMpI,EAAWM,GAAuB2F,MAClCuQ,EAAmBtJ,GAAerU,KAAKmH,GAE7C,IAAK,MAAM/Z,KAAWuwB,EACpBrB,GAAS7I,oBAAoBrmB,EAAS,CACpC+mB,QAAQ,IACPA,QAEP,IAKA1K,GAAmB6S,IAYnB,MAAMsB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBrV,KAAU,UAAY,YACtCsV,GAAmBtV,KAAU,YAAc,UAC3CuV,GAAmBvV,KAAU,aAAe,eAC5CwV,GAAsBxV,KAAU,eAAiB,aACjDyV,GAAkBzV,KAAU,aAAe,cAC3C0V,GAAiB1V,KAAU,cAAgB,aAG3C2V,GAAY,CAChBC,WAAW,EACXrjB,SAAU,kBACVsjB,QAAS,UACTvpB,OAAQ,CAAC,EAAG,GACZwpB,aAAc,KACdlzB,UAAW,UAEPmzB,GAAgB,CACpBH,UAAW,mBACXrjB,SAAU,mBACVsjB,QAAS,SACTvpB,OAAQ,0BACRwpB,aAAc,yBACdlzB,UAAW,2BAMb,MAAMozB,WAAiBjN,GACrBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAKoS,QAAU,KACfpS,KAAKqS,QAAUrS,KAAKoF,SAAS5f,WAG7Bwa,KAAKsS,MAAQrL,GAAe3hB,KAAK0a,KAAKoF,SAAUmM,IAAe,IAAMtK,GAAeM,KAAKvH,KAAKoF,SAAUmM,IAAe,IAAMtK,GAAeC,QAAQqK,GAAevR,KAAKqS,SACxKrS,KAAKuS,UAAYvS,KAAKwS,eACxB,CAGWvO,qBACT,OAAO6N,EACT,CAEW5N,yBACT,OAAOgO,EACT,CAEWzV,kBACT,OAAO+T,EACT,CAGAzJ,SACE,OAAO/G,KAAK2P,WAAa3P,KAAK4P,OAAS5P,KAAK6P,MAC9C,CAEAA,OACE,GAAIxU,GAAW2E,KAAKoF,WAAapF,KAAK2P,WACpC,OAGF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAKoF,UAItB,IAFkB7E,GAAakB,QAAQzB,KAAKoF,SAAU2L,GAAcjR,GAEtD+B,iBAAd,CAUA,GANA7B,KAAKyS,gBAMD,iBAAkB3sB,SAASC,kBAAoBia,KAAKqS,QAAQlX,QA/ExC,eAgFtB,IAAK,MAAMnb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAKwa,UAC/C5G,GAAaY,GAAGnhB,EAAS,YAAa8b,IAI1CkE,KAAKoF,SAASsN,QAEd1S,KAAKoF,SAASvjB,aAAa,iBAAiB,GAE5Cme,KAAKsS,MAAM9W,UAAUtE,IAAIka,IAEzBpR,KAAKoF,SAAS5J,UAAUtE,IAAIka,IAE5B7Q,GAAakB,QAAQzB,KAAKoF,SAAU4L,GAAelR,EAtBnD,CAuBF,CAEA8P,OACE,GAAIvU,GAAW2E,KAAKoF,YAAcpF,KAAK2P,WACrC,OAGF,MAAM7P,EAAgB,CACpBA,cAAeE,KAAKoF,UAGtBpF,KAAK2S,cAAc7S,EACrB,CAEAyF,UACMvF,KAAKoS,SACPpS,KAAKoS,QAAQ3Y,UAGf0L,MAAMI,SACR,CAEA/Z,SACEwU,KAAKuS,UAAYvS,KAAKwS,gBAElBxS,KAAKoS,SACPpS,KAAKoS,QAAQ5mB,QAEjB,CAGAmnB,cAAc7S,GAGZ,IAFkBS,GAAakB,QAAQzB,KAAKoF,SAAUyL,GAAc/Q,GAEtD+B,iBAAd,CAMA,GAAI,iBAAkB/b,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAKwa,UAC/C5G,GAAaC,IAAIxgB,EAAS,YAAa8b,IAIvCkE,KAAKoS,SACPpS,KAAKoS,QAAQ3Y,UAGfuG,KAAKsS,MAAM9W,UAAUuH,OAAOqO,IAE5BpR,KAAKoF,SAAS5J,UAAUuH,OAAOqO,IAE/BpR,KAAKoF,SAASvjB,aAAa,gBAAiB,SAE5C0hB,GAAYE,oBAAoBzD,KAAKsS,MAAO,UAC5C/R,GAAakB,QAAQzB,KAAKoF,SAAU0L,GAAgBhR,EArBpD,CAsBF,CAEAsE,WAAWC,GAGT,GAAgC,iBAFhCA,EAASc,MAAMf,WAAWC,IAERtlB,YAA2B,GAAUslB,EAAOtlB,YAAgE,mBAA3CslB,EAAOtlB,UAAUgF,sBAElG,MAAM,IAAIihB,UAAU,GAAGwL,GAAOvL,+GAGhC,OAAOZ,CACT,CAEAoO,gBACE,QAAsB,IAAX,EACT,MAAM,IAAIzN,UAAU,gEAGtB,IAAI4N,EAAmB5S,KAAKoF,SAEG,WAA3BpF,KAAKqF,QAAQtmB,UACf6zB,EAAmB5S,KAAKqS,QACf,GAAUrS,KAAKqF,QAAQtmB,WAChC6zB,EAAmB/X,GAAWmF,KAAKqF,QAAQtmB,WACA,iBAA3BihB,KAAKqF,QAAQtmB,YAC7B6zB,EAAmB5S,KAAKqF,QAAQtmB,WAGlC,MAAMkzB,EAAejS,KAAK6S,mBAE1B7S,KAAKoS,QAAU,GAAoBQ,EAAkB5S,KAAKsS,MAAOL,EACnE,CAEAtC,WACE,OAAO3P,KAAKsS,MAAM9W,UAAUvW,SAASmsB,GACvC,CAEA0B,gBACE,MAAMC,EAAiB/S,KAAKqS,QAE5B,GAAIU,EAAevX,UAAUvW,SAxMN,WAyMrB,OAAO2sB,GAGT,GAAImB,EAAevX,UAAUvW,SA3MJ,aA4MvB,OAAO4sB,GAGT,GAAIkB,EAAevX,UAAUvW,SA9MA,iBA+M3B,MAjMsB,MAoMxB,GAAI8tB,EAAevX,UAAUvW,SAjNE,mBAkN7B,MApMyB,SAwM3B,MAAM+tB,EAAkF,QAA1EttB,iBAAiBsa
,KAAKsS,OAAOrX,iBAAiB,iBAAiBb,OAE7E,OAAI2Y,EAAevX,UAAUvW,SA5NP,UA6Nb+tB,EAAQvB,GAAmBD,GAG7BwB,EAAQrB,GAAsBD,EACvC,CAEAc,gBACE,OAAkD,OAA3CxS,KAAKoF,SAASjK,QA5ND,UA6NtB,CAEA8X,aACE,MAAM,OACJxqB,GACEuX,KAAKqF,QAET,MAAsB,iBAAX5c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAASmf,OAAO+P,SAASlvB,EAAO,MAGzC,mBAAXqK,EACFyqB,GAAczqB,EAAOyqB,EAAYlT,KAAKoF,UAGxC3c,CACT,CAEAoqB,mBACE,MAAMM,EAAwB,CAC5Bh0B,UAAW6gB,KAAK8S,gBAChBjc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAUsR,KAAKqF,QAAQ3W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQuX,KAAKiT,iBAcnB,OATIjT,KAAKuS,WAAsC,WAAzBvS,KAAKqF,QAAQ2M,WACjCzO,GAAYC,iBAAiBxD,KAAKsS,MAAO,SAAU,UAEnDa,EAAsBtc,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAIN,IAAKmyB,KAC+B,mBAA9BnT,KAAKqF,QAAQ4M,aAA8BjS,KAAKqF,QAAQ4M,aAAakB,GAAyBnT,KAAKqF,QAAQ4M,aAE1H,CAEAmB,iBAAgB,IACd71B,EAAG,OACHyP,IAEA,MAAMsf,EAAQrF,GAAerU,KA/QF,8DA+Q+BoN,KAAKsS,OAAO1rB,QAAO5G,GAAW8a,GAAU9a,KAE7FssB,EAAMnb,QAMX2M,GAAqBwO,EAAOtf,EAAQzP,IAAQqzB,IAAmBtE,EAAMpS,SAASlN,IAAS0lB,OACzF,CAGA7M,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOqnB,GAAS9L,oBAAoBrG,KAAMqE,GAEhD,GAAsB,iBAAXA,EAAX,CAIA,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,CAEAwB,kBAAkBzG,GAChB,GAhUuB,IAgUnBA,EAAM4H,QAAgD,UAAf5H,EAAMqB,MAnUnC,QAmUuDrB,EAAM7hB,IACzE,OAGF,MAAM81B,EAAcpM,GAAerU,KAAK0e,IAExC,IAAK,MAAMvK,KAAUsM,EAAa,CAChC,MAAMC,EAAUnB,GAASrM,YAAYiB,GAErC,IAAKuM,IAAyC,IAA9BA,EAAQjO,QAAQ0M,UAC9B,SAGF,MAAMwB,EAAenU,EAAMmU,eACrBC,EAAeD,EAAarZ,SAASoZ,EAAQhB,OAEnD,GAAIiB,EAAarZ,SAASoZ,EAAQlO,WAA2C,WAA9BkO,EAAQjO,QAAQ0M,YAA2ByB,GAA8C,YAA9BF,EAAQjO,QAAQ0M,WAA2ByB,EACnJ,SAIF,GAAIF,EAAQhB,MAAMrtB,SAASma,EAAMpS,UAA2B,UAAfoS,EAAMqB,MAxVvC,QAwV2DrB,EAAM7hB,KAAqB,qCAAqCuG,KAAKsb,EAAMpS,OAAOoZ,UACvJ,SAGF,MAAMtG,EAAgB,CACpBA,cAAewT,EAAQlO,UAGN,UAAfhG,EAAMqB,OACRX,EAAcqG,WAAa/G,GAG7BkU,EAAQX,cAAc7S,EACxB,CACF,CAEA+F,6BAA6BzG,GAG3B,MAAMqU,EAAU,kBAAkB3vB,KAAKsb,EAAMpS,OAAOoZ,SAC9CsN,EA7WW,WA6WKtU,EAAM7hB,IACtBo2B,EAAkB,CAAChD,GAAgBC,IAAkB1W,SAASkF,EAAM7hB,KAE1E,IAAKo2B,IAAoBD,EACvB,OAGF,GAAID,IAAYC,EACd,OAGFtU,EAAM+C,iBAEN,MAAMyR,EAAkB5T,KAAKoH,QAAQiK,IAA0BrR,KAAOiH,GAAeM,KAAKvH,KAAMqR,IAAwB,IAAMpK,GAAe3hB,KAAK0a,KAAMqR,IAAwB,IAAMpK,GAAeC,QAAQmK,GAAwBjS,EAAMW,eAAeva,YACpPwF,EAAWmnB,GAAS9L,oBAAoBuN,GAE9C,GAAID,EAMF,OALAvU,EAAMyU,kBACN7oB,EAAS6kB,YAET7kB,EAASooB,gBAAgBhU,GAKvBpU,EAAS2kB,aAEXvQ,EAAMyU,kBACN7oB,EAAS4kB,OACTgE,EAAgBlB,QAEpB,EAQFnS,GAAaY,GAAGrb,SAAUorB,GAAwBG,GAAwBc,GAAS2B,uBACnFvT,GAAaY,GAAGrb,SAAUorB,GAAwBK,GAAeY,GAAS2B,uBAC1EvT,GAAaY,GAAGrb,SAAUmrB,GAAwBkB,GAAS4B,YAC3DxT,GAAaY,GAAGrb,SAAUqrB,GAAsBgB,GAAS4B,YACzDxT,GAAaY,GAAGrb,SAAUmrB,GAAwBI,IAAwB,SAAUjS,GAClFA,EAAM+C,iBACNgQ,GAAS9L,oBAAoBrG,MAAM+G,QACrC,IAKA1K,GAAmB8V,IAYnB,MAAM6B,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAKxB,MAAMC,GACJ1P,cACE1E,KAAKoF,SAAWtf,SAAS6G,IAC3B,CAGA0nB,WAEE,MAAMC,EAAgBxuB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAOk0B,WAAaD,EACtC,CAEA1E,OACE,MAAMtrB,EAAQ0b,KAAKqU,WAEnBrU,KAAKwU,mBAGLxU,KAAKyU,sBAAsBzU,KAAKoF,SAAU8O,IAAkBQ,GAAmBA,EAAkBpwB,IAGjG0b,KAAKyU,sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkBpwB,IAE1G0b,KAAKyU,sBAAsBR,GAAyBE,IAAiBO,GAAmBA,EAAkBpwB,GAC5G,CAEAwO,QACEkN,KAAK2U,wBAAwB3U,KAAKoF,SAAU,YAE5CpF,KAAK2U,wBAAwB3U,KAAKoF,SAAU8O,IAE5ClU,KAAK2U,wBAAwBX,GAAwBE,IAErDlU,KAAK2U,wBAAwBV,GAAyBE,GACxD,CAEAS,gBACE,OAAO5U,KAAKqU,WAAa,CAC3B,CAGAG,mBACExU,KAAK6U,sBAAsB7U,KAAKoF,SAAU,YAE1CpF,KAAKoF,SAAS5jB,MAAM+K,SAAW,QACjC,CAEAkoB,sBAAsB1a,EAAU+a,EAAevY,GAC7C,MAAMwY,EAAiB/U,KAAKqU,WAa5BrU,KAAKgV,2BAA2Bjb,GAXH/Z,IAC3B,GAAIA,IAAYggB,KAAKoF,UAAY/kB,OAAOk0B,WAAav0B,EAAQsI,YAAcysB,EACzE,OAGF/U,KAAK6U,sBAAsB70B,EAAS80B,GAEpC,MAAMJ,EAAkBr0B,OAAOqF,iBAAiB1F
,GAASib,iBAAiB6Z,GAC1E90B,EAAQwB,MAAMyzB,YAAYH,EAAe,GAAGvY,EAASgB,OAAOC,WAAWkX,QAAsB,GAIjG,CAEAG,sBAAsB70B,EAAS80B,GAC7B,MAAMI,EAAcl1B,EAAQwB,MAAMyZ,iBAAiB6Z,GAE/CI,GACF3R,GAAYC,iBAAiBxjB,EAAS80B,EAAeI,EAEzD,CAEAP,wBAAwB5a,EAAU+a,GAahC9U,KAAKgV,2BAA2Bjb,GAZH/Z,IAC3B,MAAM5B,EAAQmlB,GAAYQ,iBAAiB/jB,EAAS80B,GAEtC,OAAV12B,GAKJmlB,GAAYE,oBAAoBzjB,EAAS80B,GACzC90B,EAAQwB,MAAMyzB,YAAYH,EAAe12B,IALvC4B,EAAQwB,MAAM2zB,eAAeL,EAKgB,GAInD,CAEAE,2BAA2Bjb,EAAUqb,GACnC,GAAI,GAAUrb,GACZqb,EAASrb,QAIX,IAAK,MAAMsb,KAAOpO,GAAerU,KAAKmH,EAAUiG,KAAKoF,UACnDgQ,EAASC,EAEb,EAcF,MAAMC,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACf/P,YAAY,EACZ9K,WAAW,EAEX8a,YAAa,QAGTC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACf/P,WAAY,UACZ9K,UAAW,UACX8a,YAAa,oBAMf,MAAME,WAAiB9R,GACrBU,YAAYL,GACVc,QACAnF,KAAKqF,QAAUrF,KAAKoE,WAAWC,GAC/BrE,KAAK+V,aAAc,EACnB/V,KAAKoF,SAAW,IAClB,CAGWnB,qBACT,OAAOwR,EACT,CAEWvR,yBACT,OAAO2R,EACT,CAEWpZ,kBACT,OAAO6Y,EACT,CAGAzF,KAAKtT,GACH,IAAKyD,KAAKqF,QAAQvK,UAEhB,YADAiC,GAAQR,GAIVyD,KAAKgW,UAEL,MAAMh2B,EAAUggB,KAAKiW,cAEjBjW,KAAKqF,QAAQO,YACf7J,GAAO/b,GAGTA,EAAQwb,UAAUtE,IAAIqe,IAEtBvV,KAAKkW,mBAAkB,KACrBnZ,GAAQR,EAAS,GAErB,CAEAqT,KAAKrT,GACEyD,KAAKqF,QAAQvK,WAKlBkF,KAAKiW,cAAcza,UAAUuH,OAAOwS,IAEpCvV,KAAKkW,mBAAkB,KACrBlW,KAAKuF,UACLxI,GAAQR,EAAS,KARjBQ,GAAQR,EAUZ,CAEAgJ,UACOvF,KAAK+V,cAIVxV,GAAaC,IAAIR,KAAKoF,SAAUoQ,IAEhCxV,KAAKoF,SAASrC,SAEd/C,KAAK+V,aAAc,EACrB,CAGAE,cACE,IAAKjW,KAAKoF,SAAU,CAClB,MAAM+Q,EAAWrwB,SAASswB,cAAc,OACxCD,EAAST,UAAY1V,KAAKqF,QAAQqQ,UAE9B1V,KAAKqF,QAAQO,YACfuQ,EAAS3a,UAAUtE,IAnGD,QAsGpB8I,KAAKoF,SAAW+Q,CAClB,CAEA,OAAOnW,KAAKoF,QACd,CAEAb,kBAAkBF,GAGhB,OADAA,EAAOuR,YAAc/a,GAAWwJ,EAAOuR,aAChCvR,CACT,CAEA2R,UACE,GAAIhW,KAAK+V,YACP,OAGF,MAAM/1B,EAAUggB,KAAKiW,cAErBjW,KAAKqF,QAAQuQ,YAAYS,OAAOr2B,GAEhCugB,GAAaY,GAAGnhB,EAASw1B,IAAiB,KACxCzY,GAAQiD,KAAKqF,QAAQsQ,cAAc,IAErC3V,KAAK+V,aAAc,CACrB,CAEAG,kBAAkB3Z,GAChBS,GAAuBT,EAAUyD,KAAKiW,cAAejW,KAAKqF,QAAQO,WACpE,EAcF,MAEM0Q,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAGTC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAMf,MAAME,WAAkB9S,GACtBU,YAAYL,GACVc,QACAnF,KAAKqF,QAAUrF,KAAKoE,WAAWC,GAC/BrE,KAAK+W,WAAY,EACjB/W,KAAKgX,qBAAuB,IAC9B,CAGW/S,qBACT,OAAOyS,EACT,CAEWxS,yBACT,OAAO2S,EACT,CAEWpa,kBACT,MAvCW,WAwCb,CAGAwa,WACMjX,KAAK+W,YAIL/W,KAAKqF,QAAQsR,WACf3W,KAAKqF,QAAQuR,YAAYlE,QAG3BnS,GAAaC,IAAI1a,SAAUwwB,IAE3B/V,GAAaY,GAAGrb,SAAUywB,IAAiBnX,GAASY,KAAKkX,eAAe9X,KACxEmB,GAAaY,GAAGrb,SAAU0wB,IAAmBpX,GAASY,KAAKmX,eAAe/X,KAC1EY,KAAK+W,WAAY,EACnB,CAEAK,aACOpX,KAAK+W,YAIV/W,KAAK+W,WAAY,EACjBxW,GAAaC,IAAI1a,SAAUwwB,IAC7B,CAGAY,eAAe9X,GACb,MAAM,YACJwX,GACE5W,KAAKqF,QAET,GAAIjG,EAAMpS,SAAWlH,UAAYsZ,EAAMpS,SAAW4pB,GAAeA,EAAY3xB,SAASma,EAAMpS,QAC1F,OAGF,MAAM1L,EAAW2lB,GAAeU,kBAAkBiP,GAE1B,IAApBt1B,EAAS6P,OACXylB,EAAYlE,QACH1S,KAAKgX,uBAAyBP,GACvCn1B,EAASA,EAAS6P,OAAS,GAAGuhB,QAE9BpxB,EAAS,GAAGoxB,OAEhB,CAEAyE,eAAe/X,GApFD,QAqFRA,EAAM7hB,MAIVyiB,KAAKgX,qBAAuB5X,EAAMiY,SAAWZ,GAxFzB,UAyFtB,EAcF,MAEMa,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,QAAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBjC,UAAU,EACVzD,OAAO,EACP3H,UAAU,GAENsN,GAAgB,CACpBlC,SAAU,mBACVzD,MAAO,UACP3H,SAAU,WAMZ,MAAMuN,WAAcpT,GAClBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAKuY,QAAUtR,GAAeC,QApBV,gBAoBmClH,KAAKoF,UAC5DpF,KAAKwY,UAAYxY,KAAKyY,sBACtBzY,KAAK0Y,WAAa1Y,KAAK2Y,uBACvB3Y,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EACxBnP,KAAK4Y,WAAa,IAAIxE,GAEtBpU,KAAK4L,oBACP,C
AGW3H,qBACT,OAAOmU,EACT,CAEWlU,yBACT,OAAOmU,EACT,CAEW5b,kBACT,MA5DW,OA6Db,CAGAsK,OAAOjH,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CAEA+P,KAAK/P,GACCE,KAAK2P,UAAY3P,KAAKmP,kBAIR5O,GAAakB,QAAQzB,KAAKoF,SAAUsS,GAAc,CAClE5X,kBAGY+B,mBAId7B,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EAExBnP,KAAK4Y,WAAWhJ,OAEhB9pB,SAAS6G,KAAK6O,UAAUtE,IAAI+gB,IAE5BjY,KAAK6Y,gBAEL7Y,KAAKwY,UAAU3I,MAAK,IAAM7P,KAAK8Y,aAAahZ,KAC9C,CAEA8P,OACO5P,KAAK2P,WAAY3P,KAAKmP,mBAIT5O,GAAakB,QAAQzB,KAAKoF,SAAUmS,IAExC1V,mBAId7B,KAAK2P,UAAW,EAChB3P,KAAKmP,kBAAmB,EAExBnP,KAAK0Y,WAAWtB,aAEhBpX,KAAKoF,SAAS5J,UAAUuH,OAAOmV,IAE/BlY,KAAK2F,gBAAe,IAAM3F,KAAK+Y,cAAc/Y,KAAKoF,SAAUpF,KAAKgO,gBACnE,CAEAzI,UACE,IAAK,MAAMyT,IAAe,CAAC34B,OAAQ2f,KAAKuY,SACtChY,GAAaC,IAAIwY,EAAa1B,IAGhCtX,KAAKwY,UAAUjT,UAEfvF,KAAK0Y,WAAWtB,aAEhBjS,MAAMI,SACR,CAEA0T,eACEjZ,KAAK6Y,eACP,CAGAJ,sBACE,OAAO,IAAI3C,GAAS,CAClBhb,UAAWgG,QAAQd,KAAKqF,QAAQ8Q,UAEhCvQ,WAAY5F,KAAKgO,eAErB,CAEA2K,uBACE,OAAO,IAAI7B,GAAU,CACnBF,YAAa5W,KAAKoF,UAEtB,CAEA0T,aAAahZ,GAENha,SAAS6G,KAAK1H,SAAS+a,KAAKoF,WAC/Btf,SAAS6G,KAAK0pB,OAAOrW,KAAKoF,UAG5BpF,KAAKoF,SAAS5jB,MAAMwwB,QAAU,QAE9BhS,KAAKoF,SAASxjB,gBAAgB,eAE9Boe,KAAKoF,SAASvjB,aAAa,cAAc,GAEzCme,KAAKoF,SAASvjB,aAAa,OAAQ,UAEnCme,KAAKoF,SAASlZ,UAAY,EAC1B,MAAMgtB,EAAYjS,GAAeC,QA3IT,cA2IsClH,KAAKuY,SAE/DW,IACFA,EAAUhtB,UAAY,GAGxB6P,GAAOiE,KAAKoF,UAEZpF,KAAKoF,SAAS5J,UAAUtE,IAAIghB,IAa5BlY,KAAK2F,gBAXsB,KACrB3F,KAAKqF,QAAQqN,OACf1S,KAAK0Y,WAAWzB,WAGlBjX,KAAKmP,kBAAmB,EACxB5O,GAAakB,QAAQzB,KAAKoF,SAAUuS,GAAe,CACjD7X,iBACA,GAGoCE,KAAKuY,QAASvY,KAAKgO,cAC7D,CAEApC,qBACErL,GAAaY,GAAGnB,KAAKoF,SAAU2S,IAAyB3Y,IACtD,GAtLe,WAsLXA,EAAM7hB,IAIV,OAAIyiB,KAAKqF,QAAQ0F,UACf3L,EAAM+C,sBACNnC,KAAK4P,aAIP5P,KAAKmZ,4BAA4B,IAEnC5Y,GAAaY,GAAG9gB,OAAQu3B,IAAgB,KAClC5X,KAAK2P,WAAa3P,KAAKmP,kBACzBnP,KAAK6Y,eACP,IAEFtY,GAAaY,GAAGnB,KAAKoF,SAAU0S,IAAyB1Y,IAEtDmB,GAAaa,IAAIpB,KAAKoF,SAAUyS,IAAqBuB,IAC/CpZ,KAAKoF,WAAahG,EAAMpS,QAAUgT,KAAKoF,WAAagU,EAAOpsB,SAIjC,WAA1BgT,KAAKqF,QAAQ8Q,SAMbnW,KAAKqF,QAAQ8Q,UACfnW,KAAK4P,OANL5P,KAAKmZ,6BAOP,GACA,GAEN,CAEAJ,aACE/Y,KAAKoF,SAAS5jB,MAAMwwB,QAAU,OAE9BhS,KAAKoF,SAASvjB,aAAa,eAAe,GAE1Cme,KAAKoF,SAASxjB,gBAAgB,cAE9Boe,KAAKoF,SAASxjB,gBAAgB,QAE9Boe,KAAKmP,kBAAmB,EAExBnP,KAAKwY,UAAU5I,MAAK,KAClB9pB,SAAS6G,KAAK6O,UAAUuH,OAAOkV,IAE/BjY,KAAKqZ,oBAELrZ,KAAK4Y,WAAW9lB,QAEhByN,GAAakB,QAAQzB,KAAKoF,SAAUqS,GAAe,GAEvD,CAEAzJ,cACE,OAAOhO,KAAKoF,SAAS5J,UAAUvW,SAtOT,OAuOxB,CAEAk0B,6BAGE,GAFkB5Y,GAAakB,QAAQzB,KAAKoF,SAAUoS,IAExC3V,iBACZ,OAGF,MAAMyX,EAAqBtZ,KAAKoF,SAAStX,aAAehI,SAASC,gBAAgBsC,aAC3EkxB,EAAmBvZ,KAAKoF,SAAS5jB,MAAMiL,UAEpB,WAArB8sB,GAAiCvZ,KAAKoF,SAAS5J,UAAUvW,SAASkzB,MAIjEmB,IACHtZ,KAAKoF,SAAS5jB,MAAMiL,UAAY,UAGlCuT,KAAKoF,SAAS5J,UAAUtE,IAAIihB,IAE5BnY,KAAK2F,gBAAe,KAClB3F,KAAKoF,SAAS5J,UAAUuH,OAAOoV,IAE/BnY,KAAK2F,gBAAe,KAClB3F,KAAKoF,SAAS5jB,MAAMiL,UAAY8sB,CAAgB,GAC/CvZ,KAAKuY,QAAQ,GACfvY,KAAKuY,SAERvY,KAAKoF,SAASsN,QAChB,CAMAmG,gBACE,MAAMS,EAAqBtZ,KAAKoF,SAAStX,aAAehI,SAASC,gBAAgBsC,aAE3E0sB,EAAiB/U,KAAK4Y,WAAWvE,WAEjCmF,EAAoBzE,EAAiB,EAE3C,GAAIyE,IAAsBF,EAAoB,CAC5C,MAAM/2B,EAAW4Z,KAAU,cAAgB,eAC3C6D,KAAKoF,SAAS5jB,MAAMe,GAAY,GAAGwyB,KACrC,CAEA,IAAKyE,GAAqBF,EAAoB,CAC5C,MAAM/2B,EAAW4Z,KAAU,eAAiB,cAC5C6D,KAAKoF,SAAS5jB,MAAMe,GAAY,GAAGwyB,KACrC,CACF,CAEAsE,oBACErZ,KAAKoF,SAAS5jB,MAAMi4B,YAAc,GAClCzZ,KAAKoF,SAAS5jB,MAAMk4B,aAAe,EACrC,CAGA7T,uBAAuBxB,EAAQvE,GAC7B,OAAOE,KAAK4G,MAAK,WACf,MAAM9b,EAAOwtB,GAAMjS,oBAAoBrG,KAAMqE,GAE7C,GAAsB,iBAAXA,EAAX,CAIA,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,GAAQvE,EANb,CAOF,GACF,EAQFS,GAAaY,GAAGrb,SAAUkyB,GApTK,4BAoT2C,SAAU5Y,GAClF,MAAMpS,EAASsN,GAAuB0F,MAElC,CAAC,IAA
K,QAAQ9F,SAAS8F,KAAKoG,UAC9BhH,EAAM+C,iBAGR5B,GAAaa,IAAIpU,EAAQ0qB,IAAciC,IACjCA,EAAU9X,kBAKdtB,GAAaa,IAAIpU,EAAQyqB,IAAgB,KACnC3c,GAAUkF,OACZA,KAAK0S,OACP,GACA,IAGJ,MAAMkH,EAAc3S,GAAeC,QA3Ub,eA6UlB0S,GACFtB,GAAMxS,YAAY8T,GAAahK,OAGpB0I,GAAMjS,oBAAoBrZ,GAClC+Z,OAAO/G,KACd,IACAgG,GAAqBsS,IAKrBjc,GAAmBic,IAYnB,MAEMuB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChBzE,UAAU,EACVpL,UAAU,EACV7f,QAAQ,GAEJ2vB,GAAgB,CACpB1E,SAAU,mBACVpL,SAAU,UACV7f,OAAQ,WAMV,MAAM4vB,WAAkB5V,GACtBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAK2P,UAAW,EAChB3P,KAAKwY,UAAYxY,KAAKyY,sBACtBzY,KAAK0Y,WAAa1Y,KAAK2Y,uBAEvB3Y,KAAK4L,oBACP,CAGW3H,qBACT,OAAO2W,EACT,CAEW1W,yBACT,OAAO2W,EACT,CAEWpe,kBACT,MAtDW,WAuDb,CAGAsK,OAAOjH,GACL,OAAOE,KAAK2P,SAAW3P,KAAK4P,OAAS5P,KAAK6P,KAAK/P,EACjD,CAEA+P,KAAK/P,GACCE,KAAK2P,UAISpP,GAAakB,QAAQzB,KAAKoF,SAAUgV,GAAc,CAClEta,kBAGY+B,mBAId7B,KAAK2P,UAAW,EAEhB3P,KAAKwY,UAAU3I,OAEV7P,KAAKqF,QAAQna,SAChB,IAAIkpB,IAAkBxE,OAGxB5P,KAAKoF,SAASvjB,aAAa,cAAc,GAEzCme,KAAKoF,SAASvjB,aAAa,OAAQ,UAEnCme,KAAKoF,SAAS5J,UAAUtE,IAAI+iB,IAgB5Bja,KAAK2F,gBAdoB,KAClB3F,KAAKqF,QAAQna,SAAU8U,KAAKqF,QAAQ8Q,UACvCnW,KAAK0Y,WAAWzB,WAGlBjX,KAAKoF,SAAS5J,UAAUtE,IAAI8iB,IAE5Bha,KAAKoF,SAAS5J,UAAUuH,OAAOkX,IAE/B1Z,GAAakB,QAAQzB,KAAKoF,SAAUiV,GAAe,CACjDva,iBACA,GAGkCE,KAAKoF,UAAU,GACvD,CAEAwK,OACO5P,KAAK2P,WAIQpP,GAAakB,QAAQzB,KAAKoF,SAAUkV,IAExCzY,mBAId7B,KAAK0Y,WAAWtB,aAEhBpX,KAAKoF,SAAS2V,OAEd/a,KAAK2P,UAAW,EAEhB3P,KAAKoF,SAAS5J,UAAUtE,IAAIgjB,IAE5Bla,KAAKwY,UAAU5I,OAgBf5P,KAAK2F,gBAdoB,KACvB3F,KAAKoF,SAAS5J,UAAUuH,OAAOiX,GAAmBE,IAElDla,KAAKoF,SAASxjB,gBAAgB,cAE9Boe,KAAKoF,SAASxjB,gBAAgB,QAEzBoe,KAAKqF,QAAQna,SAChB,IAAIkpB,IAAkBthB,QAGxByN,GAAakB,QAAQzB,KAAKoF,SAAUoV,GAAe,GAGfxa,KAAKoF,UAAU,IACvD,CAEAG,UACEvF,KAAKwY,UAAUjT,UAEfvF,KAAK0Y,WAAWtB,aAEhBjS,MAAMI,SACR,CAGAkT,sBACE,MAUM3d,EAAYgG,QAAQd,KAAKqF,QAAQ8Q,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA7JsB,qBA8JtB5a,YACA8K,YAAY,EACZgQ,YAAa5V,KAAKoF,SAAS5f,WAC3BmwB,cAAe7a,EAhBK,KACU,WAA1BkF,KAAKqF,QAAQ8Q,SAKjBnW,KAAK4P,OAJHrP,GAAakB,QAAQzB,KAAKoF,SAAUmV,GAI3B,EAUgC,MAE/C,CAEA5B,uBACE,OAAO,IAAI7B,GAAU,CACnBF,YAAa5W,KAAKoF,UAEtB,CAEAwG,qBACErL,GAAaY,GAAGnB,KAAKoF,SAAUuV,IAAuBvb,IAhLvC,WAiLTA,EAAM7hB,MAILyiB,KAAKqF,QAAQ0F,SAKlB/K,KAAK4P,OAJHrP,GAAakB,QAAQzB,KAAKoF,SAAUmV,IAI3B,GAEf,CAGA1U,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOgwB,GAAUzU,oBAAoBrG,KAAMqE,GAEjD,GAAsB,iBAAXA,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,GAAQrE,KANb,CAOF,GACF,EAQFO,GAAaY,GAAGrb,SAAU40B,GAvMK,gCAuM2C,SAAUtb,GAClF,MAAMpS,EAASsN,GAAuB0F,MAMtC,GAJI,CAAC,IAAK,QAAQ9F,SAAS8F,KAAKoG,UAC9BhH,EAAM+C,iBAGJ9G,GAAW2E,MACb,OAGFO,GAAaa,IAAIpU,EAAQwtB,IAAgB,KAEnC1f,GAAUkF,OACZA,KAAK0S,OACP,IAGF,MAAMkH,EAAc3S,GAAeC,QAAQiT,IAEvCP,GAAeA,IAAgB5sB,GACjC8tB,GAAUhV,YAAY8T,GAAahK,OAGxBkL,GAAUzU,oBAAoBrZ,GACtC+Z,OAAO/G,KACd,IACAO,GAAaY,GAAG9gB,OAAQ05B,IAAuB,KAC7C,IAAK,MAAMhgB,KAAYkN,GAAerU,KAAKunB,IACzCW,GAAUzU,oBAAoBtM,GAAU8V,MAC1C,IAEFtP,GAAaY,GAAG9gB,OAAQo6B,IAAc,KACpC,IAAK,MAAMz6B,KAAWinB,GAAerU,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5B64B,GAAUzU,oBAAoBrmB,GAAS4vB,MAE3C,IAEF5J,GAAqB8U,IAKrBze,GAAmBye,IAQnB,MAAME,GAAgB,IAAIjkB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAQhGkkB,GAAmB,iEAOnBC,GAAmB,qIAEnBC,GAAmB,CAAC34B,EAAW44B,KACnC,MAAMC,EAAgB74B,EAAUvC,SAASC,cAEzC,OAAIk7B,EAAqBlhB,SAASmhB,IAC5BL,GAAc5jB,IAAIikB,IACbva,QAAQma,GAAi
Bn3B,KAAKtB,EAAU84B,YAAcJ,GAAiBp3B,KAAKtB,EAAU84B,YAO1FF,EAAqBx0B,QAAO20B,GAAkBA,aAA0BxW,SAAQ7R,MAAKsoB,GAASA,EAAM13B,KAAKu3B,IAAe,EAG3HI,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAjCP,kBAkC7BnqB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/BoqB,KAAM,GACNnqB,EAAG,GACHoqB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJxqB,EAAG,GACHgb,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChDyP,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IA+CAC,GAAY,CAChBC,UAAW3B,GACX4B,QAAS,CAAC,EAEVC,WAAY,GACZhwB,MAAM,EACNiwB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZhwB,KAAM,UACNiwB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACP7jB,SAAU,oBAMZ,MAAM8jB,WAAwB7Z,GAC5BU,YAAYL,GACVc,QACAnF,KAAKqF,QAAUrF,KAAKoE,WAAWC,EACjC,CAGWJ,qBACT,OAAOkZ,EACT,CAEWjZ,yBACT,OAAOwZ,EACT,CAEWjhB,kBACT,MA5CW,iBA6Cb,CAGAqhB,aACE,OAAOrgC,OAAO0hB,OAAOa,KAAKqF,QAAQgY,SAAS95B,KAAI8gB,GAAUrE,KAAK+d,yBAAyB1Z,KAASzd,OAAOka,QACzG,CAEAkd,aACE,OAAOhe,KAAK8d,aAAa3sB,OAAS,CACpC,CAEA8sB,cAAcZ,GAMZ,OALArd,KAAKke,cAAcb,GAEnBrd,KAAKqF,QAAQgY,QAAU,IAAKrd,KAAKqF,QAAQgY,WACpCA,GAEErd,IACT,CAEAme,SACE,MAAMC,EAAkBt4B,SAASswB,cAAc,OAC/CgI,EAAgBC,UAAYre,KAAKse,eAAete,KAAKqF,QAAQoY,UAE7D,IAAK,MAAO1jB,EAAUwkB,KAAS9gC,OAAO4kB,QAAQrC,KAAKqF,QAAQgY,SACzDrd,KAAKwe,YAAYJ,EAAiBG,EAAMxkB,GAG1C,MAAM0jB,EAAWW,EAAgBjX,SAAS,GAEpCmW,EAAatd,KAAK+d,yBAAyB/d,KAAKqF,QAAQiY,YAM9D,OAJIA,GACFG,EAASjiB,UAAUtE,OAAOomB,EAAW36B,MAAM,MAGtC86B,CACT,CAGAjZ,iBAAiBH,GACfc,MAAMX,iBAAiBH,GAEvBrE,KAAKke,cAAc7Z,EAAOgZ,QAC5B,CAEAa,cAAcO,GACZ,IAAK,MAAO1kB,EAAUsjB,KAAY5/B,OAAO4kB,QAAQoc,GAC/CtZ,MAAMX,iBAAiB,CACrBzK,WACA6jB,MAAOP,GACNM,GAEP,CAEAa,YAAYf,EAAUJ,EAAStjB,GAC7B,MAAM2kB,EAAkBzX,GAAeC,QAAQnN,EAAU0jB,GAEpDiB,KAILrB,EAAUrd,KAAK+d,yBAAyBV,IAOpC,GAAUA,GACZrd,KAAK2e,sBAAsB9jB,GAAWwiB,GAAUqB,GAK9C1e,KAAKqF,QAAQ/X,KACfoxB,EAAgBL,UAAYre,KAAKse,eAAejB,GAIlDqB,EAAgBE,YAAcvB,EAf5BqB,EAAgB3b,SAgBpB,CAEAub,eAAeG,GACb,OAAOze,KAAKqF,QAAQkY,SA7KxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAW1tB,OACd,OAAO0tB,EAGT,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAG1B,MACME,GADY,IAAI1+B,OAAO2+B,WACKC,gBAAgBJ,EAAY,aACxDv9B,EAAW,GAAGlC,UAAU2/B,EAAgBpyB,KAAKyT,iBAAiB,MAEpE,IAAK,MAAMpgB,KAAWsB,EAAU,CAC9B,MAAM49B,EAAcl/B,EAAQC,SAASC,cAErC,IAAKzC,OAAO4D,KAAK+7B,GAAWljB,SAASglB,GAAc,CACjDl/B,EAAQ+iB,SACR,QACF,CAEA,MAAMoc,EAAgB,GAAG//B,UAAUY,EAAQ0B,YACrC09B,EAAoB,GAAGhgC,OAAOg+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IAEpF,IAAK,MAAM18B,KAAa28B,EACjBhE,GAAiB34B,EAAW48B,IAC/Bp/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CAEA,OAAO8+B,EAAgBpyB,KAAK0xB,SAC9B,CA6ImCgB,CAAaZ,EAAKze,KAAKqF,QAAQ+X,UAAWpd,KAAKqF,QAAQmY,YAAciB,CACtG,CAEAV,yBAAyBU,GACvB,MAAsB,mBAARA,EAAqBA,EAAIze,MAAQye,CACjD,CAEAE,sBAAsB3+B,EAAS0+B,GAC7B,GAAI1e,KAAKqF,QAAQ/X,KAGf,OAFAoxB,EAAgBL,UAAY,QAC5BK,EAAgBrI,OAAOr2B,GAIzB0+B,EAAgBE,YAAc5+B,EAAQ4+B,WACxC,EAcF,MACMU,GAAwB,IAAIvoB,IAAI,CAAC,WAAY,YAAa,eAC1DwoB,GAAoB,OAEpBC,GAAoB,OAEpBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAO7jB,KAAU,OAAS,QAC1B8jB,OAAQ,SACRC,KAAM/jB,KAAU,QAAU,QAEtBgkB,GAAY,CAChB/C,UAAW3B,GACX2E,WAAW,EACX1xB,SAAU,kBACV2xB,WAAW,EACXC,YAAa,GACbC,MAAO,EACP9vB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACX8yB,aAAc,KACdsL,UAAU,EACVC,WAAY,KACZzjB,UAAU,EACV0jB,SAAU,+GACV+C,MAAO,GACP/e,QAAS,eAELgf,GAAgB,CACpBrD,UAAW,SACXgD,UAAW,UACX1xB,SAAU,mBACV2xB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACP9vB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACX8yB,aAAc,yBACdsL,SAAU,UACVC,WAAY,kBACZ
zjB,SAAU,mBACV0jB,SAAU,SACV+C,MAAO,4BACP/e,QAAS,UAMX,MAAMif,WAAgBxb,GACpBR,YAAY1kB,EAASqkB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIW,UAAU,+DAGtBG,MAAMnlB,EAASqkB,GAEfrE,KAAK2gB,YAAa,EAClB3gB,KAAK4gB,SAAW,EAChB5gB,KAAK6gB,WAAa,KAClB7gB,KAAK8gB,eAAiB,CAAC,EACvB9gB,KAAKoS,QAAU,KACfpS,KAAK+gB,iBAAmB,KACxB/gB,KAAKghB,YAAc,KAEnBhhB,KAAKihB,IAAM,KAEXjhB,KAAKkhB,gBAEAlhB,KAAKqF,QAAQtL,UAChBiG,KAAKmhB,WAET,CAGWld,qBACT,OAAOkc,EACT,CAEWjc,yBACT,OAAOuc,EACT,CAEWhkB,kBACT,MA1GW,SA2Gb,CAGA2kB,SACEphB,KAAK2gB,YAAa,CACpB,CAEAU,UACErhB,KAAK2gB,YAAa,CACpB,CAEAW,gBACEthB,KAAK2gB,YAAc3gB,KAAK2gB,UAC1B,CAEA5Z,SACO/G,KAAK2gB,aAIV3gB,KAAK8gB,eAAeS,OAASvhB,KAAK8gB,eAAeS,MAE7CvhB,KAAK2P,WACP3P,KAAKwhB,SAKPxhB,KAAKyhB,SACP,CAEAlc,UACE0H,aAAajN,KAAK4gB,UAClBrgB,GAAaC,IAAIR,KAAKoF,SAASjK,QAAQskB,IAAiBC,GAAkB1f,KAAK0hB,mBAE3E1hB,KAAKoF,SAASpL,aAAa,2BAC7BgG,KAAKoF,SAASvjB,aAAa,QAASme,KAAKoF,SAASpL,aAAa,2BAGjEgG,KAAK2hB,iBAELxc,MAAMI,SACR,CAEAsK,OACE,GAAoC,SAAhC7P,KAAKoF,SAAS5jB,MAAMwwB,QACtB,MAAM,IAAI7N,MAAM,uCAGlB,IAAMnE,KAAK4hB,mBAAoB5hB,KAAK2gB,WAClC,OAGF,MAAMhH,EAAYpZ,GAAakB,QAAQzB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UAlJtD,SAqJXkU,GAFalmB,GAAeqE,KAAKoF,WAELpF,KAAKoF,SAAS7kB,cAAcwF,iBAAiBd,SAAS+a,KAAKoF,UAE7F,GAAIuU,EAAU9X,mBAAqBggB,EACjC,OAIF7hB,KAAK2hB,iBAEL,MAAMV,EAAMjhB,KAAK8hB,iBAEjB9hB,KAAKoF,SAASvjB,aAAa,mBAAoBo/B,EAAIjnB,aAAa,OAEhE,MAAM,UACJqmB,GACErgB,KAAKqF,QAaT,GAXKrF,KAAKoF,SAAS7kB,cAAcwF,gBAAgBd,SAAS+a,KAAKihB,OAC7DZ,EAAUhK,OAAO4K,GACjB1gB,GAAakB,QAAQzB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UAtKpC,cAyKnB3N,KAAKoS,QAAUpS,KAAKyS,cAAcwO,GAClCA,EAAIzlB,UAAUtE,IAAIsoB,IAKd,iBAAkB15B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAKwa,UAC/C5G,GAAaY,GAAGnhB,EAAS,YAAa8b,IAc1CkE,KAAK2F,gBAVY,KACfpF,GAAakB,QAAQzB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UAvLrC,WAyLQ,IAApB3N,KAAK6gB,YACP7gB,KAAKwhB,SAGPxhB,KAAK6gB,YAAa,CAAK,GAGK7gB,KAAKihB,IAAKjhB,KAAKgO,cAC/C,CAEA4B,OACE,GAAK5P,KAAK2P,aAIQpP,GAAakB,QAAQzB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UA3MtD,SA6MH9L,iBAAd,CASA,GALY7B,KAAK8hB,iBAEbtmB,UAAUuH,OAAOyc,IAGjB,iBAAkB15B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAKwa,UAC/C5G,GAAaC,IAAIxgB,EAAS,YAAa8b,IAI3CkE,KAAK8gB,eAA4B,OAAI,EACrC9gB,KAAK8gB,eAAelB,KAAiB,EACrC5f,KAAK8gB,eAAenB,KAAiB,EACrC3f,KAAK6gB,WAAa,KAgBlB7gB,KAAK2F,gBAdY,KACX3F,KAAK+hB,yBAIJ/hB,KAAK6gB,YACR7gB,KAAK2hB,iBAGP3hB,KAAKoF,SAASxjB,gBAAgB,oBAE9B2e,GAAakB,QAAQzB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UA3OpC,WA2O8D,GAGnD3N,KAAKihB,IAAKjhB,KAAKgO,cAhC7C,CAiCF,CAEAxiB,SACMwU,KAAKoS,SACPpS,KAAKoS,QAAQ5mB,QAEjB,CAGAo2B,iBACE,OAAO9gB,QAAQd,KAAKgiB,YACtB,CAEAF,iBAKE,OAJK9hB,KAAKihB,MACRjhB,KAAKihB,IAAMjhB,KAAKiiB,kBAAkBjiB,KAAKghB,aAAehhB,KAAKkiB,2BAGtDliB,KAAKihB,GACd,CAEAgB,kBAAkB5E,GAChB,MAAM4D,EAAMjhB,KAAKmiB,oBAAoB9E,GAASc,SAG9C,IAAK8C,EACH,OAAO,KAGTA,EAAIzlB,UAAUuH,OAAOwc,GAAmBC,IAExCyB,EAAIzlB,UAAUtE,IAAI,MAAM8I,KAAK0E,YAAYjI,aACzC,MAAM2lB,EA92HKC,KACb,GACEA,GAAUz/B,KAAK0/B,MAlBH,IAkBS1/B,KAAK2/B,gBACnBz8B,SAAS08B,eAAeH,IAEjC,OAAOA,CAAM,EAy2HGI,CAAOziB,KAAK0E,YAAYjI,MAAMnc,WAO5C,OANA2gC,EAAIp/B,aAAa,KAAMugC,GAEnBpiB,KAAKgO,eACPiT,EAAIzlB,UAAUtE,IAAIqoB,IAGb0B,CACT,CAEAyB,WAAWrF,GACTrd,KAAKghB,YAAc3D,EAEfrd,KAAK2P,aACP3P,KAAK2hB,iBAEL3hB,KAAK6P,OAET,CAEAsS,oBAAoB9E,GAYlB,OAXIrd,KAAK+gB,iBACP/gB,KAAK+gB,iBAAiB9C,cAAcZ,GAEpCrd,KAAK+gB,iBAAmB,IAAIlD,GAAgB,IAAK7d,KAAKqF,QAGpDgY,UACAC,WAAYtd,KAAK+d,yBAAyB/d,KAAKqF,QAAQib,eAIpDtgB,KAAK+gB,gBACd,CAEAmB,yBACE,MAAO,CACL,iBAA0BliB,KAAKgiB,YAEnC,CAEAA,YACE,OAAOhiB,KAAK+d,yBAAyB/d,KAAKqF,QAAQmb,QAAUxgB,KAAKoF,SAASpL,aAAa,yBACzF,CAGA2oB,6BAA6BvjB,GAC3B,OAAOY,KAAK0E,YAAY2B,oBAAoBjH,EAAMW,eAAgBC,KAAK4iB,qBACzE,CAEA5U,cACE,OAAOhO,KAAKqF,QAAQ+a,WAAapgB,KAAKih
B,KAAOjhB,KAAKihB,IAAIzlB,UAAUvW,SAASs6B,GAC3E,CAEA5P,WACE,OAAO3P,KAAKihB,KAAOjhB,KAAKihB,IAAIzlB,UAAUvW,SAASu6B,GACjD,CAEA/M,cAAcwO,GACZ,MAAM9hC,EAA8C,mBAA3B6gB,KAAKqF,QAAQlmB,UAA2B6gB,KAAKqF,QAAQlmB,UAAUlB,KAAK+hB,KAAMihB,EAAKjhB,KAAKoF,UAAYpF,KAAKqF,QAAQlmB,UAChI0jC,EAAahD,GAAc1gC,EAAU8lB,eAC3C,OAAO,GAAoBjF,KAAKoF,SAAU6b,EAAKjhB,KAAK6S,iBAAiBgQ,GACvE,CAEA5P,aACE,MAAM,OACJxqB,GACEuX,KAAKqF,QAET,MAAsB,iBAAX5c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAASmf,OAAO+P,SAASlvB,EAAO,MAGzC,mBAAXqK,EACFyqB,GAAczqB,EAAOyqB,EAAYlT,KAAKoF,UAGxC3c,CACT,CAEAs1B,yBAAyBU,GACvB,MAAsB,mBAARA,EAAqBA,EAAIxgC,KAAK+hB,KAAKoF,UAAYqZ,CAC/D,CAEA5L,iBAAiBgQ,GACf,MAAM1P,EAAwB,CAC5Bh0B,UAAW0jC,EACXhsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBuP,KAAKqF,QAAQ5U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQuX,KAAKiT,eAEd,CACDlyB,KAAM,kBACNmB,QAAS,CACPwM,SAAUsR,KAAKqF,QAAQ3W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIggB,KAAK0E,YAAYjI,eAE/B,CACD1b,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGFkV,KAAK8hB,iBAAiBjgC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IAAKg0B,KAC+B,mBAA9BnT,KAAKqF,QAAQ4M,aAA8BjS,KAAKqF,QAAQ4M,aAAakB,GAAyBnT,KAAKqF,QAAQ4M,aAE1H,CAEAiP,gBACE,MAAM4B,EAAW9iB,KAAKqF,QAAQ5D,QAAQ9e,MAAM,KAE5C,IAAK,MAAM8e,KAAWqhB,EACpB,GAAgB,UAAZrhB,EACFlB,GAAaY,GAAGnB,KAAKoF,SAAUpF,KAAK0E,YAAYiJ,UA3YlC,SA2Y4D3N,KAAKqF,QAAQtL,UAAUqF,IAC/EY,KAAK2iB,6BAA6BvjB,GAE1C2H,QAAQ,SAEb,GAtZU,WAsZNtF,EAA4B,CACrC,MAAMshB,EAAUthB,IAAYke,GAAgB3f,KAAK0E,YAAYiJ,UA9Y5C,cA8Y0E3N,KAAK0E,YAAYiJ,UAhZ5F,WAiZVqV,EAAWvhB,IAAYke,GAAgB3f,KAAK0E,YAAYiJ,UA9Y7C,cA8Y2E3N,KAAK0E,YAAYiJ,UAhZ5F,YAiZjBpN,GAAaY,GAAGnB,KAAKoF,SAAU2d,EAAS/iB,KAAKqF,QAAQtL,UAAUqF,IAC7D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAElDkU,EAAQwN,eAA8B,YAAf1hB,EAAMqB,KAAqBmf,GAAgBD,KAAiB,EAEnFrM,EAAQmO,QAAQ,IAElBlhB,GAAaY,GAAGnB,KAAKoF,SAAU4d,EAAUhjB,KAAKqF,QAAQtL,UAAUqF,IAC9D,MAAMkU,EAAUtT,KAAK2iB,6BAA6BvjB,GAElDkU,EAAQwN,eAA8B,aAAf1hB,EAAMqB,KAAsBmf,GAAgBD,IAAiBrM,EAAQlO,SAASngB,SAASma,EAAMU,eAEpHwT,EAAQkO,QAAQ,GAEpB,CAGFxhB,KAAK0hB,kBAAoB,KACnB1hB,KAAKoF,UACPpF,KAAK4P,MACP,EAGFrP,GAAaY,GAAGnB,KAAKoF,SAASjK,QAAQskB,IAAiBC,GAAkB1f,KAAK0hB,kBAChF,CAEAP,YACE,MAAMX,EAAQxgB,KAAKoF,SAASpL,aAAa,SAEpCwmB,IAIAxgB,KAAKoF,SAASpL,aAAa,eAAkBgG,KAAKoF,SAASwZ,YAAYxkB,QAC1E4F,KAAKoF,SAASvjB,aAAa,aAAc2+B,GAG3CxgB,KAAKoF,SAASvjB,aAAa,yBAA0B2+B,GAGrDxgB,KAAKoF,SAASxjB,gBAAgB,SAChC,CAEA6/B,SACMzhB,KAAK2P,YAAc3P,KAAK6gB,WAC1B7gB,KAAK6gB,YAAa,GAIpB7gB,KAAK6gB,YAAa,EAElB7gB,KAAKijB,aAAY,KACXjjB,KAAK6gB,YACP7gB,KAAK6P,MACP,GACC7P,KAAKqF,QAAQkb,MAAM1Q,MACxB,CAEA2R,SACMxhB,KAAK+hB,yBAIT/hB,KAAK6gB,YAAa,EAElB7gB,KAAKijB,aAAY,KACVjjB,KAAK6gB,YACR7gB,KAAK4P,MACP,GACC5P,KAAKqF,QAAQkb,MAAM3Q,MACxB,CAEAqT,YAAYrlB,EAASslB,GACnBjW,aAAajN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW/iB,WAAWD,EAASslB,EACtC,CAEAnB,uBACE,OAAOtkC,OAAO0hB,OAAOa,KAAK8gB,gBAAgB5mB,UAAS,EACrD,CAEAkK,WAAWC,GACT,MAAM8e,EAAiB5f,GAAYG,kBAAkB1D,KAAKoF,UAE1D,IAAK,MAAMge,KAAiB3lC,OAAO4D,KAAK8hC,GAClC7D,GAAsBloB,IAAIgsB,WACrBD,EAAeC,GAY1B,OARA/e,EAAS,IAAK8e,KACU,iBAAX9e,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAASrE,KAAKsE,gBAAgBD,GAC9BA,EAASrE,KAAKuE,kBAAkBF,GAEhCrE,KAAKwE,iBAAiBH,GAEfA,CACT,CAEAE,kBAAkBF,GAkBhB,OAjBAA,EAAOgc,WAAiC,IAArBhc,EAAOgc,UAAsBv6B,SAAS6G,KAAOkO,GAAWwJ,EAAOgc,WAEtD,iBAAjBhc,EAAOkc,QAChBlc,EAAOkc,MAAQ,CACb1Q,KAAMxL,EAAOkc,MACb3Q,KAAMvL,EAAOkc,QAIW,iBAAjBlc,EAAOmc,QAChBnc,EAAOmc,MAAQnc,EAAOmc,MAAMlgC,YAGA,iBAAnB+jB,EAAOgZ,UAChBhZ,EAAOgZ,QAAUhZ,EAAOgZ,QAAQ/8B,YAG3B+jB,CACT,CAEAue,qBACE,MAAMve,EAAS,CAAC,EAEhB,IAAK,MAAM9mB,KAAOyiB,KAAKqF,QACjBrF,KAAK0E,YAAYT,QAAQ1mB,KAASyiB,KAAKqF,QAAQ9nB,KACjD8mB,EAAO9mB,GAAOyiB,KAAKqF,QAAQ9nB,IAS/B,OALA8mB,EAAOtK,UAAW,EAClBsK,EAAO5C,
QAAU,SAIV4C,CACT,CAEAsd,iBACM3hB,KAAKoS,UACPpS,KAAKoS,QAAQ3Y,UAEbuG,KAAKoS,QAAU,MAGbpS,KAAKihB,MACPjhB,KAAKihB,IAAIle,SACT/C,KAAKihB,IAAM,KAEf,CAGApb,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO41B,GAAQra,oBAAoBrG,KAAMqE,GAE/C,GAAsB,iBAAXA,EAAX,CAIA,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQFhI,GAAmBqkB,IAYnB,MAGM2C,GAAY,IAAK3C,GAAQzc,QAC7BoZ,QAAS,GACT50B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACXs+B,SAAU,8IACVhc,QAAS,SAEL6hB,GAAgB,IAAK5C,GAAQxc,YACjCmZ,QAAS,kCAMX,MAAMkG,WAAgB7C,GAETzc,qBACT,OAAOof,EACT,CAEWnf,yBACT,OAAOof,EACT,CAEW7mB,kBACT,MA5BW,SA6Bb,CAGAmlB,iBACE,OAAO5hB,KAAKgiB,aAAehiB,KAAKwjB,aAClC,CAGAtB,yBACE,MAAO,CACL,kBAAkBliB,KAAKgiB,YACvB,gBAAoBhiB,KAAKwjB,cAE7B,CAEAA,cACE,OAAOxjB,KAAK+d,yBAAyB/d,KAAKqF,QAAQgY,QACpD,CAGAxX,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOy4B,GAAQld,oBAAoBrG,KAAMqE,GAE/C,GAAsB,iBAAXA,EAAX,CAIA,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQFhI,GAAmBknB,IAYnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChBx7B,OAAQ,KAERy7B,WAAY,eACZC,cAAc,EACdn3B,OAAQ,KACRo3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpB57B,OAAQ,gBAERy7B,WAAY,SACZC,aAAc,UACdn3B,OAAQ,UACRo3B,UAAW,SAMb,MAAME,WAAkBpf,GACtBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GAEfrE,KAAKukB,aAAe,IAAI5yB,IACxBqO,KAAKwkB,oBAAsB,IAAI7yB,IAC/BqO,KAAKykB,aAA6D,YAA9C/+B,iBAAiBsa,KAAKoF,UAAU3Y,UAA0B,KAAOuT,KAAKoF,SAC1FpF,KAAK0kB,cAAgB,KACrB1kB,KAAK2kB,UAAY,KACjB3kB,KAAK4kB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnB9kB,KAAK+kB,SACP,CAGW9gB,qBACT,OAAOggB,EACT,CAEW/f,yBACT,OAAOmgB,EACT,CAEW5nB,kBACT,MAhEW,WAiEb,CAGAsoB,UACE/kB,KAAKglB,mCAELhlB,KAAKilB,2BAEDjlB,KAAK2kB,UACP3kB,KAAK2kB,UAAUO,aAEfllB,KAAK2kB,UAAY3kB,KAAKmlB,kBAGxB,IAAK,MAAMC,KAAWplB,KAAKwkB,oBAAoBrlB,SAC7Ca,KAAK2kB,UAAUU,QAAQD,EAE3B,CAEA7f,UACEvF,KAAK2kB,UAAUO,aAEf/f,MAAMI,SACR,CAGAhB,kBAAkBF,GAUhB,OARAA,EAAOrX,OAAS6N,GAAWwJ,EAAOrX,SAAWlH,SAAS6G,KAEtD0X,EAAO6f,WAAa7f,EAAO5b,OAAS,GAAG4b,EAAO5b,oBAAsB4b,EAAO6f,WAE3C,iBAArB7f,EAAO+f,YAChB/f,EAAO+f,UAAY/f,EAAO+f,UAAUzhC,MAAM,KAAKY,KAAInF,GAASmf,OAAOC,WAAWpf,MAGzEimB,CACT,CAEA4gB,2BACOjlB,KAAKqF,QAAQ8e,eAKlB5jB,GAAaC,IAAIR,KAAKqF,QAAQrY,OAAQ22B,IACtCpjB,GAAaY,GAAGnB,KAAKqF,QAAQrY,OAAQ22B,GAAaG,IAAuB1kB,IACvE,MAAMkmB,EAAoBtlB,KAAKwkB,oBAAoB5mC,IAAIwhB,EAAMpS,OAAOtB,MAEpE,GAAI45B,EAAmB,CACrBlmB,EAAM+C,iBACN,MAAMtG,EAAOmE,KAAKykB,cAAgBpkC,OAC5BmE,EAAS8gC,EAAkBxgC,UAAYkb,KAAKoF,SAAStgB,UAE3D,GAAI+W,EAAK0pB,SAKP,YAJA1pB,EAAK0pB,SAAS,CACZnjC,IAAKoC,EACLghC,SAAU,WAMd3pB,EAAK3P,UAAY1H,CACnB,KAEJ,CAEA2gC,kBACE,MAAMjjC,EAAU,CACd2Z,KAAMmE,KAAKykB,aACXL,UAAWpkB,KAAKqF,QAAQ+e,UACxBF,WAAYlkB,KAAKqF,QAAQ6e,YAE3B,OAAO,IAAIuB,sBAAqBpjB,GAAWrC,KAAK0lB,kBAAkBrjB,IAAUngB,EAC9E,CAGAwjC,kBAAkBrjB,GAChB,MAAMsjB,EAAgB/H,GAAS5d,KAAKukB,aAAa3mC,IAAI,IAAIggC,EAAM5wB,OAAO44B,MAEhE3O,EAAW2G,IACf5d,KAAK4kB,oBAAoBC,gBAAkBjH,EAAM5wB,OAAOlI,UAExDkb,KAAK6lB,SAASF,EAAc/H,GAAO,EAG/BkH,GAAmB9kB,KAAKykB,cAAgB3+B,SAASC,iBAAiBmG,UAClE45B,EAAkBhB,GAAmB9kB,KAAK4kB,oBAAoBE,gBACpE9kB,KAAK4kB,oBAAoBE,gBAAkBA,EAE3C,IAAK,MAAMlH,KAASvb,EAAS,CAC3B,IAAKub,EAAMmI,eAAgB,CACzB/lB,KAAK0kB,cAAgB,KAErB1kB,KAAKgmB,kBAAkBL,EAAc/H,IAErC,QACF,CAEA,MAAMqI,EAA2BrI,EAAM5wB,OAAOlI,WAAakb,KAAK4kB,oBAAoBC,gBAEpF,GAAIiB,GAAmBG,GAGrB,GAFAhP,EAAS2G,IAEJkH,EACH,YAOCgB,GAAoBG,GACvBhP,EAAS2G,EAEb,CACF,CAEAoH,mCACEhlB,KAAKukB,aAAe,IAAI5yB,IACxBqO,KAAKwkB,oBAAsB,IAAI7yB,IAC/B,MAAMu0B,EAAcjf,GAAerU,KAAKkxB,GAAuB9jB,KAAKqF,QAAQrY,QAE5E,IAAK,MAAMm5B,KAAUD,EAAa,CAEhC,IAAKC,EAAOz6B,MAAQ2P,GAAW8qB,GAC7B,SAGF,MAAM
b,EAAoBre,GAAeC,QAAQif,EAAOz6B,KAAMsU,KAAKoF,UAE/DtK,GAAUwqB,KACZtlB,KAAKukB,aAAa/xB,IAAI2zB,EAAOz6B,KAAMy6B,GAEnCnmB,KAAKwkB,oBAAoBhyB,IAAI2zB,EAAOz6B,KAAM45B,GAE9C,CACF,CAEAO,SAAS74B,GACHgT,KAAK0kB,gBAAkB13B,IAI3BgT,KAAKgmB,kBAAkBhmB,KAAKqF,QAAQrY,QAEpCgT,KAAK0kB,cAAgB13B,EACrBA,EAAOwO,UAAUtE,IAAI2sB,IAErB7jB,KAAKomB,iBAAiBp5B,GAEtBuT,GAAakB,QAAQzB,KAAKoF,SAAUse,GAAgB,CAClD5jB,cAAe9S,IAEnB,CAEAo5B,iBAAiBp5B,GAEf,GAAIA,EAAOwO,UAAUvW,SAzNQ,iBA0N3BgiB,GAAeC,QAhNc,mBAgNsBla,EAAOmO,QAjNtC,cAiNkEK,UAAUtE,IAAI2sB,SAItG,IAAK,MAAMwC,KAAapf,GAAeI,QAAQra,EA1NnB,qBA6N1B,IAAK,MAAMxJ,KAAQyjB,GAAeM,KAAK8e,EAAWrC,IAChDxgC,EAAKgY,UAAUtE,IAAI2sB,GAGzB,CAEAmC,kBAAkB9gC,GAChBA,EAAOsW,UAAUuH,OAAO8gB,IACxB,MAAMyC,EAAcrf,GAAerU,KAAK,GAAGkxB,MAAyBD,KAAuB3+B,GAE3F,IAAK,MAAM9E,KAAQkmC,EACjBlmC,EAAKob,UAAUuH,OAAO8gB,GAE1B,CAGAhe,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOw5B,GAAUje,oBAAoBrG,KAAMqE,GAEjD,GAAsB,iBAAXA,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQF9D,GAAaY,GAAG9gB,OAAQujC,IAAuB,KAC7C,IAAK,MAAM2C,KAAOtf,GAAerU,KAtQT,0BAuQtB0xB,GAAUje,oBAAoBkgB,EAChC,IAMFlqB,GAAmBioB,IAYnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAoB,SACpBC,GAAoB,OACpBC,GAAoB,OAIpBC,GAA+B,yBAI/BC,GAAuB,2EAEvBC,GAAsB,YAHOF,uBAAiDA,mBAA6CA,OAG/EC,KAC5CE,GAA8B,IAAIN,8BAA6CA,+BAA8CA,4BAKnI,MAAMO,WAAYziB,GAChBR,YAAY1kB,GACVmlB,MAAMnlB,GACNggB,KAAKqS,QAAUrS,KAAKoF,SAASjK,QAdN,uCAgBlB6E,KAAKqS,UAMVrS,KAAK4nB,sBAAsB5nB,KAAKqS,QAASrS,KAAK6nB,gBAE9CtnB,GAAaY,GAAGnB,KAAKoF,SAAU0hB,IAAe1nB,GAASY,KAAK4M,SAASxN,KACvE,CAGW3C,kBACT,MAlDW,KAmDb,CAGAoT,OAEE,MAAMiY,EAAY9nB,KAAKoF,SAEvB,GAAIpF,KAAK+nB,cAAcD,GACrB,OAIF,MAAME,EAAShoB,KAAKioB,iBAEdC,EAAYF,EAASznB,GAAakB,QAAQumB,EAAQvB,GAAc,CACpE3mB,cAAegoB,IACZ,KACavnB,GAAakB,QAAQqmB,EAAWnB,GAAc,CAC9D7mB,cAAekoB,IAGHnmB,kBAAoBqmB,GAAaA,EAAUrmB,mBAIzD7B,KAAKmoB,YAAYH,EAAQF,GAEzB9nB,KAAKooB,UAAUN,EAAWE,GAC5B,CAGAI,UAAUpoC,EAASqoC,GACZroC,IAILA,EAAQwb,UAAUtE,IAAIkwB,IAEtBpnB,KAAKooB,UAAU9tB,GAAuBta,IAmBtCggB,KAAK2F,gBAhBY,KACsB,QAAjC3lB,EAAQga,aAAa,SAKzBha,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GAEtCme,KAAKsoB,gBAAgBtoC,GAAS,GAE9BugB,GAAakB,QAAQzhB,EAAS4mC,GAAe,CAC3C9mB,cAAeuoB,KAVfroC,EAAQwb,UAAUtE,IAAIowB,GAWtB,GAG0BtnC,EAASA,EAAQwb,UAAUvW,SAASoiC,KACpE,CAEAc,YAAYnoC,EAASqoC,GACdroC,IAILA,EAAQwb,UAAUuH,OAAOqkB,IACzBpnC,EAAQ+6B,OAER/a,KAAKmoB,YAAY7tB,GAAuBta,IAmBxCggB,KAAK2F,gBAhBY,KACsB,QAAjC3lB,EAAQga,aAAa,SAKzBha,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MAEjCme,KAAKsoB,gBAAgBtoC,GAAS,GAE9BugB,GAAakB,QAAQzhB,EAAS0mC,GAAgB,CAC5C5mB,cAAeuoB,KAVfroC,EAAQwb,UAAUuH,OAAOukB,GAWzB,GAG0BtnC,EAASA,EAAQwb,UAAUvW,SAASoiC,KACpE,CAEAza,SAASxN,GACP,IAAK,CAAC4nB,GAAgBC,GAAiBC,GAAcC,IAAgBjtB,SAASkF,EAAM7hB,KAClF,OAGF6hB,EAAMyU,kBAENzU,EAAM+C,iBACN,MAAMoL,EAAS,CAAC0Z,GAAiBE,IAAgBjtB,SAASkF,EAAM7hB,KAC1DgrC,EAAoBzqB,GAAqBkC,KAAK6nB,eAAejhC,QAAO5G,IAAYqb,GAAWrb,KAAWof,EAAMpS,OAAQugB,GAAQ,GAE9Hgb,IACFA,EAAkB7V,MAAM,CACtB8V,eAAe,IAEjBb,GAAIthB,oBAAoBkiB,GAAmB1Y,OAE/C,CAEAgY,eAEE,OAAO5gB,GAAerU,KAAK60B,GAAqBznB,KAAKqS,QACvD,CAEA4V,iBACE,OAAOjoB,KAAK6nB,eAAej1B,MAAKzN,GAAS6a,KAAK+nB,cAAc5iC,MAAW,IACzE,CAEAyiC,sBAAsB1iC,EAAQiiB,GAC5BnH,KAAKyoB,yBAAyBvjC,EAAQ,OAAQ,WAE9C,IAAK,MAAMC,KAASgiB,EAClBnH,KAAK0oB,6BAA6BvjC,EAEtC,CAEAujC,6BAA6BvjC,GAC3BA,EAAQ6a,KAAK2oB,iBAAiBxjC,GAE9B,MAAMyjC,EAAW5oB,KAAK+nB,cAAc5iC,GAE9B0jC,EAAY7oB,KAAK8oB,iBAAiB3jC,GAExCA,EAAMtD,aAAa,gBAAiB+mC,GAEhCC,IAAc1j
C,GAChB6a,KAAKyoB,yBAAyBI,EAAW,OAAQ,gBAG9CD,GACHzjC,EAAMtD,aAAa,WAAY,MAGjCme,KAAKyoB,yBAAyBtjC,EAAO,OAAQ,OAG7C6a,KAAK+oB,mCAAmC5jC,EAC1C,CAEA4jC,mCAAmC5jC,GACjC,MAAM6H,EAASsN,GAAuBnV,GAEjC6H,IAILgT,KAAKyoB,yBAAyBz7B,EAAQ,OAAQ,YAE1C7H,EAAMygC,IACR5lB,KAAKyoB,yBAAyBz7B,EAAQ,kBAAmB,IAAI7H,EAAMygC,MAEvE,CAEA0C,gBAAgBtoC,EAASgpC,GACvB,MAAMH,EAAY7oB,KAAK8oB,iBAAiB9oC,GAExC,IAAK6oC,EAAUrtB,UAAUvW,SAxMN,YAyMjB,OAGF,MAAM8hB,EAAS,CAAChN,EAAU2b,KACxB,MAAM11B,EAAUinB,GAAeC,QAAQnN,EAAU8uB,GAE7C7oC,GACFA,EAAQwb,UAAUuL,OAAO2O,EAAWsT,EACtC,EAGFjiB,EAnN6B,mBAmNIqgB,IACjCrgB,EAnN2B,iBAmNIugB,IAC/BuB,EAAUhnC,aAAa,gBAAiBmnC,EAC1C,CAEAP,yBAAyBzoC,EAASwC,EAAWpE,GACtC4B,EAAQ0b,aAAalZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CAEA2pC,cAAczY,GACZ,OAAOA,EAAK9T,UAAUvW,SAASmiC,GACjC,CAGAuB,iBAAiBrZ,GACf,OAAOA,EAAKlI,QAAQqgB,IAAuBnY,EAAOrI,GAAeC,QAAQugB,GAAqBnY,EAChG,CAGAwZ,iBAAiBxZ,GACf,OAAOA,EAAKnU,QArOO,gCAqOoBmU,CACzC,CAGAzJ,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO68B,GAAIthB,oBAAoBrG,MAErC,GAAsB,iBAAXqE,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQF9D,GAAaY,GAAGrb,SAAU+gC,GAAsBW,IAAsB,SAAUpoB,GAC1E,CAAC,IAAK,QAAQlF,SAAS8F,KAAKoG,UAC9BhH,EAAM+C,iBAGJ9G,GAAW2E,OAIf2nB,GAAIthB,oBAAoBrG,MAAM6P,MAChC,IAKAtP,GAAaY,GAAG9gB,OAAQ0mC,IAAqB,KAC3C,IAAK,MAAM/mC,KAAWinB,GAAerU,KAAK80B,IACxCC,GAAIthB,oBAAoBrmB,EAC1B,IAMFqc,GAAmBsrB,IAYnB,MAEMniB,GAAY,YACZyjB,GAAkB,YAAYzjB,KAC9B0jB,GAAiB,WAAW1jB,KAC5B2jB,GAAgB,UAAU3jB,KAC1B4jB,GAAiB,WAAW5jB,KAC5B6jB,GAAa,OAAO7jB,KACpB8jB,GAAe,SAAS9jB,KACxB+jB,GAAa,OAAO/jB,KACpBgkB,GAAc,QAAQhkB,KAEtBikB,GAAkB,OAElBC,GAAkB,OAClBC,GAAqB,UACrBzlB,GAAc,CAClBkc,UAAW,UACXwJ,SAAU,UACVrJ,MAAO,UAEHtc,GAAU,CACdmc,WAAW,EACXwJ,UAAU,EACVrJ,MAAO,KAMT,MAAMsJ,WAAc3kB,GAClBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAK4gB,SAAW,KAChB5gB,KAAK8pB,sBAAuB,EAC5B9pB,KAAK+pB,yBAA0B,EAE/B/pB,KAAKkhB,eACP,CAGWjd,qBACT,OAAOA,EACT,CAEWC,yBACT,OAAOA,EACT,CAEWzH,kBACT,MAlDS,OAmDX,CAGAoT,OACoBtP,GAAakB,QAAQzB,KAAKoF,SAAUmkB,IAExC1nB,mBAId7B,KAAKgqB,gBAEDhqB,KAAKqF,QAAQ+a,WACfpgB,KAAKoF,SAAS5J,UAAUtE,IArDN,QAgEpB8I,KAAKoF,SAAS5J,UAAUuH,OAAO0mB,IAG/B1tB,GAAOiE,KAAKoF,UAEZpF,KAAKoF,SAAS5J,UAAUtE,IAAIwyB,GAAiBC,IAE7C3pB,KAAK2F,gBAfY,KACf3F,KAAKoF,SAAS5J,UAAUuH,OAAO4mB,IAE/BppB,GAAakB,QAAQzB,KAAKoF,SAAUokB,IAEpCxpB,KAAKiqB,oBAAoB,GAUGjqB,KAAKoF,SAAUpF,KAAKqF,QAAQ+a,WAC5D,CAEAxQ,OACO5P,KAAKkqB,YAIQ3pB,GAAakB,QAAQzB,KAAKoF,SAAUikB,IAExCxnB,mBAad7B,KAAKoF,SAAS5J,UAAUtE,IAAIyyB,IAE5B3pB,KAAK2F,gBAXY,KACf3F,KAAKoF,SAAS5J,UAAUtE,IAAIuyB,IAG5BzpB,KAAKoF,SAAS5J,UAAUuH,OAAO4mB,GAAoBD,IAEnDnpB,GAAakB,QAAQzB,KAAKoF,SAAUkkB,GAAa,GAKrBtpB,KAAKoF,SAAUpF,KAAKqF,QAAQ+a,YAC5D,CAEA7a,UACEvF,KAAKgqB,gBAEDhqB,KAAKkqB,WACPlqB,KAAKoF,SAAS5J,UAAUuH,OAAO2mB,IAGjCvkB,MAAMI,SACR,CAEA2kB,UACE,OAAOlqB,KAAKoF,SAAS5J,UAAUvW,SAASykC,GAC1C,CAGAO,qBACOjqB,KAAKqF,QAAQukB,WAId5pB,KAAK8pB,sBAAwB9pB,KAAK+pB,0BAItC/pB,KAAK4gB,SAAW/iB,YAAW,KACzBmC,KAAK4P,MAAM,GACV5P,KAAKqF,QAAQkb,QAClB,CAEA4J,eAAe/qB,EAAOgrB,GACpB,OAAQhrB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAK8pB,qBAAuBM,EAC5B,MAGJ,IAAK,UACL,IAAK,WAEDpqB,KAAK+pB,wBAA0BK,EAKrC,GAAIA,EAGF,YAFApqB,KAAKgqB,gBAKP,MAAMxc,EAAcpO,EAAMU,cAEtBE,KAAKoF,WAAaoI,GAAexN,KAAKoF,SAASngB,SAASuoB,IAI5DxN,KAAKiqB,oBACP,CAEA/I,gBACE3gB,GAAaY,GAAGnB,KAAKoF,SAAU6jB,IAAiB7pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KACpFmB,GAAaY,GAAGnB,KAAKoF,SAAU8jB,IAAgB9pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KACnFmB,GAAaY,GAAGnB,KAAKoF,SAAU+jB,IAAe/pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KAClFmB,GAAaY,GAAGnB,KAAKoF,SAAUgkB,IAAgBhqB,GAASY,KAAKmqB,eAAe/qB,GAAO,IACrF,CAEA4qB,gBACE/c,aAAajN,KAAK4gB,UAClB5g
B,KAAK4gB,SAAW,IAClB,CAGA/a,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO++B,GAAMxjB,oBAAoBrG,KAAMqE,GAE7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,GAAQrE,KACf,CACF,GACF,ECxjKK,IAAuBzD,GDgkK9ByJ,GAAqB6jB,IAKrBxtB,GAAmBwtB,ICrkKWttB,GCK9B,WAC2B,GAAG1J,MAAM5U,KAChC6H,SAASsa,iBAAiB,+BAET7c,KAAI,SAAU8mC,GAC/B,OAAO,IAAI3J,GAAQ2J,EAAkB,CAAE9J,MAAO,CAAE1Q,KAAM,IAAKD,KAAM,MACnE,GACF,EDX6B,WAAvB9pB,SAASgX,WAAyBP,KACjCzW,SAASyF,iBAAiB,mBAAoBgR","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphin
x_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 
'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? 
'' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? 
visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (process.env.NODE_ENV !== \"production\") {\n if (!isHTMLElement(arrowElement)) {\n console.error(['Popper: \"arrow\" element must be an HTMLElement (not an SVGElement).', 'To use an SVG arrow, wrap it in an HTMLElement that will be used as', 'the arrow.'].join(' '));\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n if (process.env.NODE_ENV !== \"production\") {\n console.error(['Popper: \"arrow\" modifier\\'s `element` must be a child of the popper', 'element.'].join(' '));\n }\n\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? 
roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? 
true : _options$roundOffsets;\n\n if (process.env.NODE_ENV !== \"production\") {\n var transitionProperty = getComputedStyle(state.elements.popper).transitionProperty || '';\n\n if (adaptive && ['transform', 'top', 'right', 'bottom', 'left'].some(function (property) {\n return transitionProperty.indexOf(property) >= 0;\n })) {\n console.warn(['Popper: Detected CSS transitions on at least one of the following', 'CSS properties: \"transform\", \"top\", \"right\", \"bottom\", \"left\".', '\\n\\n', 'Disable the \"computeStyles\" modifier\\'s `adaptive` option to allow', 'for smooth transitions, or remove these properties from the CSS', 'transition declaration on the popper element if only transitioning', 'opacity or background-color for example.', '\\n\\n', 'We recommend using the popper element as a wrapper around an inner', 'element that can have any CSS property transitioned for animations.'].join(' '));\n }\n }\n\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n\n if (process.env.NODE_ENV !== \"production\") {\n console.error(['Popper: The `allowedAutoPlacements` option did not allow any', 'placements. Ensure the `placement` option matches the variation', 'of the allowed placements.', 'For example, \"auto\" cannot be used to allow \"bottom-start\".', 'Use \"auto-start\" instead.'].join(' '));\n }\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, 
top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? {\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport getComputedStyle from \"./dom-utils/getComputedStyle.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport validateModifiers from \"./utils/validateModifiers.js\";\nimport uniqueBy from \"./utils/uniqueBy.js\";\nimport getBasePlacement from \"./utils/getBasePlacement.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nimport { auto } from \"./enums.js\";\nvar INVALID_ELEMENT_ERROR = 'Popper: Invalid reference or popper argument provided. They must be either a DOM element or virtual element.';\nvar INFINITE_LOOP_ERROR = 'Popper: An infinite loop in the modifiers cycle has been detected! 
The cycle has been interrupted to prevent a browser crash.';\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n }); // Validate the provided modifiers so that the consumer will get warned\n // if one of the modifiers is invalid for any reason\n\n if (process.env.NODE_ENV !== \"production\") {\n var modifiers = uniqueBy([].concat(orderedModifiers, state.options.modifiers), function (_ref) {\n var name = _ref.name;\n return name;\n });\n validateModifiers(modifiers);\n\n if (getBasePlacement(state.options.placement) === auto) {\n var flipModifier = state.orderedModifiers.find(function (_ref2) {\n var name = _ref2.name;\n return name === 'flip';\n });\n\n if (!flipModifier) {\n console.error(['Popper: \"auto\" placements require the \"flip\" modifier be', 'present and enabled to work.'].join(' '));\n }\n }\n\n var _getComputedStyle = getComputedStyle(popper),\n marginTop = _getComputedStyle.marginTop,\n marginRight = _getComputedStyle.marginRight,\n marginBottom = _getComputedStyle.marginBottom,\n marginLeft = _getComputedStyle.marginLeft; // We no longer take into account `margins` on the popper, and it can\n // cause bugs with positioning, so we'll warn the consumer\n\n\n if ([marginTop, marginRight, marginBottom, marginLeft].some(function (margin) {\n return parseFloat(margin);\n })) {\n console.warn(['Popper: CSS \"margin\" styles cannot be used to apply padding', 'between the popper and its reference element or boundary.', 'To replicate margin, use the `offset` modifier, as 
well as', 'the `padding` option in the `preventOverflow` and `flip`', 'modifiers.'].join(' '));\n }\n }\n\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n if (process.env.NODE_ENV !== \"production\") {\n console.error(INVALID_ELEMENT_ERROR);\n }\n\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n var __debug_loops__ = 0;\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (process.env.NODE_ENV !== \"production\") {\n __debug_loops__ += 1;\n\n if (__debug_loops__ > 100) {\n console.error(INFINITE_LOOP_ERROR);\n break;\n }\n }\n\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? {} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n if (process.env.NODE_ENV !== \"production\") {\n console.error(INVALID_ELEMENT_ERROR);\n }\n\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. 
This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref3) {\n var name = _ref3.name,\n _ref3$options = _ref3.options,\n options = _ref3$options === void 0 ? {} : _ref3$options,\n effect = _ref3.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.2.3 (https://getbootstrap.com/)\n * Copyright 2011-2022 The 
Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend'; // Shout-out Angus Croll (https://goo.gl/pxwQGp)\n\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n/**\n * Public Util API\n */\n\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n\n return prefix;\n};\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href'); // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n } // Just in case some CMS puts out a full URL with the anchor appended\n\n\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n\n selector = hrefAttribute && hrefAttribute !== '#' ? hrefAttribute.trim() : null;\n }\n\n return selector;\n};\n\nconst getSelectorFromElement = element => {\n const selector = getSelector(element);\n\n if (selector) {\n return document.querySelector(selector) ? selector : null;\n }\n\n return null;\n};\n\nconst getElementFromSelector = element => {\n const selector = getSelector(element);\n return selector ? document.querySelector(selector) : null;\n};\n\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n } // Get transition-duration of the element\n\n\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay); // Return 0 if element or transition duration is not found\n\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n } // If multiple durations are defined, take the first\n\n\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\n\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\n\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n\n return typeof object.nodeType !== 'undefined';\n};\n\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(object);\n }\n\n return null;\n};\n\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible'; // Handle `details` element as its content may falsie appear visible when it is closed\n\n const closedDetails = element.closest('details:not([open])');\n\n if (!closedDetails) {\n return elementIsVisible;\n }\n\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n\n if (summary === null) {\n return false;\n }\n }\n\n return elementIsVisible;\n};\n\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n\n if (element.classList.contains('disabled')) {\n return true;\n }\n\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\n\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n } // Can find the shadow root otherwise it'll return the document\n\n\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n\n if (element instanceof ShadowRoot) {\n return element;\n } // when we don't find a shadow root\n\n\n if (!element.parentNode) {\n return null;\n }\n\n return findShadowRoot(element.parentNode);\n};\n\nconst noop = () => {};\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\n\n\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\n\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n\n return null;\n};\n\nconst DOMContentLoadedCallbacks = [];\n\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\n\nconst isRTL = () => document.documentElement.dir === 'rtl';\n\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\n\nconst execute = callback => {\n if (typeof callback === 'function') {\n callback();\n }\n};\n\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = 
false;\n\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\n\n\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement); // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n\n index += shouldGetNext ? 1 : -1;\n\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\n\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\n\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\n\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n\n return fn.apply(element, [event]);\n };\n}\n\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n\n for (let {\n target\n } = event; target && target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n\n hydrateObj(event, {\n delegateTarget: target\n });\n\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n\n return fn.apply(target, [event]);\n 
}\n }\n };\n}\n\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\n\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string'; // todo: tooltip passes `false` instead of selector, so we need to check\n\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n\n return [isDelegated, callable, typeEvent];\n}\n\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction); // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n\n callable = wrapFunction(callable);\n }\n\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\n\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n\n if (!fn) {\n return;\n }\n\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\n\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n\n for (const handlerKey of Object.keys(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n const event = storeElementEvent[handlerKey];\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\n\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\n\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n\n for (const keyHandlers of Object.keys(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n const event = storeElementEvent[keyHandlers];\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n\n let evt = new Event(event, {\n bubbles,\n cancelable: true\n });\n evt = hydrateObj(evt, args);\n\n if (defaultPrevented) {\n evt.preventDefault();\n }\n\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n\n return evt;\n }\n\n};\n\nfunction hydrateObj(obj, meta) {\n for (const [key, value] of Object.entries(meta || {})) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n\n get() {\n return value;\n }\n\n });\n }\n }\n\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n\n const instanceMap = elementMap.get(element); // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n\n instanceMap.set(key, instance);\n },\n\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n\n return null;\n },\n\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key); // free up element references if there are no instances left for an element\n\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n\n if (value === 'false') {\n return false;\n }\n\n if (value === Number(value).toString()) {\n return Number(value);\n }\n\n if (value === '' || value === 'null') {\n return null;\n }\n\n if (typeof value !== 'string') {\n return value;\n }\n\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\n\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\n\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n\n return attributes;\n },\n\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n\n static get DefaultType() {\n return {};\n }\n\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n\n this._typeCheckConfig(config);\n\n return config;\n }\n\n _configAfterMerge(config) {\n return config;\n }\n\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return { ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? 
config : {})\n };\n }\n\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const property of Object.keys(configTypes)) {\n const expectedTypes = configTypes[property];\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst VERSION = '5.2.3';\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n\n if (!element) {\n return;\n }\n\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n } // Public\n\n\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n\n this._typeCheckConfig(config);\n\n return config;\n } // Static\n\n\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n\n static get VERSION() {\n return VERSION;\n }\n\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n\n if (isDisabled(this)) {\n return;\n }\n\n const target = getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target); // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n } // Public\n\n\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n\n if (closeEvent.defaultPrevented) {\n return;\n }\n\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n } // Private\n\n\n _destroyElement() {\n this._element.remove();\n\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config](this);\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nenableDismissTrigger(Alert, 'close');\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n/**\n * Class definition\n */\n\nclass Button 
extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n } // Public\n\n\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n\n return parents;\n },\n\n prev(element, selector) {\n let previous = element.previousElementSibling;\n\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n\n previous = previous.previousElementSibling;\n }\n\n return [];\n },\n\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n\n next = next.nextElementSibling;\n }\n\n return [];\n },\n\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n }\n\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = 
{\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n\n if (!element || !Swipe.isSupported()) {\n return;\n }\n\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n\n this._initEvents();\n } // Getters\n\n\n static get Default() {\n return Default$c;\n }\n\n static get DefaultType() {\n return DefaultType$c;\n }\n\n static get NAME() {\n return NAME$d;\n } // Public\n\n\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n } // Private\n\n\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n\n this._handleSwipe();\n\n execute(this._config.endCallback);\n }\n\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n\n if (!direction) {\n return;\n }\n\n execute(direction > 0 ? this._config.rightCallback : this._config.leftCallback);\n }\n\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n } // Static\n\n\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = 
`click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n\n this._addEventListeners();\n\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n } // Getters\n\n\n static get Default() {\n return Default$b;\n }\n\n static get DefaultType() {\n return DefaultType$b;\n }\n\n static get NAME() {\n return NAME$c;\n } // Public\n\n\n next() {\n this._slide(ORDER_NEXT);\n }\n\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n\n prev() {\n this._slide(ORDER_PREV);\n }\n\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n\n this._clearInterval();\n }\n\n cycle() {\n this._clearInterval();\n\n this._updateInterval();\n\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n\n this.cycle();\n }\n\n to(index) {\n const items = this._getItems();\n\n if (index > items.length - 1 || index < 0) {\n return;\n }\n\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n\n const activeIndex = this._getItemIndex(this._getActive());\n\n if (activeIndex === index) {\n return;\n }\n\n const order = index > activeIndex ? 
ORDER_NEXT : ORDER_PREV;\n\n this._slide(order, items[index]);\n }\n\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n\n super.dispose();\n } // Private\n\n\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n } // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n\n this.pause();\n\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n\n const direction = KEY_TO_DIRECTION[event.key];\n\n if (direction) {\n event.preventDefault();\n\n this._slide(this._directionToOrder(direction));\n }\n }\n\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n\n if (!element) {\n return;\n }\n\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n\n const activeElement = this._getActive();\n\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n\n if (nextElement === activeElement) {\n return;\n }\n\n const nextElementIndex = this._getItemIndex(nextElement);\n\n const triggerEvent = eventName => {\n return 
EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n\n const slideEvent = triggerEvent(EVENT_SLIDE);\n\n if (slideEvent.defaultPrevented) {\n return;\n }\n\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // todo: change tests that use empty divs to avoid this check\n return;\n }\n\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n\n this._setActiveIndicatorElement(nextElementIndex);\n\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n\n if (isCycling) {\n this.cycle();\n }\n }\n\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = getElementFromSelector(this);\n\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n\n if (slideIndex) {\n carousel.to(slideIndex);\n\n carousel._maybeEnableCycle();\n\n return;\n }\n\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n\n carousel._maybeEnableCycle();\n\n return;\n }\n\n carousel.prev();\n\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n\n for (const elem of toggleList) {\n const selector = getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n\n this._initializeChildren();\n\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n\n if (this._config.toggle) {\n this.toggle();\n }\n } // Getters\n\n\n static get Default() {\n 
return Default$a;\n }\n\n static get DefaultType() {\n return DefaultType$a;\n }\n\n static get NAME() {\n return NAME$b;\n } // Public\n\n\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n\n let activeChildren = []; // find active children\n\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n\n if (startEvent.defaultPrevented) {\n return;\n }\n\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n\n const dimension = this._getDimension();\n\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n\n this._element.style[dimension] = 0;\n\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n\n this._isTransitioning = true;\n\n const complete = () => {\n this._isTransitioning = false;\n\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n\n this._queueCallback(complete, this._element, true);\n\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n\n if (startEvent.defaultPrevented) {\n return;\n }\n\n const dimension = this._getDimension();\n\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n\n for (const trigger of this._triggerArray) {\n const element = getElementFromSelector(trigger);\n\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n\n this._isTransitioning = true;\n\n const complete = () => {\n this._isTransitioning = false;\n\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n\n this._element.style[dimension] = '';\n\n this._queueCallback(complete, this._element, true);\n }\n\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n } // Private\n\n\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n\n config.parent = getElement(config.parent);\n return config;\n }\n\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n\n for (const element of children) {\n const selected = getElementFromSelector(element);\n\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent); // remove children if greater depth\n\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n } // Static\n\n\n static jQueryInterface(config) {\n const _config = {};\n\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n\n const selector = getSelectorFromElement(this);\n const selectorElements = SelectorEngine.find(selector);\n\n for (const element of selectorElements) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = 
'.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = '.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // todo: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.2/forms/input-group/\n\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n } // Getters\n\n\n static get Default() {\n return Default$9;\n }\n\n static get DefaultType() {\n return DefaultType$9;\n }\n\n static get NAME() {\n return NAME$a;\n } // Public\n\n\n toggle() {\n return this._isShown() ? 
this.hide() : this.show();\n }\n\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n\n if (showEvent.defaultPrevented) {\n return;\n }\n\n this._createPopper(); // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n\n\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n\n this._element.focus();\n\n this._element.setAttribute('aria-expanded', true);\n\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n\n this._element.classList.add(CLASS_NAME_SHOW$6);\n\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n\n const relatedTarget = {\n relatedTarget: this._element\n };\n\n this._completeHide(relatedTarget);\n }\n\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n\n super.dispose();\n }\n\n update() {\n this._inNavbar = this._detectNavbar();\n\n if (this._popper) {\n this._popper.update();\n }\n } // Private\n\n\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n\n if (hideEvent.defaultPrevented) {\n return;\n } // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n\n\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n\n if (this._popper) {\n this._popper.destroy();\n }\n\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n\n this._element.setAttribute('aria-expanded', 'false');\n\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n\n _getConfig(config) {\n config = super._getConfig(config);\n\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n\n return config;\n }\n\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n\n let referenceElement = this._element;\n\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n\n const popperConfig = this._getPopperConfig();\n\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n\n _getPlacement() {\n const parentDropdown = this._parent;\n\n if 
(parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n } // We need to trim the value because custom properties can also include spaces\n\n\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n\n return isEnd ? PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n\n _getOffset() {\n const {\n offset\n } = this._config;\n\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n\n return offset;\n }\n\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n }; // Disable Popper if we have a static display or Dropdown is in Navbar\n\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // todo:v6 remove\n\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n\n return { ...defaultBsPopperConfig,\n ...(typeof this._config.popperConfig === 'function' ? this._config.popperConfig(defaultBsPopperConfig) : this._config.popperConfig)\n };\n }\n\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n\n if (!items.length) {\n return;\n } // if target isn't included in items (e.g. 
when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n\n\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n });\n }\n\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n\n if (!context || context._config.autoClose === false) {\n continue;\n }\n\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n } // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n\n\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n\n const relatedTarget = {\n relatedTarget: context._element\n };\n\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n\n context._completeHide(relatedTarget);\n }\n }\n\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n\n if (isInput && !isEscapeEvent) {\n return;\n }\n\n event.preventDefault(); // todo: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.2/forms/input-group/\n\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? 
this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n\n instance._selectMenuItem(event);\n\n return;\n }\n\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n } // Public\n\n\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n\n hide() {\n const width = this.getWidth();\n\n this._disableOverFlow(); // give padding to element to balance the hidden scrollbar width\n\n\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width); // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n\n\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n\n isOverflowing() {\n return this.getWidth() > 0;\n } // Private\n\n\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n\n this._element.style.overflow = 'hidden';\n }\n\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n\n this._saveInitialAttribute(element, styleProperty);\n\n const calculatedValue = 
window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty); // We only want to remove the property if the value is `null`; the value can also be zero\n\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n\n};\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n } // Getters\n\n\n static get Default() {\n return Default$8;\n }\n\n static get DefaultType() {\n return DefaultType$8;\n }\n\n static get NAME() {\n return NAME$9;\n } // Public\n\n\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n\n this._append();\n\n const element = this._getElement();\n\n if (this._config.isAnimated) {\n reflow(element);\n }\n\n element.classList.add(CLASS_NAME_SHOW$5);\n\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n\n dispose() {\n if (!this._isAppended) {\n return;\n }\n\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n\n this._element.remove();\n\n this._isAppended = false;\n } // Private\n\n\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n\n this._element = backdrop;\n }\n\n return this._element;\n }\n\n 
_configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n\n _append() {\n if (this._isAppended) {\n return;\n }\n\n const element = this._getElement();\n\n this._config.rootElement.append(element);\n\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n\n};\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n } // Getters\n\n\n static get Default() {\n return Default$7;\n }\n\n static get DefaultType() {\n return DefaultType$7;\n }\n\n static get NAME() {\n return NAME$8;\n } // Public\n\n\n activate() {\n if (this._isActive) {\n return;\n }\n\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n\n deactivate() {\n if (!this._isActive) {\n return;\n }\n\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n } // Private\n\n\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n\n const elements = SelectorEngine.focusableChildren(trapElement);\n\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n\n this._addEventListeners();\n } // Getters\n\n\n static get Default() {\n return Default$6;\n }\n\n static get DefaultType() {\n return DefaultType$6;\n }\n\n static get NAME() {\n return NAME$7;\n } // Public\n\n\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n\n if (showEvent.defaultPrevented) {\n return;\n }\n\n this._isShown = true;\n this._isTransitioning = true;\n\n this._scrollBar.hide();\n\n document.body.classList.add(CLASS_NAME_OPEN);\n\n this._adjustDialog();\n\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n this._isShown = false;\n this._isTransitioning = true;\n\n this._focustrap.deactivate();\n\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n\n dispose() {\n for (const htmlElement of [window, this._dialog]) {\n EventHandler.off(htmlElement, EVENT_KEY$4);\n }\n\n this._backdrop.dispose();\n\n this._focustrap.deactivate();\n\n super.dispose();\n }\n\n handleUpdate() {\n this._adjustDialog();\n } // Private\n\n\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n\n this._element.style.display = 'block';\n\n this._element.removeAttribute('aria-hidden');\n\n this._element.setAttribute('aria-modal', true);\n\n this._element.setAttribute('role', 'dialog');\n\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n\n reflow(this._element);\n\n this._element.classList.add(CLASS_NAME_SHOW$4);\n\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n\n if (this._config.keyboard) {\n event.preventDefault();\n this.hide();\n return;\n }\n\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n\n return;\n }\n\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n\n _hideModal() {\n this._element.style.display = 'none';\n\n this._element.setAttribute('aria-hidden', true);\n\n this._element.removeAttribute('aria-modal');\n\n 
this._element.removeAttribute('role');\n\n this._isTransitioning = false;\n\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n\n this._resetAdjustments();\n\n this._scrollBar.reset();\n\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY; // return if the following background transition hasn't yet completed\n\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n\n this._element.classList.add(CLASS_NAME_STATIC);\n\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n\n this._element.focus();\n }\n /**\n * The following methods are used to handle overflowing modals\n */\n\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n\n const scrollbarWidth = this._scrollBar.getWidth();\n\n const isBodyOverflowing = scrollbarWidth > 0;\n\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 
'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n } // Static\n\n\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config](relatedTarget);\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = getElementFromSelector(this);\n\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n }); // avoid conflict when clicking modal toggler while another one is open\n\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n\n this._addEventListeners();\n } // Getters\n\n\n static get Default() {\n return Default$5;\n }\n\n static get DefaultType() {\n return DefaultType$5;\n }\n\n static get NAME() {\n return NAME$6;\n } // Public\n\n\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n\n if (showEvent.defaultPrevented) {\n return;\n }\n\n this._isShown = true;\n\n this._backdrop.show();\n\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n\n this._element.setAttribute('aria-modal', true);\n\n this._element.setAttribute('role', 'dialog');\n\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n\n this._element.classList.add(CLASS_NAME_SHOW$3);\n\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n\n this._queueCallback(completeCallBack, this._element, true);\n }\n\n hide() {\n if (!this._isShown) {\n return;\n }\n\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n this._focustrap.deactivate();\n\n this._element.blur();\n\n this._isShown = false;\n\n this._element.classList.add(CLASS_NAME_HIDING);\n\n this._backdrop.hide();\n\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n\n this._element.removeAttribute('aria-modal');\n\n this._element.removeAttribute('role');\n\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n\n this._queueCallback(completeCallback, this._element, true);\n }\n\n dispose() {\n this._backdrop.dispose();\n\n this._focustrap.deactivate();\n\n super.dispose();\n } // Private\n\n\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n\n this.hide();\n }; // 'static' option will be translated to true, and booleans will keep their value\n\n\n const isVisible = Boolean(this._config.backdrop);\n return new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? 
clickCallback : null\n });\n }\n\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n\n if (!this._config.keyboard) {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n\n this.hide();\n });\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config](this);\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = getElementFromSelector(this);\n\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n\n if (isDisabled(this)) {\n return;\n }\n\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n }); // avoid conflict when clicking a toggler of an offcanvas, while another is open\n\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\n/**\n * A pattern that recognizes a commonly useful subset of URLs that are safe.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/12.2.x/packages/core/src/sanitization/url_sanitizer.ts\n */\n\nconst SAFE_URL_PATTERN = /^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i;\n/**\n * A pattern that matches safe data URLs. 
Only matches image, video and audio types.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/12.2.x/packages/core/src/sanitization/url_sanitizer.ts\n */\n\nconst DATA_URL_PATTERN = /^data:(?:image\\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\\/(?:mpeg|mp4|ogg|webm)|audio\\/(?:mp3|oga|ogg|opus));base64,[\\d+/a-z]+=*$/i;\n\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue) || DATA_URL_PATTERN.test(attribute.nodeValue));\n }\n\n return true;\n } // Check if a regular expression validates the attribute.\n\n\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\n\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n div: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n } // Getters\n\n\n static get Default() {\n return Default$4;\n }\n\n static get DefaultType() {\n return DefaultType$4;\n }\n\n static get NAME() {\n return NAME$5;\n } // Public\n\n\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n\n hasContent() {\n return this.getContent().length > 0;\n }\n\n changeContent(content) {\n this._checkContent(content);\n\n this._config.content = { ...this._config.content,\n ...content\n };\n return this;\n }\n\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n\n const template = templateWrapper.children[0];\n\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n\n return template;\n } // Private\n\n\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n\n this._checkContent(config.content);\n }\n\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n\n if (!templateElement) {\n return;\n }\n\n content = this._resolvePossibleFunction(content);\n\n if (!content) {\n templateElement.remove();\n return;\n }\n\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n\n return;\n }\n\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n\n templateElement.textContent = content;\n }\n\n _maybeSanitize(arg) {\n return this._config.sanitize ? sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n\n _resolvePossibleFunction(arg) {\n return typeof arg === 'function' ? 
arg(this) : arg;\n }\n\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n\n templateElement.textContent = element.textContent;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 0],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class="tooltip" role="tooltip">' + '<div class="tooltip-arrow"></div>' + '<div class="tooltip-inner"></div>' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n\n super(element, config); // Private\n\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null; // Protected\n\n this.tip = null;\n\n this._setListeners();\n\n if (!this._config.selector) {\n this._fixTitle();\n }\n } // Getters\n\n\n static get Default() {\n return Default$3;\n }\n\n static get DefaultType() {\n return DefaultType$3;\n }\n\n static get NAME() {\n return NAME$4;\n } // Public\n\n\n enable() {\n this._isEnabled = true;\n }\n\n disable() {\n this._isEnabled = false;\n }\n\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n\n this._activeTrigger.click = !this._activeTrigger.click;\n\n if (this._isShown()) {\n this._leave();\n\n return;\n }\n\n this._enter();\n }\n\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n\n this._disposePopper();\n\n super.dispose();\n }\n\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n } // todo v6 remove this OR make it optional\n\n\n this._disposePopper();\n\n const tip = this._getTipElement();\n\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n\n const {\n container\n } = this._config;\n\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2); // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n\n const complete = () => {\n 
EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOWN$2));\n\n if (this._isHovered === false) {\n this._leave();\n }\n\n this._isHovered = false;\n };\n\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n\n hide() {\n if (!this._isShown()) {\n return;\n }\n\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n const tip = this._getTipElement();\n\n tip.classList.remove(CLASS_NAME_SHOW$2); // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n\n if (!this._isHovered) {\n this._disposePopper();\n }\n\n this._element.removeAttribute('aria-describedby');\n\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n\n update() {\n if (this._popper) {\n this._popper.update();\n }\n } // Protected\n\n\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n\n return this.tip;\n }\n\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml(); // todo: remove this check on v6\n\n\n if (!tip) {\n return null;\n }\n\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2); // todo: on v6 the following can be achieved with CSS only\n\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n\n return tip;\n }\n\n setContent(content) {\n this._newContent = content;\n\n if (this._isShown()) {\n this._disposePopper();\n\n this.show();\n }\n }\n\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({ ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n\n return this._templateFactory;\n }\n\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n } // Private\n\n\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n\n _createPopper(tip) {\n const placement = typeof this._config.placement === 'function' ? 
this._config.placement.call(this, tip, this._element) : this._config.placement;\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n\n _getOffset() {\n const {\n offset\n } = this._config;\n\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n\n return offset;\n }\n\n _resolvePossibleFunction(arg) {\n return typeof arg === 'function' ? arg.call(this._element) : arg;\n }\n\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return { ...defaultBsPopperConfig,\n ...(typeof this._config.popperConfig === 'function' ? this._config.popperConfig(defaultBsPopperConfig) : this._config.popperConfig)\n };\n }\n\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n\n context._leave();\n });\n }\n }\n\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n\n _fixTitle() {\n const title = this._element.getAttribute('title');\n\n if (!title) {\n return;\n }\n\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n\n\n this._element.removeAttribute('title');\n }\n\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n\n this._isHovered = true;\n\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n\n this._isHovered = false;\n\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n\n config = { ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n\n this._typeCheckConfig(config);\n\n return config;\n }\n\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n\n return config;\n }\n\n _getDelegateConfig() {\n const config = {};\n\n for (const key in this._config) {\n if (this.constructor.Default[key] !== this._config[key]) {\n config[key] = this._config[key];\n }\n }\n\n config.selector = false;\n config.trigger = 'manual'; // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n\n return config;\n }\n\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n\n this._popper = null;\n }\n\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n });\n }\n\n}\n/**\n * jQuery\n */\n\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = { ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = { ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n\n static get DefaultType() {\n return DefaultType$2;\n }\n\n static get NAME() {\n return NAME$3;\n } // Overrides\n\n\n _isWithContent() {\n return this._getTitle() || this._getContent();\n } // Private\n\n\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n });\n }\n\n}\n/**\n * jQuery\n */\n\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config); // this._element is the observablesContainer and config.target the menu links wrapper\n\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n } // Getters\n\n\n static get Default() {\n return Default$1;\n }\n\n static get DefaultType() {\n return DefaultType$1;\n }\n\n static get NAME() {\n return NAME$2;\n } // Public\n\n\n refresh() {\n this._initializeTargetsAndObservables();\n\n this._maybeEnableSmoothScroll();\n\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n\n dispose() {\n this._observer.disconnect();\n\n super.dispose();\n } // Private\n\n\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body; // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n\n return config;\n }\n\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n } // unregister any previous listeners\n\n\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n } // Chrome 60 doesn't support `scrollTo`\n\n\n root.scrollTop = height;\n }\n });\n }\n\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n } // The logic of selection\n\n\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n\n this._process(targetElement(entry));\n };\n\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n\n this._clearActiveClass(targetElement(entry));\n\n continue;\n }\n\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop; // if we are scrolling down, pick the bigger offsetTop\n\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry); // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n\n if (!parentScrollTop) {\n return;\n }\n\n continue;\n } // if we are scrolling up, pick the smallest offsetTop\n\n\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n 
const targetLinks = SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n\n const observableSection = SelectorEngine.findOne(anchor.hash, this._element); // ensure that the observableSection exists & is visible\n\n if (isVisible(observableSection)) {\n this._targetLinks.set(anchor.hash, anchor);\n\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n\n this._clearActiveClass(this._config.target);\n\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n\n this._activateParents(target);\n\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both