From 1ddca1fdf95fc5f3e35d029905d16c933616a9ce Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 27 Nov 2024 11:25:41 -0500
Subject: [PATCH 01/35] Add common resample code to stcal.

---
 src/stcal/resample/__init__.py |   13 +
 src/stcal/resample/resample.py | 1584 ++++++++++++++++++++++++++++++++
 src/stcal/resample/utils.py    |  299 ++++++
 3 files changed, 1896 insertions(+)
 create mode 100644 src/stcal/resample/__init__.py
 create mode 100644 src/stcal/resample/resample.py
 create mode 100644 src/stcal/resample/utils.py

diff --git a/src/stcal/resample/__init__.py b/src/stcal/resample/__init__.py
new file mode 100644
index 000000000..7152f15c4
--- /dev/null
+++ b/src/stcal/resample/__init__.py
@@ -0,0 +1,13 @@
+from .resample import (
+    OutputTooLargeError,
+    Resample,
+    compute_wcs_pixel_area,
+    UnsupportedWCSError,
+)
+
+__all__ = [
+    "OutputTooLargeError",
+    "Resample",
+    "compute_wcs_pixel_area",
+    "UnsupportedWCSError",
+]
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
new file mode 100644
index 000000000..b9891d3db
--- /dev/null
+++ b/src/stcal/resample/resample.py
@@ -0,0 +1,1584 @@
+import logging
+import math
+import os
+import warnings
+import json
+import abc
+from copy import deepcopy
+import sys
+
+import numpy as np
+from scipy.ndimage import median_filter
+import psutil
+
+from astropy import units as u
+from astropy.nddata.bitmask import (
+    bitfield_to_boolean_mask,
+    interpret_bit_flags,
+)
+from drizzle.utils import calc_pixmap
+from drizzle.resample import Drizzle
+
+
+from stcal.resample.utils import (
+    bytes2human,
+    compute_wcs_pixel_area,
+    get_tmeasure,
+    resample_range,
+)
+
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+__all__ = [
+    "compute_wcs_pixel_area"
+    "OutputTooLargeError",
+    "Resample",
+    "resampled_wcs_from_models",
+    "UnsupportedWCSError",
+]
+
+
+class OutputTooLargeError(RuntimeError):
+    """Raised when the output is too large for in-memory instantiation"""
+
+
+class UnsupportedWCSError(RuntimeError):
+    """ Raised when provided output WCS has an unexpected number of axes
+    or has an unsupported structure.
+    """
+
+
+class Resample:
+    """
+    This is the controlling routine for the resampling process.
+
+    Notes
+    -----
+    This routine performs the following operations::
+
+      1. Extracts parameter settings from input model, such as pixfrac,
+         weight type, exposure time (if relevant), and kernel, and merges
+         them with any user-provided values.
+      2. Creates output WCS based on input images and defines the mapping
+         function between all input arrays and the output array.
+      3. Updates output data model with output arrays from drizzle, including
+         a record of metadata from all input models.
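+
+    Examples
+    --------
+    A schematic usage sketch; ``output_wcs`` and the input ``models``
+    (dictionaries with the keys described in `add_model` and
+    `validate_input_model`) are placeholders rather than working data::
+
+        resamp = Resample(
+            n_input_models=len(models),
+            pixfrac=1.0,
+            kernel="square",
+            output_wcs={"wcs": output_wcs, "pixel_scale": 0.06},
+        )
+        for model in models:
+            resamp.add_model(model)
+        resamp.finalize()
+        resampled = resamp.output_model  # dict with "data", "wht", ...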
+    """
+    resample_suffix = 'i2d'
+    resample_file_ext = '.fits'
+
+    # supported output arrays (subclasses can add more):
+    output_array_types = {
+        "data": np.float32,
+        "wht": np.float32,
+        "con": np.int32,
+        "var_rnoise": np.float32,
+        "var_flat": np.float32,
+        "var_poisson": np.float32,
+        "err": np.float32,
+    }
+
+    dq_flag_name_map = {}
+
+    def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
+                 fillval=0.0, wht_type="ivm", good_bits=0,
+                 output_wcs=None, output_model=None,
+                 accumulate=False, enable_ctx=True, enable_var=True,
+                 compute_err=None,
+                 allowed_memory=None):
+        """
+        Parameters
+        ----------
+        n_input_models : int, None, optional
+            Number of input models expected to be resampled. When provided,
+            this is used to estimate memory requirements and optimize memory
+            allocation for the context array.
+
+        pixfrac : float, optional
+            The fraction of a pixel that the pixel flux is confined to. The
+            default value of 1 has the pixel flux evenly spread across the
+            image. A value of 0.5 confines it to half a pixel in the linear
+            dimension, so the flux is confined to a quarter of the pixel area
+            when the square kernel is used.
+
+        kernel : {"square", "gaussian", "point", "turbo", "lanczos2", "lanczos3"}, optional
+            The name of the kernel used to combine the input. The choice of
+            kernel controls the distribution of flux over the kernel.
+            The square kernel is the default.
+
+            .. warning::
+               The "gaussian" and "lanczos2/3" kernels **DO NOT**
+               conserve flux.
+
+        fillval : float, None, str, optional
+            The value of output pixels that did not have contributions from
+            input images' pixels. When ``fillval`` is either `None` or
+            ``"INDEF"`` and ``out_img`` is provided, the values of ``out_img``
+            will not be modified. When ``fillval`` is either `None` or
+            ``"INDEF"`` and ``out_img`` is **not provided**, the values of
+            ``out_img`` will be initialized to `numpy.nan`. If ``fillval``
+            is a string that can be converted to a number, then the output
+            pixels with no contributions from input images will be set to this
+            ``fillval`` value.
+
+        wht_type : {"exptime", "ivm"}, optional
+            The weighting type for adding models' data. For ``wht_type="ivm"``
+            (the default), the weighting will be determined per-pixel using
+            the inverse of the read noise (VAR_RNOISE) array stored in each
+            input image. If the ``VAR_RNOISE`` array does not exist,
+            the variance is set to 1 for all pixels (i.e., equal weighting).
+            If ``weight_type="exptime"``, the weight will be set equal
+            to the measurement time (``TMEASURE``) when available and to
+            the exposure time (``EFFEXPTM``) otherwise.
+
+        good_bits : int, str, None, optional
+            An integer bit mask, `None`, a Python list of bit flags, a comma-,
+            or ``'|'``-separated, ``'+'``-separated string list of integer
+            bit flags or mnemonic flag names that indicate what bits in models'
+            DQ bitfield array should be *ignored* (i.e., zeroed).
+
+            When co-adding models using :py:meth:`add_model`, any pixels with
+            non-zero DQ values are assigned a weight of zero and therefore
+            do not contribute to the output (resampled) data.
+            ``good_bits`` provides a means to ignore some of the DQ bit flags.
+
+            When ``good_bits`` is an integer, it must be
+            the sum of all the DQ bit values from the input model's
+            DQ array that should be considered "good" (or ignored). For
+            example, if pixels in the DQ array can be
+            combinations of 1, 2, 4, and 8 flags and one wants to consider DQ
+            "defects" having flags 2 and 4 as being acceptable, then
+            ``good_bits`` should be set to 2+4=6. Then a pixel with DQ values
+            2,4, or 6 will be considered a good pixel, while a pixel with
+            DQ value, e.g., 1+2=3, 4+8=12, etc. will be flagged as
+            a "bad" pixel.
+
+            Alternatively, when ``good_bits`` is a string, it can be a
+            comma-separated or '+' separated list of integer bit flags that
+            should be summed to obtain the final "good" bits. For example,
+            both "4,8" and "4+8" are equivalent to integer ``good_bits=12``.
+
+            Finally, instead of integers, ``good_bits`` can be a string of
+            comma-separated mnemonics. For example, for JWST, all the following
+            specifications are equivalent:
+
+            `"12" == "4+8" == "4, 8" == "JUMP_DET, DROPOUT"`
+
+            In order to "translate" mnemonic codes to integer bit flags,
+            the ``Resample.dq_flag_name_map`` attribute must be set to either
+            a dictionary (with keys being mnemonic codes and the values being
+            integer flags) or a `~astropy.nddata.BitFlagNameMap`.
+
+            In order to reverse the meaning of the flags
+            from indicating values of the "good" DQ flags
+            to indicating the "bad" DQ flags, prepend '~' to the string
+            value. For example, in order to exclude pixels with
+            DQ flags 4 and 8 for computations and to consider
+            as "good" all other pixels (regardless of their DQ flag),
+            use a value of ``~4+8``, or ``~4,8``. A string value of
+            ``~0`` would be equivalent to a setting of ``None``.
+
+            | Default value (0) will make *all* pixels with non-zero DQ
+            values be considered "bad" pixels, and the corresponding data
+            pixels will be assigned zero weight and thus these pixels
+            will not contribute to the output resampled data array.
+
+            | Set `good_bits` to `None` to turn off the use of model's DQ
+            array.
+
+            For more details, see documentation for
+            `astropy.nddata.bitmask.extend_bit_flag_map`.
+
+        output_wcs : dict, WCS object, None
+            Specifies output WCS either directly as a WCS or a dictionary
+            with keys ``'wcs'`` (WCS object) and ``'pixel_scale'``
+            (pixel scale in arcseconds). ``'pixel_scale'``, when provided,
+            will be used for computation of drizzle scaling factor. When it is
+            not provided, output pixel scale will be *estimated* from the
+            provided WCS object. ``output_wcs`` object is required when
+            ``output_model`` is `None`. ``output_wcs`` is ignored when
+            ``output_model`` is provided.
+
+        output_model : dict, None, optional
+            A dictionary containing data arrays and other attributes that
+            will be used to add new models to. use
+            :py:meth:`Resample.output_model_attributes` to get the list of
+            keywords that must be present. When ``accumulate`` is `False`,
+            only the WCS object of the model will be used. When ``accumulate``
+            is `True`, new models will be added to the existing data in the
+            ``output_model``.
+
+            When ``output_model`` is `None`, a new model will be created.
+
+        accumulate : bool, optional
+            Indicates whether resampled models should be added to the
+            provided ``output_model`` data or if new arrays should be
+            created.
+
+        enable_ctx : bool, optional
+            Indicates whether to create a context image. If ``enable_ctx``
+            is `False`, the context-related parameters (``out_ctx``,
+            ``begin_ctx_id``, ``max_ctx_id``) of the underlying `Drizzle`
+            objects are ignored.
+
+        enable_var : bool, optional
+            Indicates whether to resample variance arrays.
+
+        compute_err : {"from_var", "driz_err"}, None, optional
+            - ``"from_var"``: compute output model's error array from
+              all (Poisson, flat, readout) resampled variance arrays.
+              Setting ``compute_err`` to ``"from_var"`` will assume
+              ``enable_var`` was set to `True` regardless of actual
+              value of the parameter ``enable_var``.
+            - ``"driz_err"``: compute output model's error array by drizzling
+              together all input models' error arrays.
+
+            Error array will be assigned to ``'err'`` key of the output model.
+
+            .. note::
+                At this time, output error array is not equivalent to
+                error propagation results.
+
+        allowed_memory : float, None
+            Fraction of memory allowed to be used for resampling. If
+            ``allowed_memory`` is `None` then no check for available memory
+            will be performed.
+
+        """
+        # to see if setting up arrays and drizzle is needed
+        self._finalized = False
+        self._n_res_models = 0
+
+        self._n_predicted_input_models = n_input_models
+        self.allowed_memory = allowed_memory
+        self._output_model = output_model
+        self._create_new_output_model = output_model is None
+
+        self._enable_ctx = enable_ctx
+        self._enable_var = enable_var
+        self._compute_err = compute_err
+        self._accumulate = accumulate
+
+        # these are attributes that are used only for informational purposes
+        # and are added to the created output_model only if they are
+        # not already present there:
+        self._pixel_scale_ratio = None
+        self._output_pixel_scale = None  # in arcsec
+
+        # resample parameters
+        self.pixfrac = pixfrac
+        self.kernel = kernel
+        self.fillval = fillval
+        self.good_bits = good_bits
+
+        if wht_type in ["ivm", "exptime"]:
+            self.weight_type = wht_type
+        else:
+            raise ValueError("Unexpected weight type: '{self.weight_type}'")
+
+        self._output_wcs = output_wcs
+
+        self.input_file_names = []
+        self._group_ids = []
+
+        # determine output WCS and set-up output model if needed:
+        if output_model is None:
+            if output_wcs is None:
+                raise ValueError(
+                    "Output WCS must be provided either through the "
+                    "'output_wcs' parameter or the 'ouput_model' parameter. "
+                )
+            else:
+                if isinstance(output_wcs, dict):
+                    self._output_pixel_scale = output_wcs.get("pixel_scale")
+                    self._pixel_scale_ratio = output_wcs.get(
+                        "pixel_scale_ratio"
+                    )
+                    self._output_wcs = output_wcs.get("wcs")
+                else:
+                    self._output_wcs = output_wcs
+
+                self.check_output_wcs(self._output_wcs)
+
+        else:
+            self.validate_output_model(
+                output_model=output_model,
+                accumulate=accumulate,
+                enable_ctx=enable_ctx,
+                enable_var=enable_var,
+            )
+            self._output_model = output_model
+            self._output_wcs = output_model["wcs"]
+            self._output_pixel_scale = output_model.get("pixel_scale")
+            if output_wcs:
+                log.warning(
+                    "'output_wcs' will be ignored. Using the 'wcs' supplied "
+                    "by the 'output_model' instead."
+                )
+
+        if self._output_pixel_scale is None:
+            self._output_pixel_scale = 3600.0 * np.rad2deg(
+                math.sqrt(compute_wcs_pixel_area(self._output_wcs))
+            )
+            log.info(
+                "Computed output pixel scale: "
+                f"{self._output_pixel_scale} arcsec."
+            )
+        else:
+            log.info(
+                f"Output pixel scale: {self._output_pixel_scale} arcsec."
+            )
+
+        self._output_array_shape = self._output_wcs.array_shape
+
+        # Check that the output data shape has no zero length dimensions
+        npix = np.prod(self._output_array_shape)
+        if not npix:
+            raise ValueError(
+                "Invalid output frame shape: "
+                f"{tuple(self._output_array_shape)}"
+            )
+
+        log.info(f"Driz parameter kernel: {self.kernel}")
+        log.info(f"Driz parameter pixfrac: {self.pixfrac}")
+        log.info(f"Driz parameter fillval: {self.fillval}")
+        log.info(f"Driz parameter weight_type: {self.weight_type}")
+        log.debug(
+            f"Output mosaic size (nx, ny): {self._output_wcs.pixel_shape}"
+        )
+
+        # set up an empty (don't allocate arrays at this time) output model:
+        if self._output_model is None:
+            self._output_model = self.create_output_model()
+
+        self.reset_arrays(reset_output=False, n_input_models=n_input_models)
+
+    @classmethod
+    def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
+                                compute_err):
+        """
+        Returns a set of string keywords that must be present in an
+        'output_model' that is provided as input at the class initialization.
+
+        Parameters
+        ----------
+
+        accumulate : bool, optional
+            Indicates whether resampled models should be added to the
+            provided ``output_model`` data or if new arrays should be
+            created.
+
+        enable_ctx : bool, optional
+            Indicates whether to create a context image. If ``enable_ctx``
+            is `False`, the context-related parameters (``out_ctx``,
+            ``begin_ctx_id``, ``max_ctx_id``) of the underlying `Drizzle`
+            objects are ignored.
+
+        enable_var : bool, optional
+            Indicates whether to resample variance arrays.
+
+        compute_err : {"from_var", "driz_err"}, None, optional
+            - ``"from_var"``: compute output model's error array from
+              all (Poisson, flat, readout) resampled variance arrays.
+              Setting ``compute_err`` to ``"from_var"`` will assume
+              ``enable_var`` was set to `True` regardless of actual
+              value of the parameter ``enable_var``.
+            - ``"driz_err"``: compute output model's error array by drizzling
+              together all input models' error arrays.
+
+            Error array will be assigned to ``'err'`` key of the output model.
+
+            .. note::
+                At this time, output error array is not equivalent to
+                error propagation results.
+
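+        Examples
+        --------
+        A schematic call; the flag values below are purely illustrative::
+
+            attrs = Resample.output_model_attributes(
+                accumulate=False,
+                enable_ctx=True,
+                enable_var=True,
+                compute_err="from_var",
+            )
+            # 'attrs' is a set including "data", "wcs", "wht", "con",
+            # "err", "var_rnoise", "var_poisson", and "var_flat".
+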
+        """
+        # always required:
+        attributes = {
+            "data",
+            "wcs",
+            "wht",
+        }
+
+        if enable_ctx:
+            attributes.add("con")
+        if compute_err:
+            attributes.add("err")
+        if enable_var:
+            attributes.update(
+                ["var_rnoise", "var_poisson", "var_flat"]
+            )
+            # TODO: if we want to support adding more data to
+            # existing output models, we need to also store weights
+            # for variance arrays:
+            # var_rnoise_weight
+            # var_flat_weight
+            # var_poisson_weight
+        if accumulate:
+            if enable_ctx:
+                attributes.add("n_coadds")
+
+            # additional attributes required for input parameter 'output_model'
+            # when data and weight arrays are not None:
+            attributes.update(
+                {
+                    "pixfrac",
+                    "kernel",
+                    "fillval",
+                    "weight_type",
+                    "pointings",
+                    "exposure_time",
+                    "measurement_time",
+                    "start_time",
+                    "end_time",
+                    "duration",
+                }
+            )
+
+        return attributes
+
+    def check_memory_requirements(self, output_model, allowed_memory,
+                                  n_input_models=None):
+        """ Called just before `create_output_model` returns to verify
+        that there is enough memory to hold the output.
+
+        Parameters
+        ----------
+        allowed_memory : float, None
+            Fraction of memory allowed to be used for resampling. If
+            ``allowed_memory`` is `None`, the value is taken from the
+            ``DMODEL_ALLOWED_MEMORY`` environment variable when it is set;
+            otherwise no memory check is performed.
+
+        output_model : dict, None, optional
+            A dictionary containing data arrays and other attributes that
+            will be used to add new models to. use
+            :py:meth:`Resample.output_model_attributes` to get the list of
+            keywords that must be present. When ``accumulate`` is `False`,
+            only the WCS object of the model will be used. When ``accumulate``
+            is `True`, new models will be added to the existing data in the
+            ``output_model``.
+
+            When ``output_model`` is `None`, a new model will be created.
+
+        n_input_models : int, None, optional
+            Number of input models expected to be resampled. When provided,
+            this is used to estimate memory requirements and optimize memory
+            allocation for the context array.
+
+
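+        Notes
+        -----
+        The required memory is estimated roughly as ``npix`` (the number of
+        output pixels) times the summed item sizes of the output arrays that
+        will be allocated (counting context planes and, for variance arrays,
+        their weight and running-sum companions) plus the pixel map, and the
+        resulting fraction of available memory (RAM plus swap) is compared
+        against ``allowed_memory``.
+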
+        """
+        if n_input_models is None:
+            return
+
+        if allowed_memory is None:
+            allowed_memory = os.environ.get("DMODEL_ALLOWED_MEMORY")
+            if allowed_memory is None:
+                return
+
+        allowed_memory = float(allowed_memory)
+
+        # get the available memory
+        available_memory = (
+            psutil.virtual_memory().available + psutil.swap_memory().total
+        )
+
+        # compute the output array size
+        npix = np.prod(self._output_array_shape)
+        nconpl = n_input_models // 32 + (1 if n_input_models % 32 else 0)  # context planes
+        required_memory = 0
+        for arr in self.output_array_types:
+            if arr in output_model:
+                if arr == "con":
+                    f = nconpl
+                elif arr == "err":
+                    if self._compute_err == "from_var":
+                        f = 2  # data and weight arrays
+                    elif self._compute_err == "driz_err":
+                        f = 1
+                elif arr.startswith("var"):
+                    f = 3  # variance data, weight, and total arrays
+                else:
+                    f = 1
+
+                required_memory += f * np.dtype(self.output_array_types[arr]).itemsize
+
+        # add pixmap itemsize:
+        required_memory += 2 * np.dtype(float).itemsize
+        required_memory *= npix
+
+        # compare used to available
+        used_fraction = required_memory / available_memory
+        if used_fraction > allowed_memory:
+            raise OutputTooLargeError(
+                f'Combined output model size {self._output_wcs.array_shape} '
+                f'requires {bytes2human(required_memory)}. '
+                f'Model cannot be instantiated.'
+            )
+
+    def check_output_wcs(self, output_wcs, estimate_output_shape=True):
+        """
+        Check that provided WCS has expected properties and that its
+        ``array_shape`` property is defined.
+
+        Parameters
+        ----------
+        output_wcs : WCS object
+            A WCS object corresponding to the output (resampled) image.
+
+        estimate_output_shape : bool, optional
+            Indicates whether to *estimate* the output array shape of the
+            ``output_wcs`` from its bounding box when ``array_shape`` is
+            not already set.
+
+        """
+        naxes = output_wcs.output_frame.naxes
+        if naxes != 2:
+            raise UnsupportedWCSError(
+                "Output WCS needs 2 coordinate axes but the "
+                f"supplied WCS has {naxes} axes."
+            )
+
+        # make sure array_shape and pixel_shape are set:
+        if output_wcs.array_shape is None and estimate_output_shape:
+            # if wcs_pars and "output_shape" in wcs_pars:
+            #     output_wcs.array_shape = wcs_pars["output_shape"]
+            # else:
+            if output_wcs.bounding_box:
+                halfpix = 0.5 + sys.float_info.epsilon
+                output_wcs.array_shape = (
+                    int(output_wcs.bounding_box[1][1] + halfpix),
+                    int(output_wcs.bounding_box[0][1] + halfpix),
+                )
+            else:
+                # TODO: In principle, we could compute footprints of all
+                # input models, convert them to image coordinates using
+                # `output_wcs`, and then take max(x_i), max(y_i) as
+                # output image size.
+                raise ValueError(
+                    "Unable to infer output image size from provided "
+                    "inputs."
+                )
+
+    def validate_output_model(self, output_model, accumulate,
+                              enable_ctx, enable_var):
+        """ Checks that ``output_model`` dictionary has all the required
+        keywords that the code would expect it to have based on the values
+        of ``accumulate``, ``enable_ctx``, ``enable_var``. It will raise
+        `ValueError` if `output_model` is missing required keywords/values.
+
+        """
+        if output_model is None:
+            if accumulate:
+                raise ValueError(
+                    "'output_model' must be defined when 'accumulate' is True."
+                )
+            return
+
+        required_attributes = self.output_model_attributes(
+            accumulate=accumulate,
+            enable_ctx=enable_ctx,
+            enable_var=enable_var,
+            compute_err=self._compute_err,
+        )
+
+        for attr in required_attributes:
+            if attr not in output_model:
+                raise ValueError(
+                    f"'output_model' dictionary must have '{attr}' set."
+                )
+
+        model_wcs = output_model["wcs"]
+        self.check_output_wcs(model_wcs, estimate_output_shape=False)
+        wcs_shape = model_wcs.array_shape
+        ref_shape = output_model["data"].shape
+        if accumulate and wcs_shape is None:
+            raise ValueError(
+                "Output model's 'wcs' must have 'array_shape' attribute "
+                "set when 'accumulate' parameter is True."
+            )
+
+        if not np.array_equiv(wcs_shape, ref_shape):
+            raise ValueError(
+                "Output model's 'wcs.array_shape' value is not consistent "
+                "with the shape of the data array."
+            )
+
+        for attr in required_attributes.difference(["data", "wcs"]):
+            if (isinstance(output_model[attr], np.ndarray) and
+                    not np.array_equiv(output_model[attr].shape, ref_shape)):
+                raise ValueError(
+                    f"The shape of the '{attr}' array is not consistent "
+                    "with the shape of the output model's 'data' array."
+                )
+
+        # TODO: also check "pixfrac", "kernel", "fillval", "weight_type"
+        # with initializer parameters. log a warning if different.
+
+    def create_output_model(self):
+        """ Create a new "output model": a dictionary of data and meta fields.
+        Check that there is enough memory to hold all arrays by calling
+        `check_memory_requirements`.
+
+        """
+        assert self._output_wcs is not None
+        assert np.array_equiv(
+            self._output_wcs.array_shape,
+            self._output_array_shape
+        )
+        assert self._output_pixel_scale
+
+        pix_area = self._output_pixel_scale**2
+
+        output_model = {
+            # WCS:
+            "wcs": self._output_wcs,
+
+            # main arrays:
+            "data": None,
+            "wht": None,
+            "con": None,
+
+            # resample parameters:
+            "pixfrac": self.pixfrac,
+            "kernel": self.kernel,
+            "fillval": self.fillval,
+            "weight_type": self.weight_type,
+
+            # accumulate-specific:
+            "n_coadds": 0,
+
+            # pixel scale:
+            "pixelarea_steradians": pix_area / np.rad2deg(3600)**2,
+            "pixelarea_arcsecsq": pix_area,
+            "pixel_scale_ratio": self._pixel_scale_ratio,
+
+            # drizzle info:
+            "pointings": 0,
+
+            # exposure time:
+            "exposure_time": 0.0,
+            "measurement_time": None,
+            "start_time": None,
+            "end_time": None,
+            "duration": 0.0,
+        }
+
+        if self._enable_var:
+            output_model.update(
+                {
+                    "var_rnoise": None,
+                    "var_flat": None,
+                    "var_poisson": None,
+                    # TODO: if we want to support adding more data to
+                    # existing output models, we need to also store weights
+                    # for variance arrays:
+                    # var_rnoise_weight
+                    # var_flat_weight
+                    # var_poisson_weight
+                }
+            )
+
+        if self._compute_err is not None:
+            output_model["err"] = None
+
+        if self.allowed_memory:
+            self.check_memory_requirements(
+                output_model,
+                self.allowed_memory,
+                n_input_models=self._n_predicted_input_models,
+            )
+
+        return output_model
+
+    @property
+    def output_model(self):
+        return self._output_model
+
+    @property
+    def output_array_shape(self):
+        return self._output_array_shape
+
+    @property
+    def output_wcs(self):
+        return self._output_wcs
+
+    @property
+    def group_ids(self):
+        return self._group_ids
+
+    def _get_intensity_scale(self, model):
+        """
+        Compute an intensity scale from the input and output pixel area.
+
+        For imaging data, the scaling is used to account for differences
+        between the nominal pixel area and the average pixel area for
+        the input data.
+
+        For spectral data, the scaling is used to account for flux
+        conservation with non-unity pixel scale ratios, when the
+        data units are flux density.
+
+        Parameters
+        ----------
+        model : dict
+            The input data model.
+
+        Returns
+        -------
+        iscale : float
+            The scale to apply to the input data before drizzling.
+
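+        Notes
+        -----
+        Schematically, for imaging data the returned scale is
+        ``sqrt(pixelarea_steradians / wcs_pixel_area)``, where
+        ``wcs_pixel_area`` is estimated from the model's WCS over the data
+        array; for spectral data the nominal pixel area is used directly
+        (scaled by the pixel scale ratio when the data are in flux density
+        units).
+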
+        """
+        input_pixflux_area = model["pixelarea_steradians"]
+        wcs = model["wcs"]
+
+        if input_pixflux_area:
+            if 'SPECTRAL' in wcs.output_frame.axes_type:
+                # Use the nominal area as is
+                input_pixel_area = input_pixflux_area
+
+                # If input image is in flux density units, correct the
+                # flux for the user-specified change to the spatial dimension
+                if _is_flux_density(model["bunit_data"]):
+                    input_pixel_area *= self._pixel_scale_ratio
+            else:
+                input_pixel_area = compute_wcs_pixel_area(
+                    wcs,
+                    shape=model["data"].shape
+                )
+                if input_pixel_area is None:
+                    model_name = model["filename"]
+                    if not model_name:
+                        model_name = "Unknown"
+                    raise ValueError(
+                        "Unable to compute input pixel area from WCS of input "
+                        f"image {repr(model_name)}."
+                    )
+
+                if self._pixel_scale_ratio is None:
+                    input_pscale = 3600.0 * np.rad2deg(
+                        math.sqrt(input_pixel_area)
+                    )
+
+                    self._pixel_scale_ratio = (
+                        self._output_pixel_scale / input_pscale
+                    )
+
+                    # update output model if "pixel_scale_ratio" was never
+                    # set previously:
+                    if (self._output_model is not None and
+                            self._output_model["pixel_scale_ratio"] is None):
+                        self._output_model["pixel_scale_ratio"] = self._pixel_scale_ratio
+
+            iscale = math.sqrt(input_pixflux_area / input_pixel_area)
+
+        else:
+            iscale = 1.0
+
+        return iscale
+
+    @property
+    def finalized(self):
+        return self._finalized
+
+    def reset_arrays(self, reset_output=True, n_input_models=None):
+        """ Initialize/reset `Drizzle` objects, output model and arrays,
+        and time counters. Output WCS and shape are not modified from
+        `Resample` object initialization. This method needs to be called
+        before calling :py:meth:`add_model` for the first time if
+        :py:meth:`finalize` was previously called.
+
+        Parameters
+        ----------
+        reset_output : bool, optional
+            When `True` a new output model will be created. Otherwise new
+            models will be resampled and added to existing output data arrays.
+
+        n_input_models : int, None, optional
+            Number of input models expected to be resampled. When provided,
+            this is used to estimate memory requirements and optimize memory
+            allocation for the context array.
+
+        """
+        self._n_predicted_input_models = n_input_models
+
+        # set up an empty (don't allocate arrays at this time) output model:
+        if reset_output or getattr(self, "_output_model", None) is None:
+            self._output_model = self.create_output_model()
+
+        om = self._output_model
+
+        begin_ctx_id = om.get("n_coadds", 0)
+        if n_input_models is None:
+            max_ctx_id = None
+        else:
+            max_ctx_id = begin_ctx_id + n_input_models - 1
+
+        self._driz = Drizzle(
+            kernel=self.kernel,
+            fillval=self.fillval,
+            out_shape=self._output_array_shape,
+            out_img=om["data"],
+            out_wht=om["wht"],
+            out_ctx=om["con"],
+            exptime=om["exposure_time"],
+            begin_ctx_id=begin_ctx_id,
+            max_ctx_id=max_ctx_id,
+        )
+
+        # Also make a temporary model to hold error data
+        if self._compute_err == "driz_err":
+            self._driz_error = Drizzle(
+                kernel=self.kernel,
+                fillval=self.fillval,
+                out_shape=self._output_array_shape,
+                out_img=om["err"],
+                exptime=om["exposure_time"],
+                disable_ctx=True,
+            )
+
+        if self._enable_var:
+            self.init_variance_arrays()
+
+        self.init_time_counters()
+
+        self._finalized = False
+
+    def validate_input_model(self, model):
+        """ Checks that ``model`` has all the required keywords needed for
+        processing based on settings used during initialization of the
+        `Resample` object.
+
+        Parameters
+        ----------
+        model : dict
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines.
+
+        Raises
+        ------
+        KeyError
+            A `KeyError` is raised when ``model`` does not have a required
+            keyword.
+
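+        Examples
+        --------
+        A schematic minimal input model; all array and metadata values below
+        are placeholders, and, depending on the ``enable_var`` and
+        ``compute_err`` settings, variance and/or error arrays are also
+        required::
+
+            model = {
+                "data": data,
+                "dq": dq,
+                "wcs": wcs,
+                "group_id": 1,
+                "s_region": "POLYGON ICRS ...",
+                "exposure_time": 100.0,
+                "start_time": 59000.0,
+                "end_time": 59000.01,
+                "duration": 110.0,
+                "measurement_time": 95.0,
+                "effective_exposure_time": 100.0,
+                "elapsed_exposure_time": 110.0,
+                "pixelarea_steradians": 2.3e-13,
+                "level": 0.0,  # sky level
+                "subtracted": False,
+            }
+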
+        """
+        # TODO: do we need this to just raise a custom exception instead?
+        assert isinstance(model, dict)
+        min_attributes = [
+            # arrays:
+            "data",
+            "dq",
+
+            # meta:
+            "group_id",
+            "s_region",
+            "wcs",
+
+            "exposure_time",
+            "start_time",
+            "end_time",
+            "duration",
+            "measurement_time",
+            "effective_exposure_time",
+            "elapsed_exposure_time",
+
+            "pixelarea_steradians",
+            # "pixelarea_arcsecsq",
+
+            "level",  # sky level
+            "subtracted",
+        ]
+
+        if self._enable_var:
+            min_attributes += ["var_rnoise", "var_poisson", "var_flat"]
+
+        if self._compute_err == "driz_err":
+            min_attributes.append("err")
+
+        if (not self._enable_var and self.weight_type is not None and
+                self.weight_type.startswith('ivm')):
+            min_attributes.append("var_rnoise")
+
+        for attr in min_attributes:
+            if attr not in model:
+                raise KeyError(
+                    f"Attempt to access non-existent key '{attr}' "
+                    "in a data model."
+                )
+
+    def add_model(self, model):
+        """ Resamples model image and either variance data (if ``enable_var``
+        was `True`) or error data (if ``enable_err`` was `True`) and adds
+        them using appropriate weighting to the corresponding
+        arrays of the output model. It also updates resampled data weight,
+        the context array (if ``enable_ctx`` is `True`), relevant output
+        model's values such as "n_coadds".
+
+        Whenever ``model`` has a unique group ID that was never processed
+        before, the "pointings" value of the output model is incremented and
+        the "group_id" attribute is updated. Also, time counters are updated
+        with new values from the input ``model`` by calling
+        :py:meth:`~Resample.update_time`.
+
+        Parameters
+        ----------
+        model : dict
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines.
+
+        """
+        if self._finalized:
+            raise RuntimeError(
+                "Resampling has been finalized and intermediate arrays have "
+                "been freed. Unable to add new models. Call 'reset_arrays' "
+                "to initialize a new output model and associated arrays."
+            )
+        self.validate_input_model(model)
+        self._n_res_models += 1
+
+        data = model["data"]
+        wcs = model["wcs"]
+
+        # Check that input models are 2D images
+        if data.ndim != 2:
+            raise RuntimeError(
+                f"Input model '{model['filename']}' is not a 2D image."
+            )
+
+        self._output_model["n_coadds"] += 1
+
+        if (group_id := model["group_id"]) not in self._group_ids:
+            self.update_time(model)
+            self._group_ids.append(group_id)
+            self.output_model["pointings"] += 1
+
+        iscale = self._get_intensity_scale(model)
+        log.debug(f'Using intensity scale iscale={iscale}')
+
+        pixmap = calc_pixmap(
+            wcs,
+            self.output_model["wcs"],
+            data.shape,
+        )
+
+        log.info("Resampling science and variance data")
+
+        weight = self.build_driz_weight(
+            model,
+            weight_type=self.weight_type,
+            good_bits=self.good_bits,
+        )
+
+        # apply sky subtraction
+        blevel = model["level"]
+        if not model["subtracted"] and blevel is not None:
+            data = data - blevel
+            # self._output_model["subtracted"] = True
+
+        xmin, xmax, ymin, ymax = resample_range(
+            data.shape,
+            wcs.bounding_box
+        )
+
+        add_image_kwargs = {
+            'exptime': model["exposure_time"],
+            'pixmap': pixmap,
+            'scale': iscale,
+            'weight_map': weight,
+            'wht_scale': 1.0,  # hard-coded for JWST count-rate data
+            'pixfrac': self.pixfrac,
+            'in_units': 'cps',  # TODO: get units from data model
+            'xmin': xmin,
+            'xmax': xmax,
+            'ymin': ymin,
+            'ymax': ymax,
+        }
+
+        self._driz.add_image(data, **add_image_kwargs)
+
+        if self._compute_err == "driz_err":
+            self._driz_error.add_image(model["err"], **add_image_kwargs)
+
+        if self._enable_var:
+            self.resample_variance_arrays(model, pixmap, iscale, weight,
+                                          xmin, xmax, ymin, ymax)
+
+        # update output model (variance is too expensive so it's omitted)
+        self._output_model["data"] = self._driz.out_img
+        self._output_model["wht"] = self._driz.out_wht
+        if self._driz.out_ctx is not None:
+            self._output_model["con"] = self._driz.out_ctx
+
+        if self._compute_err == "driz_err":
+            # use resampled error
+            self.output_model["err"] = self._driz_error.out_img
+
+    def finalize(self, free_memory=True):
+        """ Finalizes all computations and frees temporary objects.
+
+        ``finalize`` calls :py:meth:`~Resample.finalize_resample_variance` and
+        :py:meth:`~Resample.finalize_time_info`.
+
+        .. warning::
+          If ``enable_var=True`` and :py:meth:`~Resample.finalize` is called
+          with ``free_memory=True`` then intermediate arrays holding variance
+          weights will be lost and so continuing adding new models after
+          a call to :py:meth:`~Resample.finalize` will result in incorrect
+          variance.
+
+        """
+        if self._finalized:
+            # can't finalize twice
+            return
+        self._finalized = free_memory
+
+        self._output_model["pointings"] = len(self.group_ids)
+
+        # assign resampled arrays to the output model dictionary:
+        self._output_model["data"] = self._driz.out_img
+        self._output_model["wht"] = self._driz.out_wht
+        if self._driz.out_ctx is not None:
+            # Since the context array is dynamic, it must be re-assigned
+            # back to the product's `con` attribute.
+            self._output_model["con"] = self._driz.out_ctx
+
+        if free_memory:
+            del self._driz
+
+        # compute final variances:
+        if self._enable_var:
+            self.finalize_resample_variance(
+                self._output_model,
+                free_memory=free_memory
+            )
+
+        if self._compute_err == "driz_err":
+            # use resampled error
+            self.output_model["err"] = self._driz_error.out_img
+            if free_memory:
+                del self._driz_error
+
+        elif self._enable_var:
+            # compute error from variance arrays:
+            var_components = [
+                self._output_model["var_rnoise"],
+                self._output_model["var_poisson"],
+                self._output_model["var_flat"],
+            ]
+            if self._compute_err == "from_var":
+                self.output_model["err"] = np.sqrt(
+                    np.nansum(var_components, axis=0)
+                )
+
+                # nansum returns zero for input that is all NaN -
+                # set those values to NaN instead
+                all_nan = np.all(np.isnan(var_components), axis=0)
+                self._output_model["err"][all_nan] = np.nan
+                del all_nan
+
+            del var_components
+
+        self._finalized = True
+
+        self.finalize_time_info()
+
+        return
+
+    def init_variance_arrays(self):
+        """ Allocate arrays that hold co-added resampled variances and their
+        weights. """
+        shape = self.output_array_shape
+
+        for noise_type in ["var_rnoise", "var_flat", "var_poisson"]:
+            var_dtype = self.output_array_types[noise_type]
+            kwd = f"{noise_type}_weight"
+            if self._accumulate:
+                wsum = self._output_model.get(noise_type)
+                wt = self._output_model.get(kwd)
+                if wsum is None or wt is None:
+                    wsum = np.full(shape, np.nan, dtype=var_dtype)
+                    wt = np.zeros(shape, dtype=var_dtype)
+                else:
+                    wsum = wsum * (wt * wt)
+            else:
+                wsum = np.full(shape, np.nan, dtype=var_dtype)
+                wt = np.zeros(shape, dtype=var_dtype)
+
+            setattr(self, f"_{noise_type}_wsum", wsum)
+            setattr(self, f"_{noise_type}_weight", wt)
+
+    def resample_variance_arrays(self, model, pixmap, iscale,
+                                 weight_map, xmin, xmax, ymin, ymax):
+        """ Resample and co-add variance arrays using appropriate weights
+        and update total weights.
+
+        Parameters
+        ----------
+        model : dict
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines.
+
+        pixmap : 3D array
+            A mapping from input image (``data``) coordinates to resampled
+            (``out_img``) coordinates. ``pixmap`` must be an array of shape
+            ``(Ny, Nx, 2)`` where ``(Ny, Nx)`` is the shape of the input image.
+            ``pixmap[..., 0]`` forms a 2D array of X-coordinates of input
+            pixels in the output frame and ``pixmap[..., 1]`` forms a 2D array
+            of Y-coordinates of input pixels in the output coordinate frame.
+
+        iscale : float
+            The scale to apply to the input variance data before drizzling.
+
+        weight_map : 2D array, None, optional
+            A 2D numpy array containing the pixel by pixel weighting.
+            Must have the same dimensions as ``data``.
+
+            When ``weight_map`` is `None`, the weight of input data pixels will
+            be assumed to be 1.
+
+        xmin : float, optional
+            This and the following three parameters set a bounding rectangle
+            on the input image. Only pixels on the input image inside this
+            rectangle will have their flux added to the output image. Xmin
+            sets the minimum value of the x dimension. The x dimension is the
+            dimension that varies quickest on the image. If the value is zero,
+            no minimum will be set in the x dimension. All four parameters are
+            zero based, counting starts at zero.
+
+        xmax : float, optional
+            Sets the maximum value of the x dimension on the bounding box
+            of the input image. If the value is zero, no maximum will
+            be set in the x dimension, the full x dimension of the output
+            image is the bounding box.
+
+        ymin : float, optional
+            Sets the minimum value in the y dimension on the bounding box. The
+            y dimension varies less rapidly than the x and represents the line
+            index on the input image. If the value is zero, no minimum will be
+            set in the y dimension.
+
+        ymax : float, optional
+            Sets the maximum value in the y dimension. If the value is zero,
+            no maximum will be set in the y dimension, the full y dimension
+            of the output image is the bounding box.
+
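+        Notes
+        -----
+        Each resampled variance array contributes ``var * weight**2`` to a
+        running sum while the corresponding ``weight`` values are accumulated
+        separately; the co-added variance is then formed in
+        :py:meth:`finalize_resample_variance` as ``wsum / weight_total**2``.
+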
+        """
+        # Do the read noise variance first, so it can be
+        # used for weights if needed
+        pars = {
+            'pixmap': pixmap,
+            'iscale': iscale,
+            'weight_map': weight_map,
+            'xmin': xmin,
+            'xmax': xmax,
+            'ymin': ymin,
+            'ymax': ymax,
+        }
+
+        if self._check_var_array(model, "var_rnoise"):
+            rn_var = self._resample_one_variance_array(
+                "var_rnoise",
+                model=model,
+                **pars,
+            )
+
+            # Find valid weighting values in the variance
+            if rn_var is not None:
+                mask = (rn_var > 0) & np.isfinite(rn_var)
+            else:
+                mask = np.full(self.output_array_shape, False)
+
+            # Set the weight for the image from the weight type.
+            # Default to unit weights so that 'weight' is always defined,
+            # even when the read noise variance could not be resampled.
+            weight = np.ones(self.output_array_shape)
+            if self.weight_type == "ivm" and rn_var is not None:
+                weight[mask] = np.reciprocal(rn_var[mask])
+
+            elif self.weight_type == "exptime":
+                t, _ = get_tmeasure(model)
+                weight = np.full(self.output_array_shape, t)
+
+            # Weight and add the readnoise variance
+            # Note: floating point overflow is an issue if variance weights
+            # are used - it can't be squared before multiplication
+            if rn_var is not None:
+                # Add the inverse of the resampled variance to a running sum.
+                # Update only pixels (in the running sum) with
+                # valid new values:
+                mask = (rn_var >= 0) & np.isfinite(rn_var) & (weight > 0)
+                self._var_rnoise_wsum[mask] = np.nansum(
+                    [
+                        self._var_rnoise_wsum[mask],
+                        rn_var[mask] * weight[mask] * weight[mask]
+                    ],
+                    axis=0
+                )
+                self._var_rnoise_weight[mask] += weight[mask]
+
+        # Now do poisson and flat variance, updating only valid new values
+        # (zero is a valid value; negative, inf, or NaN are not)
+        if self._check_var_array(model, "var_poisson"):
+            pn_var = self._resample_one_variance_array(
+                "var_poisson",
+                model=model,
+                **pars,
+            )
+            if pn_var is not None:
+                mask = (pn_var >= 0) & np.isfinite(pn_var) & (weight > 0)
+                self._var_poisson_wsum[mask] = np.nansum(
+                    [
+                        self._var_poisson_wsum[mask],
+                        pn_var[mask] * weight[mask] * weight[mask]
+                    ],
+                    axis=0
+                )
+                self._var_poisson_weight[mask] += weight[mask]
+
+        if self._check_var_array(model, "var_flat"):
+            flat_var = self._resample_one_variance_array(
+                "var_flat",
+                model=model,
+                **pars,
+            )
+            if flat_var is not None:
+                mask = (flat_var >= 0) & np.isfinite(flat_var) & (weight > 0)
+                self._var_flat_wsum[mask] = np.nansum(
+                    [
+                        self._var_flat_wsum[mask],
+                        flat_var[mask] * weight[mask] * weight[mask]
+                    ],
+                    axis=0
+                )
+                self._var_flat_weight[mask] += weight[mask]
+
+    def finalize_resample_variance(self, output_model, free_memory=True):
+        """ Compute variance for the resampled image from running sums and
+        weights. Free memory (when ``free_memory=True``) that holds these
+        running sums and weights arrays.
+        """
+        # Divide by the total weights, squared, and set in the output model.
+        # Zero weight and missing values are NaN in the output.
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", "invalid value*", RuntimeWarning)
+            warnings.filterwarnings("ignore", "divide by zero*", RuntimeWarning)
+
+            output_variance = (
+                self._var_rnoise_wsum / self._var_rnoise_weight /
+                self._var_rnoise_weight
+            ).astype(
+                dtype=self.output_array_types["var_rnoise"]
+            )
+            output_model["var_rnoise"] = output_variance
+
+            output_variance = (
+                self._var_poisson_wsum / self._var_poisson_weight /
+                self._var_poisson_weight
+            ).astype(
+                dtype=self.output_array_types["var_poisson"]
+            )
+            output_model["var_poisson"] = output_variance
+
+            output_variance = (
+                self._var_flat_wsum / self._var_flat_weight /
+                self._var_flat_weight
+            ).astype(
+                dtype=self.output_array_types["var_flat"]
+            )
+            output_model["var_flat"] = output_variance
+
+        if free_memory:
+            self._finalized = True
+            del (
+                self._var_rnoise_wsum,
+                self._var_poisson_wsum,
+                self._var_flat_wsum,
+                self._var_rnoise_weight,
+                self._var_poisson_weight,
+                self._var_flat_weight,
+            )
+
+    def _resample_one_variance_array(self, name, model, iscale,
+                                     weight_map, pixmap,
+                                     xmin=None, xmax=None, ymin=None,
+                                     ymax=None):
+        """Resample one variance image from an input model.
+
+        The error image is passed to drizzle instead of the variance, to
+        better match kernel overlap and user weights to the data, in the
+        pixel averaging process. The drizzled error image is squared before
+        returning.
+        """
+        variance = model.get(name)
+        if variance is None or variance.size == 0:
+            log.debug(
+                f"No data for '{name}' for model "
+                f"{repr(model['filename'])}. Skipping ..."
+            )
+            return
+
+        elif variance.shape != model["data"].shape:
+            log.warning(
+                f"Data shape mismatch for '{name}' for model "
+                f"{repr(model['filename'])}. Skipping ..."
+            )
+            return
+
+        output_shape = self.output_array_shape
+
+        # Resample the error array. Fill "unpopulated" pixels with NaNs.
+        driz = Drizzle(
+            out_shape=output_shape,
+            kernel=self.kernel,
+            fillval=np.nan,
+            disable_ctx=True
+        )
+
+        # Call 'drizzle' to perform image combination
+        log.info(f"Drizzling {variance.shape} --> {output_shape}")
+
+        driz.add_image(
+            data=np.sqrt(variance),
+            exptime=model["exposure_time"],
+            pixmap=pixmap,
+            scale=iscale,
+            weight_map=weight_map,
+            wht_scale=1.0,  # hard-coded for JWST count-rate data
+            pixfrac=self.pixfrac,
+            in_units="cps",  # TODO: get units from data model
+            xmin=xmin,
+            xmax=xmax,
+            ymin=ymin,
+            ymax=ymax,
+        )
+
+        return driz.out_img ** 2
+
+    def build_driz_weight(self, model, weight_type=None, good_bits=None):
+        """ Create a weight map for use by drizzle.
+
+        Parameters
+        ----------
+        model : dict
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines.
+
+        weight_type : {"exptime", "ivm"}, optional
+            The weighting type for adding models' data. For
+            ``weight_type="ivm"`` (the default), the weighting will be
+            determined per-pixel using
+            the inverse of the read noise (VAR_RNOISE) array stored in each
+            input image. If the ``VAR_RNOISE`` array does not exist,
+            the variance is set to 1 for all pixels (i.e., equal weighting).
+            If ``weight_type="exptime"``, the weight will be set equal
+            to the measurement time (``TMEASURE``) when available and to
+            the exposure time (``EFFEXPTM``) otherwise.
+
+        good_bits : int, str, None, optional
+            An integer bit mask, `None`, a Python list of bit flags, a comma-,
+            or ``'|'``-separated, ``'+'``-separated string list of integer
+            bit flags or mnemonic flag names that indicate what bits in models'
+            DQ bitfield array should be *ignored* (i.e., zeroed).
+
+            See `Resample` for more information.
+
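+        Notes
+        -----
+        Schematically, for ``weight_type="ivm"`` the returned map is
+        ``dqmask / var_rnoise`` (with non-finite values replaced by 1 and,
+        for the ``ivm-med``/``ivm-smed`` style variants, a median filter
+        applied), while for ``weight_type="exptime"`` it is
+        ``dqmask * tmeasure``.
+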
+        """
+        data = model["data"]
+        dq = model["dq"]
+
+        dqmask = bitfield_to_boolean_mask(
+            dq,
+            good_bits,
+            good_mask_value=1,
+            dtype=np.uint8,
+            flag_name_map=self.dq_flag_name_map,
+        )
+
+        if weight_type and weight_type.startswith('ivm'):
+            weight_type = weight_type.strip()
+            selective_median = weight_type.startswith('ivm-smed')
+            bitvalue = interpret_bit_flags(
+                good_bits,
+                flag_name_map=self.dq_flag_name_map
+            )
+            if bitvalue is None:
+                bitvalue = 0
+
+            # disable selective median if SATURATED flag is included
+            # in good_bits:
+            try:
+                saturation = self.dq_flag_name_map["SATURATED"]
+                if selective_median and not (bitvalue & saturation):
+                    selective_median = False
+                    weight_type = 'ivm'
+            except (AttributeError, KeyError):
+                # the flag map does not define SATURATED; selective median
+                # cannot be applied without it:
+                selective_median = False
+
+            var_rnoise = model["var_rnoise"]
+            if (var_rnoise is not None and var_rnoise.shape == data.shape):
+                with np.errstate(divide="ignore", invalid="ignore"):
+                    inv_variance = var_rnoise**-1
+
+                inv_variance[~np.isfinite(inv_variance)] = 1
+
+                if weight_type != 'ivm':
+                    ny, nx = data.shape
+
+                    # apply a median filter to smooth the weight at saturated
+                    # (or high read-out noise) single pixels. keep kernel size
+                    # small to still give lower weight to extended CRs, etc.
+                    ksz = weight_type[8 if selective_median else 7:]
+                    if ksz:
+                        kernel_size = int(ksz)
+                        if not (kernel_size % 2):
+                            raise ValueError(
+                                'Kernel size of the median filter in IVM '
+                                'weighting must be an odd integer.'
+                            )
+                    else:
+                        kernel_size = 3
+
+                    ivm_copy = inv_variance.copy()
+
+                    if selective_median:
+                        # apply median filter selectively only at
+                        # points of partially saturated sources:
+                        jumps = np.where(
+                            np.logical_and(dq & saturation, dqmask)
+                        )
+                        w2 = kernel_size // 2
+                        for r, c in zip(*jumps):
+                            x1 = max(0, c - w2)
+                            x2 = min(nx, c + w2 + 1)
+                            y1 = max(0, r - w2)
+                            y2 = min(ny, r + w2 + 1)
+                            data = ivm_copy[y1:y2, x1:x2][dqmask[y1:y2, x1:x2]]
+                            if data.size:
+                                inv_variance[r, c] = np.median(data)
+                            # else: leave it as is
+
+                    else:
+                        # apply median to the entire inv-var array:
+                        inv_variance = median_filter(
+                            inv_variance,
+                            size=kernel_size
+                        )
+                    bad_dqmask = np.logical_not(dqmask)
+                    inv_variance[bad_dqmask] = ivm_copy[bad_dqmask]
+
+            else:
+                warnings.warn(
+                    "var_rnoise array not available. "
+                    "Setting drizzle weight map to 1",
+                    RuntimeWarning
+                )
+                inv_variance = 1.0
+
+            weight = inv_variance * dqmask
+
+        elif weight_type == "exptime":
+            t, _ = get_tmeasure(model)
+            weight = np.full(data.shape, t)
+            weight *= dqmask
+
+        else:
+            weight = np.ones(data.shape, dtype=data.dtype) * dqmask
+
+        return weight.astype(np.float32)
+
+    def init_time_counters(self):
+        """ Initialize variables/arrays needed to process exposure time. """
+        self._total_exposure_time = self.output_model["exposure_time"]
+        self._duration = self.output_model["duration"]
+        self._total_measurement_time = self.output_model["measurement_time"]
+        if self._total_measurement_time is None:
+            self._total_measurement_time = 0.0
+
+        if (start_time := self.output_model.get("start_time", None)) is None:
+            self._exptime_start = []
+        else:
+            self._exptime_start = [start_time]
+
+        if (end_time := self.output_model.get("end_time", None)) is None:
+            self._exptime_end = []
+        else:
+            self._exptime_end = [end_time]
+
+        self._measurement_time_success = []
+
+    def update_time(self, model):
+        """ A method called by the `~Resample.add_model` method to process each
+        image's time attributes *only* when ``model`` has a new group ID.
+
+        """
+        if model["group_id"] in self._group_ids:
+            return
+
+        self._exptime_start.append(model["start_time"])
+        self._exptime_end.append(model["end_time"])
+
+        t, success = get_tmeasure(model)
+        self._total_exposure_time += model["exposure_time"]
+        self._measurement_time_success.append(success)
+        self._total_measurement_time += t
+
+        self._duration += model["duration"]
+
+    def finalize_time_info(self):
+        """ Perform final computations for the total time and update relevant
+        fields of the output model.
+
+        """
+        assert self._n_res_models
+        # basic exposure time attributes:
+        self._output_model["exposure_time"] = self._total_exposure_time
+        self._output_model["start_time"] = min(self._exptime_start)
+        self._output_model["end_time"] = max(self._exptime_end)
+        # Update other exposure time keywords:
+        # XPOSURE (identical to the total effective exposure time, EFFEXPTM)
+        self._output_model["effective_exposure_time"] = self._total_exposure_time
+        # DURATION (identical to TELAPSE, elapsed time)
+        self._output_model["duration"] = self._duration
+        self._output_model["elapsed_exposure_time"] = self._duration
+
+        if all(self._measurement_time_success):
+            self._output_model["measurement_time"] = self._total_measurement_time
+
+    def _check_var_array(self, model, array_name):
+        """ Check that a variance array has the same shape as the model's
+        data array.
+
+        """
+        array_data = model.get(array_name, None)
+        sci_data = model["data"]
+        model_name = _get_model_name(model)
+
+        if array_data is None or array_data.size == 0:
+            log.debug(
+                f"No data for '{array_name}' for model "
+                f"{repr(model_name)}. Skipping ..."
+            )
+            return False
+
+        elif array_data.shape != sci_data.shape:
+            log.warning(
+                f"Data shape mismatch for '{array_name}' for model "
+                f"{repr(model_name)}. Skipping ..."
+            )
+            return False
+
+        return True
+
+
+def _get_model_name(model):
+    model_name = model.get("filename")
+    if model_name is None or not model_name.strip():
+        model_name = "Unknown"
+    return model_name
+
+
+def _is_flux_density(bunit):
+    """
+    Differentiate between surface brightness and flux density data units.
+
+    Parameters
+    ----------
+    bunit : str or `~astropy.units.Unit`
+       Data units, e.g. 'MJy' (is flux density) or 'MJy/sr' (is not).
+
+    Returns
+    -------
+    bool
+        True if the units are equivalent to flux density units.
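+
+    Examples
+    --------
+    Quick checks of the two cases described above:
+
+    >>> _is_flux_density('MJy')
+    True
+    >>> _is_flux_density('MJy/sr')
+    False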
+    """
+    try:
+        flux_density = u.Unit(bunit).is_equivalent(u.Jy)
+    except (ValueError, TypeError):
+        flux_density = False
+    return flux_density
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
new file mode 100644
index 000000000..7175af4bc
--- /dev/null
+++ b/src/stcal/resample/utils.py
@@ -0,0 +1,299 @@
+from copy import deepcopy
+import logging
+import math
+
+import asdf
+import numpy as np
+from astropy.nddata.bitmask import interpret_bit_flags
+from spherical_geometry.polygon import SphericalPolygon
+
+
+__all__ = [
+    "build_mask",
+    "bytes2human",
+    "compute_wcs_pixel_area",
+    "get_tmeasure",
+    "is_imaging_wcs",
+    "load_custom_wcs",
+    "resample_range",
+]
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+
+def resample_range(data_shape, bbox=None):
+    """ Compute the range ``(xmin, xmax, ymin, ymax)`` of input pixels to
+    resample from the data shape and, when provided, a WCS bounding box
+    (clipped to the array bounds).
+    """
+    if bbox is None:
+        xmin = ymin = 0
+        xmax = data_shape[1] - 1
+        ymax = data_shape[0] - 1
+    else:
+        ((x1, x2), (y1, y2)) = bbox
+        xmin = max(0, int(x1 + 0.5))
+        ymin = max(0, int(y1 + 0.5))
+        xmax = min(data_shape[1] - 1, int(x2 + 0.5))
+        ymax = min(data_shape[0] - 1, int(y2 + 0.5))
+
+    return xmin, xmax, ymin, ymax
+
+
+def load_custom_wcs(asdf_wcs_file, output_shape=None):
+    """ Load a custom output WCS from an ASDF file.
+
+    Parameters
+    ----------
+    asdf_wcs_file : str
+        Path to an ASDF file containing a GWCS structure.
+
+    output_shape : tuple of int, optional
+        Array shape (in ``[x, y]`` order) for the output data. If not provided,
+        the custom WCS must specify one of: pixel_shape,
+        array_shape, or bounding_box.
+
+    Returns
+    -------
+    wcs : WCS
+        The output WCS to resample into.
+
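+    Examples
+    --------
+    A minimal sketch, assuming ``custom_wcs.asdf`` is an ASDF file that
+    stores a GWCS object under the ``"wcs"`` key (the file name is
+    illustrative):
+
+    .. doctest-skip::
+
+        >>> wcs = load_custom_wcs("custom_wcs.asdf", output_shape=(4096, 2048))
+        >>> wcs.array_shape
+        (2048, 4096)
+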
+    """
+    if not asdf_wcs_file:
+        return None
+
+    with asdf.open(asdf_wcs_file) as af:
+        wcs = deepcopy(af.tree["wcs"])
+        wcs.pixel_area = af.tree.get("pixel_area", None)
+        wcs.pixel_shape = af.tree.get("pixel_shape", None)
+        wcs.array_shape = af.tree.get("array_shape", None)
+
+    if output_shape is not None:
+        wcs.array_shape = output_shape[::-1]
+        wcs.pixel_shape = output_shape
+    elif wcs.pixel_shape is not None:
+        wcs.array_shape = wcs.pixel_shape[::-1]
+    elif wcs.array_shape is not None:
+        wcs.pixel_shape = wcs.array_shape[::-1]
+    elif wcs.bounding_box is not None:
+        wcs.array_shape = tuple(
+            int(axs[1] + 0.5)
+            for axs in wcs.bounding_box.bounding_box(order="C")
+        )
+    else:
+        raise ValueError(
+            "Step argument 'output_shape' is required when the custom WCS "
+            "has none of the 'array_shape', 'pixel_shape', or "
+            "'bounding_box' attributes set."
+        )
+
+    return wcs
+
+
+def build_mask(dqarr, bitvalue, flag_name_map=None):
+    """Build a bit mask from an input DQ array and a bitvalue flag
+
+    In the returned bit mask, 1 is good, 0 is bad
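+
+    Examples
+    --------
+    A small illustration with integer bit flags (flag value 1 treated as
+    good, i.e., ignored):
+
+    >>> import numpy as np
+    >>> build_mask(np.array([0, 1, 2, 3]), bitvalue=1)
+    array([1, 1, 0, 0], dtype=uint8)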
+    """
+    bitvalue = interpret_bit_flags(bitvalue, flag_name_map=flag_name_map)
+
+    if bitvalue is None:
+        return np.ones(dqarr.shape, dtype=np.uint8)
+    return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)
+
+
+def get_tmeasure(model):
+    """
+    Check if the measurement_time keyword is present in the datamodel
+    for use in exptime weighting. If not, revert to using exposure_time.
+
+    Returns a tuple of (exptime, is_measurement_time)
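+
+    Examples
+    --------
+    Quick checks with plain dictionaries standing in for data models:
+
+    >>> get_tmeasure({"measurement_time": 100.0, "exposure_time": 120.0})
+    (100.0, True)
+    >>> get_tmeasure({"exposure_time": 120.0})
+    (120.0, False)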
+    """
+    try:
+        tmeasure = model["measurement_time"]
+    except KeyError:
+        return model["exposure_time"], False
+    if tmeasure is None:
+        return model["exposure_time"], False
+    else:
+        return tmeasure, True
+
+
+# FIXME: temporarily copied here to avoid this import:
+# from stdatamodels.jwst.library.basic_utils import bytes2human
+def bytes2human(n):
+    """Convert bytes to human-readable format
+
+    Taken from the `psutil` library which references
+    http://code.activestate.com/recipes/578019
+
+    Parameters
+    ----------
+    n : int
+        Number to convert
+
+    Returns
+    -------
+    readable : str
+        A string with units attached.
+
+    Examples
+    --------
+    >>> bytes2human(10000)
+    '9.8K'
+
+    >>> bytes2human(100001221)
+    '95.4M'
+    """
+    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    prefix = {}
+    for i, s in enumerate(symbols):
+        prefix[s] = 1 << (i + 1) * 10
+    for s in reversed(symbols):
+        if n >= prefix[s]:
+            value = float(n) / prefix[s]
+            return '%.1f%s' % (value, s)
+    return "%sB" % n
+
+
+def is_imaging_wcs(wcs):
+    """ Returns `True` if ``wcs`` is an imaging WCS and `False` otherwise. """
+    imaging = all(
+        ax == 'SPATIAL' for ax in wcs.output_frame.axes_type
+    )
+    return imaging
+
+
+def compute_wcs_pixel_area(wcs, shape=None):
+    """ Compute the mean pixel area of a WCS, in steradians.
+
+    Parameters
+    ----------
+    wcs : WCS
+        A WCS object with either the ``array_shape`` attribute set or a
+        bounding box.
+
+    shape : tuple of int, optional
+        Array shape to use when ``wcs.array_shape`` is not set.
+
+    Returns
+    -------
+    pix_area : float or None
+        Mean pixel area in steradians, or `None` when a bounding polygon
+        with finite sky coordinates could not be constructed.
+    """
+    if (shape := (shape or wcs.array_shape)) is None:
+        raise ValueError(
+            "Either WCS must have 'array_shape' attribute set or 'shape' "
+            "argument must be supplied."
+        )
+
+    valid_polygon = False
+    spatial_idx = np.where(
+        np.array(wcs.output_frame.axes_type) == 'SPATIAL'
+    )[0]
+
+    ny, nx = shape
+
+    if wcs.bounding_box is None:
+        ((xmin, xmax), (ymin, ymax)) = ((-0.5, nx - 0.5), (-0.5, ny - 0.5))
+    else:
+        ((xmin, xmax), (ymin, ymax)) = wcs.bounding_box
+
+    xmin = max(0, int(xmin + 0.5))
+    xmax = min(nx - 1, int(xmax - 0.5))
+    ymin = max(0, int(ymin + 0.5))
+    ymax = min(ny - 1, int(ymax - 0.5))
+    if xmin > xmax:
+        (xmin, xmax) = (xmax, xmin)
+    if ymin > ymax:
+        (ymin, ymax) = (ymax, ymin)
+
+    k = 0
+    dxy = [1, -1, -1, 1]
+
+    while xmin < xmax and ymin < ymax:
+        try:
+            (x, y, image_area, center, b, r, t, l) = _get_boundary_points(
+                xmin=xmin,
+                xmax=xmax,
+                ymin=ymin,
+                ymax=ymax,
+                dx=min((xmax - xmin) // 4, 15),
+                dy=min((ymax - ymin) // 4, 15)
+            )
+        except ValueError:
+            return None
+
+        world = wcs(x, y)
+        ra = world[spatial_idx[0]]
+        dec = world[spatial_idx[1]]
+
+        limits = [ymin, xmax, ymax, xmin]
+
+        for j in range(4):
+            sl = [b, r, t, l][k]
+            if not (np.all(np.isfinite(ra[sl])) and
+                    np.all(np.isfinite(dec[sl]))):
+                limits[k] += dxy[k]
+                ymin, xmax, ymax, xmin = limits
+                k = (k + 1) % 4
+                break
+            k = (k + 1) % 4
+        else:
+            valid_polygon = True
+            break
+
+        ymin, xmax, ymax, xmin = limits
+
+    if not valid_polygon:
+        return None
+
+    world = wcs(*center)
+    wcenter = (world[spatial_idx[0]], world[spatial_idx[1]])
+
+    sky_area = SphericalPolygon.from_radec(ra, dec, center=wcenter).area()
+    if sky_area > 2 * np.pi:
+        log.warning(
+            "Unexpectedly large computed sky area for an image. "
+            "Setting area to: 4*Pi - area"
+        )
+        sky_area = 4 * np.pi - sky_area
+    pix_area = sky_area / image_area
+
+    return pix_area
+
+
+def _get_boundary_points(xmin, xmax, ymin, ymax, dx=None, dy=None,
+                         shrink=0):  # noqa: E741
+    """
+    xmin, xmax, ymin, ymax - integer coordinates of pixel boundaries
+    dx, dy - distance (in pixels) between points along the x and y edges
+        (default: the full edge length, i.e., corner points only)
+    shrink - number of pixels by which to shrink the rectangle on each side
+
+    Returns arrays of x and y boundary coordinates, the area and center of
+    the rectangle, and slices selecting the bottom, right, top, and left
+    edges.
+    """
+    nx = xmax - xmin + 1
+    ny = ymax - ymin + 1
+
+    if dx is None:
+        dx = nx
+    if dy is None:
+        dy = ny
+
+    if nx - 2 * shrink < 1 or ny - 2 * shrink < 1:
+        raise ValueError("Image size is too small.")
+
+    sx = max(1, int(np.ceil(nx / dx)))
+    sy = max(1, int(np.ceil(ny / dy)))
+
+    xmin += shrink
+    xmax -= shrink
+    ymin += shrink
+    ymax -= shrink
+
+    size = 2 * sx + 2 * sy
+    x = np.empty(size)
+    y = np.empty(size)
+
+    b = np.s_[0:sx]  # bottom edge
+    r = np.s_[sx:sx + sy]  # right edge
+    t = np.s_[sx + sy:2 * sx + sy]  # top edge
+    l = np.s_[2 * sx + sy:2 * sx + 2 * sy]  # noqa: E741  left edge
+
+    x[b] = np.linspace(xmin, xmax, sx, False)
+    y[b] = ymin
+    x[r] = xmax
+    y[r] = np.linspace(ymin, ymax, sy, False)
+    x[t] = np.linspace(xmax, xmin, sx, False)
+    y[t] = ymax
+    x[l] = xmin
+    y[l] = np.linspace(ymax, ymin, sy, False)
+
+    area = (xmax - xmin) * (ymax - ymin)
+    center = (0.5 * (xmin + xmax), 0.5 * (ymin + ymax))
+
+    return x, y, area, center, b, r, t, l

From df9900e2d546da73ab6cd1aa19a79926bd46f466 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 27 Nov 2024 16:21:27 -0500
Subject: [PATCH 02/35] remove unused imports

---
 src/stcal/resample/resample.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index b9891d3db..8b3602a1f 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -2,14 +2,11 @@
 import math
 import os
 import warnings
-import json
-import abc
-from copy import deepcopy
 import sys
 
 import numpy as np
-from scipy.ndimage import median_filter
 import psutil
+from scipy.ndimage import median_filter
 
 from astropy import units as u
 from astropy.nddata.bitmask import (

From 80e1bd3c26e6a70ee3989bc0c0ac8276b641c37d Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 27 Nov 2024 16:27:13 -0500
Subject: [PATCH 03/35] remove available memory check

---
 src/stcal/resample/resample.py | 96 +---------------------------------
 1 file changed, 1 insertion(+), 95 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 8b3602a1f..bb402eae5 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -1,11 +1,9 @@
 import logging
 import math
-import os
 import warnings
 import sys
 
 import numpy as np
-import psutil
 from scipy.ndimage import median_filter
 
 from astropy import units as u
@@ -15,11 +13,9 @@
 )
 from drizzle.utils import calc_pixmap
 from drizzle.resample import Drizzle
-from stdatamodels.jwst.library.basic_utils import bytes2human
 
 
 from stcal.resample.utils import (
-    bytes2human,
     compute_wcs_pixel_area,
     get_tmeasure,
     resample_range,
@@ -84,8 +80,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                  fillval=0.0, wht_type="ivm", good_bits=0,
                  output_wcs=None, output_model=None,
                  accumulate=False, enable_ctx=True, enable_var=True,
-                 compute_err=None,
-                 allowed_memory=None):
+                 compute_err=None):
         """
         Parameters
         ----------
@@ -238,18 +233,12 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                 At this time, output error array is not equivalent to
                 error propagation results.
 
-        allowed_memory : float, None
-            Fraction of memory allowed to be used for resampling. If
-            ``allowed_memory`` is `None` then no check for available memory
-            will be performed.
-
         """
         # to see if setting up arrays and drizzle is needed
         self._finalized = False
         self._n_res_models = 0
 
         self._n_predicted_input_models = n_input_models
-        self.allowed_memory = allowed_memory
         self._output_model = output_model
         self._create_new_output_model = output_model is not None
 
@@ -435,79 +424,6 @@ def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
 
         return attributes
 
-    def check_memory_requirements(self, output_model, allowed_memory,
-                                  n_input_models=None):
-        """ Called just before `create_output_model` returns to verify
-        that there is enough memory to hold the output.
-
-        Parameters
-        ----------
-        allowed_memory : float, None
-            Fraction of memory allowed to be used for resampling. If
-
-        output_model : dict, None, optional
-            A dictionary containing data arrays and other attributes that
-            will be used to add new models to. use
-            :py:meth:`Resample.output_model_attributes` to get the list of
-            keywords that must be present. When ``accumulate`` is `False`,
-            only the WCS object of the model will be used. When ``accumulate``
-            is `True`, new models will be added to the existing data in the
-            ``output_model``.
-
-            When ``output_model`` is `None`, a new model will be created.
-
-        n_input_models : int, None, optional
-            Number of input models expected to be resampled. When provided,
-            this is used to estimate memory requirements and optimize memory
-            allocation for the context array.
-
-
-        """
-        if ((allowed_memory is None and
-                "DMODEL_ALLOWED_MEMORY" not in os.environ) or
-                n_input_models is None):
-            return
-
-        allowed_memory = float(allowed_memory)
-
-        # get the available memory
-        available_memory = (
-            psutil.virtual_memory().available + psutil.swap_memory().total
-        )
-
-        # compute the output array size
-        npix = np.prod(self._output_array_shape)
-        nconpl = n_input_models // 32 + (1 if n_input_models % 32 else 0)  # context planes
-        required_memory = 0
-        for arr in self.output_array_types:
-            if arr in output_model:
-                if arr == "con":
-                    f = nconpl
-                elif arr == "err":
-                    if self._compute_err == "from_var":
-                        f = 2  # data and weight arrays
-                    elif self._compute_err == "driz_err":
-                        f = 1
-                elif arr.startswith("var"):
-                    f = 3  # variance data, weight, and total arrays
-                else:
-                    f = 1
-
-                required_memory += f * self.output_array_types[arr].itemsize
-
-        # add pixmap itemsize:
-        required_memory += 2 * np.dtype(float).itemsize
-        required_memory *= npix
-
-        # compare used to available
-        used_fraction = required_memory / available_memory
-        if used_fraction > allowed_memory:
-            raise OutputTooLargeError(
-                f'Combined ImageModel size {self._output_wcs.array_shape} '
-                f'requires {bytes2human(required_memory)}. '
-                f'Model cannot be instantiated.'
-            )
-
     def check_output_wcs(self, output_wcs, estimate_output_shape=True):
         """
         Check that provided WCS has expected properties and that its
@@ -607,9 +523,6 @@ def validate_output_model(self, output_model, accumulate,
 
     def create_output_model(self):
         """ Create a new "output model": a dictionary of data and meta fields.
-        Check that there is enough memory to hold all arrays by calling
-        `check_memory_requirements`.
-
         """
         assert self._output_wcs is not None
         assert np.array_equiv(
@@ -672,13 +585,6 @@ def create_output_model(self):
         if self._compute_err is not None:
             output_model["err"] = None
 
-        if self.allowed_memory:
-            self.check_memory_requirements(
-                output_model,
-                self.allowed_memory,
-                n_input_models=self._n_predicted_input_models,
-            )
-
         return output_model
 
     @property

From 9456803bbadc1dce3373dcd5207eb2409e764906 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 3 Dec 2024 11:41:24 -0500
Subject: [PATCH 04/35] Changes to accommodate outlier detection and spec in
 jwst

---
 changes/320.apichange.rst      |  1 +
 src/stcal/resample/resample.py | 45 +++++++++++++++++++++++++++++++---
 2 files changed, 43 insertions(+), 3 deletions(-)
 create mode 100644 changes/320.apichange.rst

diff --git a/changes/320.apichange.rst b/changes/320.apichange.rst
new file mode 100644
index 000000000..caaf8bd9d
--- /dev/null
+++ b/changes/320.apichange.rst
@@ -0,0 +1 @@
+Added ``resample`` submodule.
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index bb402eae5..87318bfe2 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -440,6 +440,14 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
 
         """
         naxes = output_wcs.output_frame.naxes
+        if 'SPECTRAL' in output_wcs.output_frame.axes_type:
+            if naxes != 3:
+                raise UnsupportedWCSError(
+                    "Output spectral WCS needs 3 coordinate axes but the "
+                    f"supplied WCS has {naxes} axes."
+                )
+            return
+
         if naxes != 2:
             raise UnsupportedWCSError(
                 "Output WCS needs 2 coordinate axes but the "
@@ -599,10 +607,38 @@ def output_array_shape(self):
     def output_wcs(self):
         return self._output_wcs
 
+    @property
+    def pixel_scale_ratio(self):
+        return self._pixel_scale_ratio
+
+    @property
+    def output_pixel_scale(self):
+        return self._output_pixel_scale  # in arcsec
+
     @property
     def group_ids(self):
         return self._group_ids
 
+    @property
+    def enable_ctx(self):
+        """ Indicates whether context array is enabled. """
+        return self._enable_ctx
+
+    @property
+    def enable_var(self):
+        """ Indicates whether variance arrays are resampled. """
+        return self._enable_var
+
+    @property
+    def compute_err(self):
+        """ Indicates whether error array is computed and how it is computed. """
+        return self._compute_err
+
+    @property
+    def is_in_accumulate_mode(self):
+        """ Indicates whether resample is continuing adding to previous co-adds. """
+        return self._accumulate
+
     def _get_intensity_scale(self, model):
         """
         Compute an intensity scale from the input and output pixel area.
@@ -637,7 +673,9 @@ def _get_intensity_scale(self, model):
                 # If input image is in flux density units, correct the
                 # flux for the user-specified change to the spatial dimension
                 if _is_flux_density(model["bunit_data"]):
-                    input_pixel_area *= self.pscale_ratio
+                    iscale = 1.0 / math.sqrt(self.pixel_scale_ratio)
+                else:
+                    iscale = 1.0
             else:
                 input_pixel_area = compute_wcs_pixel_area(
                     wcs,
@@ -667,7 +705,7 @@ def _get_intensity_scale(self, model):
                             self._output_model["pixel_scale_ratio"] is None):
                         self._output_model["pixel_scale_ratio"] = self._pixel_scale_ratio
 
-            iscale = math.sqrt(input_pixflux_area / input_pixel_area)
+                iscale = math.sqrt(input_pixflux_area / input_pixel_area)
 
         else:
             iscale = 1.0
@@ -971,8 +1009,9 @@ def finalize(self, free_memory=True):
                 # set those values to NaN instead
                 all_nan = np.all(np.isnan(var_components), axis=0)
                 self._output_model["err"][all_nan] = np.nan
+                del all_nan
 
-            del var_components, all_nan
+            del var_components
 
         self._finalized = True
 

From 2d0a6675cd3fc11c1c5993ddafee7b2b1dc455c9 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 3 Dec 2024 13:25:03 -0500
Subject: [PATCH 05/35] make code more robust

---
 src/stcal/resample/resample.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 87318bfe2..581a44e57 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -1445,12 +1445,16 @@ def update_time(self, model):
         self._exptime_start.append(model["start_time"])
         self._exptime_end.append(model["end_time"])
 
+        if (exposure_time := model["exposure_time"]) is not None:
+            self._total_exposure_time += exposure_time
+
         t, success = get_tmeasure(model)
-        self._total_exposure_time += model["exposure_time"]
         self._measurement_time_success.append(success)
-        self._total_measurement_time += t
+        if t is not None:
+            self._total_measurement_time += t
 
-        self._duration += model["duration"]
+        if (duration := model["duration"]) is not None:
+            self._duration += duration
 
     def finalize_time_info(self):
         """ Perform final computations for the total time and update relevant

From a1697ae5db92571dbb4e9a81e2614cce85320940 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 3 Dec 2024 13:42:41 -0500
Subject: [PATCH 06/35] fix type

---
 src/stcal/resample/resample.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 581a44e57..7aef00923 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -74,7 +74,7 @@ class Resample:
         "err": np.float32,
     }
 
-    dq_flag_name_map = {}
+    dq_flag_name_map = {}  # type: dict[str, int]
 
     def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                  fillval=0.0, wht_type="ivm", good_bits=0,

From 8460cec939d89b460e2eec3708426535dc0b53ea Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 3 Dec 2024 14:07:23 -0500
Subject: [PATCH 07/35] fix imports

---
 pyproject.toml              | 3 ++-
 src/stcal/resample/utils.py | 1 -
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 5faca45e0..f50baa437 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,10 +14,11 @@ classifiers = [
 ]
 dependencies = [
     "astropy >=6.0.0",
-    "drizzle>=1.15.0",
+    "drizzle >=2.0.0",
     "scipy >=1.14.1",
     "scikit-image>=0.20.0",
     "numpy >=1.25.0",
+    "astropy >=5.0.4",
     "opencv-python-headless >=4.6.0.66",
     "asdf >=3.3.0",
     "gwcs >=0.22.0",
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 7175af4bc..6968a8175 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -1,6 +1,5 @@
 from copy import deepcopy
 import logging
-import math
 
 import asdf
 import numpy as np

From f95302a910760fd0bfa2bd6effe2df0c419df167 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Thu, 12 Dec 2024 21:23:34 -0500
Subject: [PATCH 08/35] address reviewers' comments - part 1

---
 src/stcal/resample/__init__.py |  2 --
 src/stcal/resample/resample.py | 46 +++++++++++++++-------------------
 2 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/src/stcal/resample/__init__.py b/src/stcal/resample/__init__.py
index 7152f15c4..545841f60 100644
--- a/src/stcal/resample/__init__.py
+++ b/src/stcal/resample/__init__.py
@@ -1,12 +1,10 @@
 from .resample import (
-    OutputTooLargeError,
     Resample,
     compute_wcs_pixel_area,
     UnsupportedWCSError,
 )
 
 __all__ = [
-    "OutputTooLargeError",
     "Resample",
     "compute_wcs_pixel_area",
     "UnsupportedWCSError",
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 7aef00923..09cd178b8 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -27,17 +27,12 @@
 
 __all__ = [
     "compute_wcs_pixel_area"
-    "OutputTooLargeError",
     "Resample",
     "resampled_wcs_from_models",
     "UnsupportedWCSError",
 ]
 
 
-class OutputTooLargeError(RuntimeError):
-    """Raised when the output is too large for in-memory instantiation"""
-
-
 class UnsupportedWCSError(RuntimeError):
     """ Raised when provided output WCS has an unexpected number of axes
     or has an unsupported structure.
@@ -77,7 +72,7 @@ class Resample:
     dq_flag_name_map = {}  # type: dict[str, int]
 
     def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
-                 fillval=0.0, wht_type="ivm", good_bits=0,
+                 fillval=0.0, weight_type="ivm", good_bits=0,
                  output_wcs=None, output_model=None,
                  accumulate=False, enable_ctx=True, enable_var=True,
                  compute_err=None):
@@ -116,15 +111,15 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             pixels with no contributions from input images will be set to this
             ``fillval`` value.
 
-        wht_type : {"exptime", "ivm"}, optional
-            The weighting type for adding models' data. For ``wht_type="ivm"``
-            (the default), the weighting will be determined per-pixel using
-            the inverse of the read noise (VAR_RNOISE) array stored in each
-            input image. If the ``VAR_RNOISE`` array does not exist,
-            the variance is set to 1 for all pixels (i.e., equal weighting).
-            If ``weight_type="exptime"``, the weight will be set equal
-            to the measurement time (``TMEASURE``) when available and to
-            the exposure time (``EFFEXPTM``) otherwise.
+        weight_type : {"exptime", "ivm"}, optional
+            The weighting type for adding models' data. For
+            ``weight_type="ivm"`` (the default), the weighting will be
+            determined per-pixel using the inverse of the read noise
+            (VAR_RNOISE) array stored in each input image. If the
+            ``VAR_RNOISE`` array does not exist, the variance is set to 1 for
+            all pixels (i.e., equal weighting). If ``weight_type="exptime"``,
+            the weight will be set equal to the measurement time (``TMEASURE``)
+            when available and to the exposure time (``EFFEXPTM``) otherwise.
 
         good_bits : int, str, None, optional
             An integer bit mask, `None`, a Python list of bit flags, a comma-,
@@ -247,7 +242,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self._compute_err = compute_err
         self._accumulate = accumulate
 
-        # these are attributes that are used only for information purpose
+        # these attributes are used only for informational purposes
         # and are added to created the output_model only if they are
         # not already present there:
         self._pixel_scale_ratio = None
@@ -259,8 +254,8 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self.fillval = fillval
         self.good_bits = good_bits
 
-        if wht_type in ["ivm", "exptime"]:
-            self.weight_type = wht_type
+        if weight_type in ["ivm", "exptime"]:
+            self.weight_type = weight_type
         else:
             raise ValueError("Unexpected weight type: '{self.weight_type}'")
 
@@ -269,12 +264,12 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self.input_file_names = []
         self._group_ids = []
 
-        # determine output WCS and set-up output model if needed:
+        # determine output WCS and set up output model if needed:
         if output_model is None:
             if output_wcs is None:
                 raise ValueError(
                     "Output WCS must be provided either through the "
-                    "'output_wcs' parameter or the 'ouput_model' parameter. "
+                    "'output_wcs' parameter or the 'output_model' parameter. "
                 )
             else:
                 if isinstance(output_wcs, dict):
@@ -319,7 +314,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
 
         self._output_array_shape = self._output_wcs.array_shape
 
-        # Check that the output data shape has no zero length dimensions
+        # Check that the output data shape has no zero-length dimensions
         npix = np.prod(self._output_array_shape)
         if not npix:
             raise ValueError(
@@ -335,7 +330,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             f"Output mosaic size (nx, ny): {self._output_wcs.pixel_shape}"
         )
 
-        # set up an empty (don't allocate arrays at this time) output model:
+        # set up an empty output model (don't allocate arrays at this time):
         if self._output_model is None:
             self._output_model = self.create_output_model()
 
@@ -737,7 +732,7 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
         """
         self._n_predicted_input_models = n_input_models
 
-        # set up an empty (don't allocate arrays at this time) output model:
+        # set up an empty output model (don't allocate arrays at this time):
         if reset_output or getattr(self, "_output_model", None) is None:
             self._output_model = self.create_output_model()
 
@@ -908,7 +903,6 @@ def add_model(self, model):
         blevel = model["level"]
         if not model["subtracted"] and blevel is not None:
             data = data - blevel
-            # self._output_model["subtracted"] = True
 
         xmin, xmax, ymin, ymax = resample_range(
             data.shape,
@@ -1292,8 +1286,8 @@ def build_driz_weight(self, model, weight_type=None, good_bits=None):
 
         Parameters
         ----------
-        wht_type : {"exptime", "ivm"}, optional
-            The weighting type for adding models' data. For ``wht_type="ivm"``
+        weight_type : {"exptime", "ivm"}, optional
+            The weighting type for adding models' data. For ``weight_type="ivm"``
             (the default), the weighting will be determined per-pixel using
             the inverse of the read noise (VAR_RNOISE) array stored in each
             input image. If the ``VAR_RNOISE`` array does not exist,

From d844f3c8b3ed0d526e7d0bb191c246093c62f75b Mon Sep 17 00:00:00 2001
From: Mihai Cara <mcara@users.noreply.github.com>
Date: Thu, 12 Dec 2024 21:24:59 -0500
Subject: [PATCH 09/35] Update src/stcal/resample/resample.py

Co-authored-by: Ned Molter <emolter@users.noreply.github.com>
---
 src/stcal/resample/resample.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 09cd178b8..e9a603f68 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -1231,8 +1231,8 @@ def _resample_one_variance_array(self, name, model, iscale,
                                      ymax=None):
         """Resample one variance image from an input model.
 
-        The error image is passed to drizzle instead of the variance, to
-        better match kernel overlap and user weights to the data, in the
+        The error image is passed to drizzle instead of the variance in order to
+        better match kernel overlap and user weights to the data during the
         pixel averaging process. The drizzled error image is squared before
         returning.
         """

From b4d9d361017fbc67d5b67ce4bba7ada0ea0cb714 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mcara@users.noreply.github.com>
Date: Thu, 12 Dec 2024 21:25:12 -0500
Subject: [PATCH 10/35] Update src/stcal/resample/resample.py

Co-authored-by: Ned Molter <emolter@users.noreply.github.com>
---
 src/stcal/resample/resample.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index e9a603f68..fa25fef21 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -837,12 +837,12 @@ def validate_input_model(self, model):
                 )
 
     def add_model(self, model):
-        """ Resamples model image and either variance data (if ``enable_var``
-        was `True`) or error data (if ``enable_err`` was `True`) and adds
-        them using appropriate weighting to the corresponding
-        arrays of the output model. It also updates resampled data weight,
-        the context array (if ``enable_ctx`` is `True`), relevant output
-        model's values such as "n_coadds".
+        """ Resamples model image, variance data (if ``enable_var``
+        is `True`) , and error data (if ``enable_err`` is `True`), and adds
+        them to the corresponding
+        arrays of the output model using appropriate weighting.
+        It also updates the weight array and context array (if ``enable_ctx`` is `True`)
+        of the resampled data, as well as relevant metadata such as "n_coadds".
 
         Whenever ``model`` has a unique group ID that was never processed
         before, the "pointings" value of the output model is incremented and

From ef68028653bf95534aa1634f31381fb8b94f0761 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Sun, 15 Dec 2024 05:51:56 -0500
Subject: [PATCH 11/35] add more docs

---
 docs/conf.py                        |  10 +-
 docs/stcal/package_index.rst        |   1 +
 docs/stcal/resample/description.rst | 142 +++++++++++++++
 docs/stcal/resample/index.rst       |  19 ++
 docs/stcal/resample/utils.rst       |  10 ++
 src/stcal/resample/__init__.py      |   4 +-
 src/stcal/resample/resample.py      | 270 +++++++++++++++++++++-------
 src/stcal/resample/utils.py         | 144 ++++++++++++---
 8 files changed, 504 insertions(+), 96 deletions(-)
 create mode 100644 docs/stcal/resample/description.rst
 create mode 100644 docs/stcal/resample/index.rst
 create mode 100644 docs/stcal/resample/utils.rst

diff --git a/docs/conf.py b/docs/conf.py
index 53c05d508..659bc9c7e 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -39,6 +39,10 @@
     "gwcs": ("https://gwcs.readthedocs.io/en/latest/", None),
     "astropy": ("https://docs.astropy.org/en/stable/", None),
     "tweakwcs": ("https://tweakwcs.readthedocs.io/en/latest/", None),
+    "drizzle": (
+        "https://spacetelescope-drizzle.readthedocs.io/en/latest/",
+        None
+    ),
 }
 
 nitpick_ignore = [("py:class", "optional"), ("py:class", "np.ndarray")]
@@ -75,4 +79,8 @@
 html_use_index = True
 
 # Enable nitpicky mode - which ensures that all references in the docs resolve.
-nitpicky = True
+nitpicky = False
+
+nitpick_ignore = [
+    ('py:obj', 'type'),
+]
diff --git a/docs/stcal/package_index.rst b/docs/stcal/package_index.rst
index e1db02b03..3d3e3ba2c 100644
--- a/docs/stcal/package_index.rst
+++ b/docs/stcal/package_index.rst
@@ -9,3 +9,4 @@ Package Index
    alignment/index.rst
    tweakreg/index.rst
    outlier_detection/index.rst
+   resample/index.rst
diff --git a/docs/stcal/resample/description.rst b/docs/stcal/resample/description.rst
new file mode 100644
index 000000000..fd5c396f4
--- /dev/null
+++ b/docs/stcal/resample/description.rst
@@ -0,0 +1,142 @@
+Description
+===========
+
+:Classes: `stcal.resample.Resample`
+:Alias: resample
+
+This routine will resample each input 2D image based on the WCS and
+distortion information, and will combine multiple resampled images
+into a single undistorted product.
+
+This step uses the interface to the C-based cdriz routine to do the
+resampling via the drizzle method.  The input-to-output pixel
+mapping is determined via a mapping function derived from the
+WCS of each input image and the WCS of the defined output product.
+This mapping function gets passed to cdriz to drive the actual
+drizzling to create the output product.
+
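+A minimal sketch of the mapping step, using
+:py:func:`drizzle.utils.calc_pixmap` with two already constructed GWCS
+objects (``wcs_in`` and ``wcs_out`` here are placeholders):
+
+.. doctest-skip::
+
+    >>> from drizzle.utils import calc_pixmap
+    >>> pixmap = calc_pixmap(wcs_in, wcs_out)   # shape (ny, nx, 2)
+    >>> x_out, y_out = pixmap[0, 0]  # output position of input pixel (0, 0)
+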
+
+Error Propagation
+-----------------
+
+The error associated with each resampled pixel can in principle be derived
+from the variance components associated with each input pixel, weighted by
+the square of the input user weights and the square of the overlap between
+the input and output pixels. In practice, the ``cdriz`` routine does not
+currently support propagating variance data alongside science images, so
+the output error cannot be precisely calculated.
+
+To approximate the error on a resampled pixel, the variance arrays associated
+with each input model are resampled individually, then combined with a weighted
+sum.  The process is:
+
+#. For each input model, take the square root of the read noise variance
+   array to make an error image.
+
+#. Drizzle the read noise error image onto the output WCS, with drizzle
+   parameters matching those used for the science data.
+
+#. Square the resampled read noise to make a variance array.
+
+   a. If the resampling `weight_type` is an inverse variance map (`ivm`), weight
+      the resampled variance by the square of its own inverse.
+
+   #. If the `weight_type` is the exposure time (`exptime`), weight the
+      resampled variance by the square of the exposure time for the image.
+
+#. Add the weighted, resampled read noise variance to a running sum across all
+   images.  Add the weights (unsquared) to a separate running sum across
+   all images.
+
+#. Perform the same steps for the Poisson noise variance and the flat variance.
+   For these components, the weight for the sum is either the resampled read
+   noise variance or else the exposure time.
+
+#. For each variance component (read noise, Poisson, and flat), divide the
+   weighted variance sum by the total weight, squared.
+
+After each variance component is resampled and summed, the final error
+array is computed as the square root of the sum of the three independent
+variance components. This error image is stored in the ``err`` attribute
+in the output data model. Alternatively, the error array of the resampled
+image can be computed by resampling the error array associated with input
+data.
+
+It is expected that the output errors computed in this way will
+generally overestimate the true error on the resampled data.  The magnitude
+of the overestimation depends on the details of the pixel weights
+and error images.  Note, however, that drizzling error images produces
+a much better estimate of the output error than directly drizzling
+the variance images, since the kernel overlap weights do not need to be
+squared for combining error values.
+
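+A minimal numeric sketch of the `ivm` weighting described above, for a single
+pixel covered by two resampled images (the variance values are illustrative):
+
+>>> var_rnoise = [0.25, 1.0]                 # resampled read noise variances
+>>> weights = [1.0 / v for v in var_rnoise]  # per-image "ivm" weights
+>>> weighted_sum = sum(w**2 * v for w, v in zip(weights, var_rnoise))
+>>> weighted_sum / sum(weights)**2           # combined read noise variance
+0.2
+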
+
+Context Image
+-------------
+
+In addition to image data, the resample step also creates a "context image"
+stored in the ``con`` attribute of the output data model. Each pixel in the
+context image is a bit field that encodes information about which input
+images have contributed to the corresponding pixel in the resampled data
+array. The context image uses 32-bit integers to encode this information,
+so a single plane can keep track of only 32 input images. The first bit
+corresponds to the first input image, the second bit corresponds to the
+second input image, and so on. If the number of input images is larger
+than 32, multiple context images ("planes") are needed to hold information
+about all input images, with the first plane encoding which of the first 32
+images contributed to the output data pixel, the second plane representing
+the next 32 input images (numbers 33-64), etc. For this reason, the context
+array is a 3D array of type `numpy.int32` and shape ``(np, ny, nx)``, where
+``nx`` and ``ny`` are the dimensions of the image data and ``np`` is the
+number of "planes", equal to ``(number of input images - 1) // 32 + 1``.
+If the bit at position ``k`` in a pixel with coordinates ``(p, y, x)`` is 0,
+then input image number ``32 * p + k`` (0-indexed) did not contribute to the
+output data pixel with array coordinates ``(y, x)``; if that bit is 1, then
+input image number ``32 * p + k`` did contribute to the pixel ``(y, x)`` in
+the resampled image.
+
+As an example, let's assume we have 8 input images. Then, when ``'CON'`` pixel
+values are displayed using binary representation (and decimal in parentheses),
+one could see values like this::
+
+    00000001 (1) - only first input image contributed to this output pixel;
+    00000010 (2) - 2nd input image contributed;
+    00000100 (4) - 3rd input image contributed;
+    10000000 (128) - 8th input image contributed;
+    10000100 (132=128+4) - 3rd and 8th input images contributed;
+    11001101 (205=1+4+8+64+128) - input images 1, 3, 4, 7, 8 have contributed
+    to this output pixel.
+
+In order to test if a specific input image contributed to an output pixel,
+one needs to use bitwise operations. Using the example above, to test whether
+input images number 4 and 5 have contributed to the output pixel whose
+corresponding ``'CON'`` value is 205 (11001101 in binary form) we can do
+the following:
+
+>>> bool(205 & (1 << (5 - 1)))  # (205 & 16) = 0 (== 0 => False): did NOT contribute
+False
+>>> bool(205 & (1 << (4 - 1)))  # (205 & 8) = 8 (!= 0 => True): did contribute
+True
+
+In general, to get a list of all input images that have contributed to an
+output resampled pixel with image coordinates ``(x, y)``, and given a
+context array ``con``, one can do something like this:
+
+.. doctest-skip::
+
+    >>> import numpy as np
+    >>> np.flatnonzero([v & (1 << k) for v in con[:, y, x] for k in range(32)])
+
+For convenience, this functionality was implemented in the
+:py:func:`~drizzle.utils.decode_context` function.
+
+
+References
+----------
+
+A full description of the drizzling algorithm can be found in
+`Fruchter and Hook, PASP 2002 <https://doi.org/10.1086/338393>`_.
+A description of the inverse variance map method can be found in
+`Casertano et al., AJ 2000 <https://doi.org/10.1086/316851>`_, see Appendix A2.
+A description of the drizzle parameters and other useful drizzle-related
+resources can be found at `DrizzlePac Handbook <http://drizzlepac.stsci.edu>`_.
diff --git a/docs/stcal/resample/index.rst b/docs/stcal/resample/index.rst
new file mode 100644
index 000000000..6ff35c6e9
--- /dev/null
+++ b/docs/stcal/resample/index.rst
@@ -0,0 +1,19 @@
+.. _resample_module:
+
+========
+Resample
+========
+
+.. toctree::
+   :maxdepth: 1
+
+   description.rst
+
+**Also See:**
+
+.. toctree::
+   :maxdepth: 1
+
+   utils.rst
+
+.. automodapi:: stcal.resample
diff --git a/docs/stcal/resample/utils.rst b/docs/stcal/resample/utils.rst
new file mode 100644
index 000000000..af1f0a9ab
--- /dev/null
+++ b/docs/stcal/resample/utils.rst
@@ -0,0 +1,10 @@
+=================
+Utility Functions
+=================
+
+The ``utils`` module provides helper functions for `Resample`, such as creating an image mask from a model's DQ array, computing the average pixel area, and loading a custom WCS from an ASDF file.
+
+.. currentmodule:: stcal.resample.utils
+
+.. automodapi:: stcal.resample.utils
+   :noindex:
diff --git a/src/stcal/resample/__init__.py b/src/stcal/resample/__init__.py
index 545841f60..9c717832c 100644
--- a/src/stcal/resample/__init__.py
+++ b/src/stcal/resample/__init__.py
@@ -1,11 +1,11 @@
 from .resample import (
     Resample,
-    compute_wcs_pixel_area,
+    compute_mean_pixel_area,
     UnsupportedWCSError,
 )
 
 __all__ = [
     "Resample",
-    "compute_wcs_pixel_area",
+    "compute_mean_pixel_area",
     "UnsupportedWCSError",
 ]
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index fa25fef21..8c7930f1e 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -16,7 +16,7 @@
 
 
 from stcal.resample.utils import (
-    compute_wcs_pixel_area,
+    compute_mean_pixel_area,
     get_tmeasure,
     resample_range,
 )
@@ -26,9 +26,7 @@
 log.setLevel(logging.DEBUG)
 
 __all__ = [
-    "compute_wcs_pixel_area"
     "Resample",
-    "resampled_wcs_from_models",
     "UnsupportedWCSError",
 ]
 
@@ -40,20 +38,25 @@ class UnsupportedWCSError(RuntimeError):
 
 
 class Resample:
-    """
-    This is the controlling routine for the resampling process.
-
-    Notes
-    -----
-    This routine performs the following operations::
-
-      1. Extracts parameter settings from input model, such as pixfrac,
-         weight type, exposure time (if relevant), and kernel, and merges
-         them with any user-provided values.
-      2. Creates output WCS based on input images and define mapping function
-         between all input arrays and the output array.
-      3. Updates output data model with output arrays from drizzle, including
-         a record of metadata from all input models.
+    """ Base class for resampling images.
+
+    The main purpose of this class is to resample and add input images
+    (data, variance array) to an output image defined by an output WCS.
+
+    In particular, this class performs the following operations:
+
+    1. Sets up output arrays based on arguments used at initialization.
+    2. Based on information about the input images and user arguments, computes
+       scale factors needed to correctly convert resampled counts to fluxes.
+    3. For each input image, computes the coordinate transformation
+       (``pixmap``) from the coordinate system of the input image to the
+       coordinate system of the output image.
+    4. For each input image, computes a weight image.
+    5. Calls :py:class:`~drizzle.resample.Drizzle` methods to resample and
+       combine input images and their variance/error arrays.
+    6. Keeps track of total exposure time and other time-related quantities.
+
     """
     resample_suffix = 'i2d'
     resample_file_ext = '.fits'
@@ -91,7 +94,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             dimension, so the flux is confined to a quarter of the pixel area
             when the square kernel is used.
 
-        kernel: {"square", "gaussian", "point", "turbo", "lanczos2", "lanczos3"}, optional
+        kernel : {"square", "gaussian", "point", "turbo", "lanczos2", "lanczos3"}, optional
             The name of the kernel used to combine the input. The choice of
             kernel controls the distribution of flux over the kernel.
             The square kernel is the default.
@@ -100,7 +103,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                The "gaussian" and "lanczos2/3" kernels **DO NOT**
                conserve flux.
 
-        fillval: float, None, str, optional
+        fillval : float, None, str, optional
             The value of output pixels that did not have contributions from
             input images' pixels. When ``fillval`` is either `None` or
             ``"INDEF"`` and ``out_img`` is provided, the values of ``out_img``
@@ -111,7 +114,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             pixels with no contributions from input images will be set to this
             ``fillval`` value.
 
-        weight_type : {"exptime", "ivm"}, optional
+        weight_type : {"ivm", "exptime"}, optional
             The weighting type for adding models' data. For
             ``weight_type="ivm"`` (the default), the weighting will be
             determined per-pixel using the inverse of the read noise
@@ -168,12 +171,12 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             use a value of ``~4+8``, or ``~4,8``. A string value of
             ``~0`` would be equivalent to a setting of ``None``.
 
-            | Default value (0) will make *all* pixels with non-zero DQ
+            Default value (0) will make *all* pixels with non-zero DQ
             values be considered "bad" pixels, and the corresponding data
             pixels will be assigned zero weight and thus these pixels
             will not contribute to the output resampled data array.
 
-            | Set `good_bits` to `None` to turn off the use of model's DQ
+            Set `good_bits` to `None` to turn off the use of model's DQ
             array.
 
             For more details, see documentation for
@@ -301,7 +304,9 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
 
         if self._output_pixel_scale is None:
             self._output_pixel_scale = 3600.0 * np.rad2deg(
-                math.sqrt(compute_wcs_pixel_area(self._output_wcs))
+                math.sqrt(
+                    self.get_output_model_pixel_area({"wcs": self._output_wcs})
+                )
             )
             log.info(
                 "Computed output pixel scale: "
@@ -336,6 +341,67 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
 
         self.reset_arrays(reset_output=False, n_input_models=n_input_models)
 
+    def get_input_model_pixel_area(self, model):
+        """
+        Computes or retrieves pixel area of an input model. Currently,
+        this is the average pixel area of input model's pixels within either
+        the bounding box (if available) or the entire data array.
+
+        This value is used to compute a scale factor that will be applied
+        to input image data. This scale factor takes into account the
+        difference in the definition of the pixel area reported in the
+        model's ``meta`` and the pixel area at the location used to construct
+        the output WCS from the WCS of input models using
+        ``pixel_scale_ratio``.
+
+        The intensity scale factor is computed elsewhere as the ratio of the
+        pixel area reported in the model's ``meta`` to the area returned by
+        this function.
+
+        Subclasses can override this method to return the most appropriate
+        pixel area value.
+
+        Parameters
+        ----------
+
+        model : dict, None
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines. In particular, it
+            must have a keyword "wcs" and a WCS associated with it.
+
+        Returns
+        -------
+        pix_area : float
+            Pixel area in steradians.
+
+        """
+        pixel_area = compute_mean_pixel_area(
+            model["wcs"],
+            shape=model["data"].shape
+        )
+        return pixel_area
+
+    def get_output_model_pixel_area(self, model):
+        """
+        Computes or retrieves pixel area of the output model. Currently,
+        this is the average pixel area of the model's pixels within either
+        the bounding box (if available) or the entire data array.
+
+        Parameters
+        ----------
+
+        model : dict, None
+            A dictionary containing data arrays and other meta attributes
+            and values of actual models used by pipelines. In particular, it
+            must have a keyword "wcs" and a WCS associated with it.
+
+        Returns
+        -------
+        pix_area : float
+            Pixel area in steradians.
+
+        """
+        pixel_area = compute_mean_pixel_area(model["wcs"])
+        return pixel_area
+
     @classmethod
     def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
                                 compute_err):
@@ -374,6 +440,13 @@ def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
                 At this time, output error array is not equivalent to
                 error propagation results.
 
+        Returns
+        -------
+
+        attributes : set
+            A set of attributes that an output model must have when it
+            is provided as an input to `Resample.__init__` initializer.
+
         """
         # always required:
         attributes = {
@@ -422,7 +495,7 @@ def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
     def check_output_wcs(self, output_wcs, estimate_output_shape=True):
         """
         Check that provided WCS has expected properties and that its
-        ``array_shape`` property is defined.
+        ``array_shape`` property is defined. May modify ``output_wcs``.
 
         Parameters
         ----------
@@ -430,8 +503,12 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
             A WCS object corresponding to the output (resampled) image.
 
         estimate_output_shape : bool, optional
-            Indicates whether to *estimate* pixel scale of the ``output_wcs``
-            from
+            Indicates whether to *estimate* the output image shape of the
+            ``output_wcs`` from other available attributes, such as its
+            ``bounding_box``, when ``output_wcs.array_shape`` is `None`.
+            If ``estimate_output_shape`` is `True` and
+            ``output_wcs.array_shape`` is `None`, upon return
+            ``output_wcs.array_shape`` will be assigned an estimated value.
 
         """
         naxes = output_wcs.output_frame.naxes
@@ -451,9 +528,6 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
 
         # make sure array_shape and pixel_shape are set:
         if output_wcs.array_shape is None and estimate_output_shape:
-            # if wcs_pars and "output_shape" in wcs_pars:
-            #     output_wcs.array_shape = wcs_pars["output_shape"]
-            # else:
             if output_wcs.bounding_box:
                 halfpix = 0.5 + sys.float_info.epsilon
                 output_wcs.array_shape = (
@@ -473,10 +547,33 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
     def validate_output_model(self, output_model, accumulate,
                               enable_ctx, enable_var):
         """ Checks that ``output_model`` dictionary has all the required
-        keywords that the code would expect it to have based on the values
+        keywords that the code expects it to have based on the values
         of ``accumulate``, ``enable_ctx``, ``enable_var``. It will raise
         `ValueError` if `output_model` is missing required keywords/values.
 
+        Parameters
+        ----------
+
+        output_model : dict
+            A dictionary representing data and meta values from a data model.
+
+        accumulate : bool
+            Indicates whether resampled models should be added to the
+            provided ``output_model`` data or if new arrays should be
+            created.
+
+        enable_ctx : bool
+            Indicates whether to create a context image. If ``enable_ctx``
+            is `False`, parameters ``out_ctx``, ``begin_ctx_id``, and
+            ``max_ctx_id`` will be ignored.
+
+        enable_var : bool
+            Indicates whether to resample variance arrays.
+
+        compute_err : {"from_var", "driz_err"}, None
+            A string indicating how the error array for the resampled image
+            should be computed. See `Resample.__init__` for more details.
+
         """
         if output_model is None:
             if accumulate:
@@ -526,6 +623,13 @@ def validate_output_model(self, output_model, accumulate,
 
     def create_output_model(self):
         """ Create a new "output model": a dictionary of data and meta fields.
+
+        Returns
+        -------
+
+        output_model : dict
+            A dictionary of data model attributes and values.
+
         """
         assert self._output_wcs is not None
         assert np.array_equiv(
@@ -576,12 +680,6 @@ def create_output_model(self):
                     "var_rnoise": None,
                     "var_flat": None,
                     "var_poisson": None,
-                    # TODO: if we want to support adding more data to
-                    # existing output models, we need to also store weights
-                    # for variance arrays:
-                    # var_rnoise_weight
-                    # var_flat_weight
-                    # var_poisson_weight
                 }
             )
 
@@ -592,26 +690,35 @@ def create_output_model(self):
 
     @property
     def output_model(self):
+        """ Output (resampled) model. """
         return self._output_model
 
     @property
     def output_array_shape(self):
+        """ Shape of the output model arrays. """
         return self._output_array_shape
 
     @property
     def output_wcs(self):
+        """ WCS of the output (resampled) model. """
         return self._output_wcs
 
     @property
     def pixel_scale_ratio(self):
+        """ Get the ratio of the output pixel scale to the input pixel scale.
+        """
         return self._pixel_scale_ratio
 
     @property
     def output_pixel_scale(self):
+        """ Get pixel scale of the output model in arcsec. """
         return self._output_pixel_scale  # in arcsec
 
     @property
     def group_ids(self):
+        """ Get a list of all group IDs of models resampled and added to the
+        output model.
+        """
         return self._group_ids
 
     @property
@@ -626,12 +733,15 @@ def enable_var(self):
 
     @property
     def compute_err(self):
-        """ Indicates whether error array is computed and how it is computed. """
+        """ Indicates whether error array is computed and how it is computed.
+        """
         return self._compute_err
 
     @property
     def is_in_accumulate_mode(self):
-        """ Indicates whether resample is continuing adding to previous co-adds. """
+        """ Indicates whether resample is continuing to add to previous
+        co-adds.
+        """
         return self._accumulate
 
     def _get_intensity_scale(self, model):
@@ -657,13 +767,13 @@ def _get_intensity_scale(self, model):
             The scale to apply to the input data before drizzling.
 
         """
-        input_pixflux_area = model["pixelarea_steradians"]
+        photom_pixel_area = model["pixelarea_steradians"]
         wcs = model["wcs"]
 
-        if input_pixflux_area:
+        if photom_pixel_area:
             if 'SPECTRAL' in wcs.output_frame.axes_type:
                 # Use the nominal area as is
-                input_pixel_area = input_pixflux_area
+                input_pixel_area = photom_pixel_area
 
                 # If input image is in flux density units, correct the
                 # flux for the user-specified change to the spatial dimension
@@ -672,10 +782,8 @@ def _get_intensity_scale(self, model):
                 else:
                     iscale = 1.0
             else:
-                input_pixel_area = compute_wcs_pixel_area(
-                    wcs,
-                    shape=model["data"].shape
-                )
+                input_pixel_area = self.get_input_model_pixel_area(model)
+
                 if input_pixel_area is None:
                     model_name = model["filename"]
                     if not model_name:
@@ -700,7 +808,7 @@ def _get_intensity_scale(self, model):
                             self._output_model["pixel_scale_ratio"] is None):
                         self._output_model["pixel_scale_ratio"] = self._pixel_scale_ratio
 
-                iscale = math.sqrt(input_pixflux_area / input_pixel_area)
+                iscale = math.sqrt(photom_pixel_area / input_pixel_area)
 
         else:
             iscale = 1.0
@@ -709,6 +817,7 @@ def _get_intensity_scale(self, model):
 
     @property
     def finalized(self):
+        """ Indicates whether the output model was "finalized". """
         return self._finalized
 
     def reset_arrays(self, reset_output=True, n_input_models=None):
@@ -848,7 +957,7 @@ def add_model(self, model):
         before, the "pointings" value of the output model is incremented and
         the "group_id" attribute is updated. Also, time counters are updated
         with new values from the input ``model`` by calling
-        :py:meth:`~Resample.update_time`.
+        :py:meth:`~Resample.update_time` .
 
         Parameters
         ----------
@@ -914,9 +1023,9 @@ def add_model(self, model):
             'pixmap': pixmap,
             'scale': iscale,
             'weight_map': weight,
-            'wht_scale': 1.0,  # hard-coded for JWST count-rate data
+            'wht_scale': 1.0,
             'pixfrac': self.pixfrac,
-            'in_units': 'cps',  # TODO: get units from data model
+            'in_units': 'cps',
             'xmin': xmin,
             'xmax': xmax,
             'ymin': ymin,
@@ -1183,6 +1292,21 @@ def finalize_resample_variance(self, output_model, free_memory=True):
         """ Compute variance for the resampled image from running sums and
         weights. Free memory (when ``free_memory=True``) that holds these
         running sums and weights arrays.
+
+        Parameters
+        ----------
+        output_model : dict
+            A dictionary containing data arrays and other attributes of the
+            output model. The computed variance arrays (``var_rnoise``,
+            ``var_poisson``, and ``var_flat``) are stored in this dictionary.
+            Use :py:meth:`Resample.output_model_attributes` to get the list
+            of keywords that must be present.
+
+        free_memory : bool, optional
+            Indicates whether to free temporary arrays (i.e., weight arrays)
+            that are no longer needed. When `True`, it will no longer be
+            possible to continue adding new models to the output model.
+
         """
         # Divide by the total weights, squared, and set in the output model.
         # Zero weight and missing values are NaN in the output.
@@ -1191,24 +1315,24 @@ def finalize_resample_variance(self, output_model, free_memory=True):
             warnings.filterwarnings("ignore", "divide by zero*", RuntimeWarning)
 
             output_variance = (
-                self._var_rnoise_wsum / self._var_rnoise_weight /
-                self._var_rnoise_weight
+                self._var_rnoise_wsum / (self._var_rnoise_weight *
+                                         self._var_rnoise_weight)
             ).astype(
                 dtype=self.output_array_types["var_rnoise"]
             )
             output_model["var_rnoise"] = output_variance
 
             output_variance = (
-                self._var_poisson_wsum / self._var_poisson_weight /
-                self._var_poisson_weight
+                self._var_poisson_wsum / (self._var_poisson_weight *
+                                          self._var_poisson_weight)
             ).astype(
                 dtype=self.output_array_types["var_poisson"]
             )
             output_model["var_poisson"] = output_variance
 
             output_variance = (
-                self._var_flat_wsum / self._var_flat_weight /
-                self._var_flat_weight
+                self._var_flat_wsum / (self._var_flat_weight *
+                                       self._var_flat_weight)
             ).astype(
                 dtype=self.output_array_types["var_flat"]
             )
@@ -1231,10 +1355,11 @@ def _resample_one_variance_array(self, name, model, iscale,
                                      ymax=None):
         """Resample one variance image from an input model.
 
-        The error image is passed to drizzle instead of the variance in order to
-        better match kernel overlap and user weights to the data during the
+        The error image is passed to drizzle instead of the variance in order
+        to better match kernel overlap and user weights to the data during the
         pixel averaging process. The drizzled error image is squared before
         returning.
+
         """
         variance = model.get(name)
         if variance is None or variance.size == 0:
@@ -1270,9 +1395,9 @@ def _resample_one_variance_array(self, name, model, iscale,
             pixmap=pixmap,
             scale=iscale,
             weight_map=weight_map,
-            wht_scale=1.0,  # hard-coded for JWST count-rate data
+            wht_scale=1.0,
             pixfrac=self.pixfrac,
-            in_units="cps",  # TODO: get units from data model
+            in_units="cps",
             xmin=xmin,
             xmax=xmax,
             ymin=ymin,
@@ -1282,15 +1407,20 @@ def _resample_one_variance_array(self, name, model, iscale,
         return driz.out_img ** 2
 
     def build_driz_weight(self, model, weight_type=None, good_bits=None):
-        """ Create a weight map for use by drizzle.
+        """ Create a weight map that is used for weighting input images when
+        they are co-added to the output model.
 
         Parameters
         ----------
+        model : dict
+            Input model: a dictionary of relevant keywords and values.
+
         weight_type : {"exptime", "ivm"}, optional
-            The weighting type for adding models' data. For ``weight_type="ivm"``
-            (the default), the weighting will be determined per-pixel using
-            the inverse of the read noise (VAR_RNOISE) array stored in each
-            input image. If the ``VAR_RNOISE`` array does not exist,
+            The weighting type for adding models' data. For
+            ``weight_type="ivm"`` (the default), the weighting will be
+            determined per-pixel using the inverse of the read noise
+            (VAR_RNOISE) array stored in each input image. If the
+            ``VAR_RNOISE`` array does not exist,
             the variance is set to 1 for all pixels (i.e., equal weighting).
             If ``weight_type="exptime"``, the weight will be set equal
             to the measurement time (``TMEASURE``) when available and to
@@ -1302,7 +1432,7 @@ def build_driz_weight(self, model, weight_type=None, good_bits=None):
             bit flags or mnemonic flag names that indicate what bits in models'
             DQ bitfield array should be *ignored* (i.e., zeroed).
 
-            See `Resample` for more information
+            See `Resample` for more information.
 
         """
         data = model["data"]
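A hedged sketch (not the library implementation) of the two weighting schemes described in the docstring above, assuming ``good`` is the boolean mask built from ``good_bits`` and ``tmeasure`` is the measurement (or exposure) time:

    import numpy as np

    def sketch_weight(var_rnoise, good, weight_type="ivm", tmeasure=1.0):
        if weight_type == "ivm" and var_rnoise is not None and var_rnoise.size:
            with np.errstate(divide="ignore", invalid="ignore"):
                weight = np.where(var_rnoise > 0, 1.0 / var_rnoise, 0.0)
        elif weight_type == "exptime":
            weight = np.full(good.shape, tmeasure, dtype=np.float32)
        else:
            # no VAR_RNOISE available: equal weighting
            weight = np.ones(good.shape, dtype=np.float32)
        return weight * good  # zero out pixels rejected by the DQ mask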
@@ -1429,8 +1559,10 @@ def init_time_counters(self):
         self._measurement_time_success = []
 
     def update_time(self, model):
-        """ A method called by the `~Resample.add_model` method to process each
-        image's time attributes *only when ``model`` has a new group ID.
+        """
+        A method called by the :py:meth:`~Resample.add_model` method to
+        process each image's time attributes *only* when ``model`` has a new
+        group ID.
 
         """
         if model["group_id"] in self._group_ids:
@@ -1497,6 +1629,10 @@ def _check_var_array(self, model, array_name):
 
 
 def _get_model_name(model):
+    """ Return the value of ``"filename"`` from the model dictionary or
+    ``"Unknown"`` when ``"filename"`` is missing, `None`, or an empty string.
+
+    """
     model_name = model.get("filename")
     if model_name is None or not model_name.strip():
         model_name = "Unknown"
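For reference, the variance combination rewritten in the hunks above follows the usual inverse-variance-weighted scheme. A minimal NumPy sketch (not part of the patch), assuming the running ``*_wsum`` arrays accumulate ``sum(w_i**2 * var_i)`` and the ``*_weight`` arrays accumulate ``sum(w_i)`` per pixel:

    import numpy as np

    def combine_variance(var_wsum, weight_sum):
        # var_out = sum(w_i**2 * var_i) / (sum(w_i))**2; NaN where weight is 0
        with np.errstate(divide="ignore", invalid="ignore"):
            return var_wsum / (weight_sum * weight_sum)

    print(combine_variance(np.array([4.0, 0.0]), np.array([2.0, 0.0])))
    # -> [ 1. nan]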
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 6968a8175..35498c23d 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -10,7 +10,7 @@
 __all__ = [
     "build_mask",
     "bytes2human",
-    "compute_wcs_pixel_area",
+    "compute_mean_pixel_area",
     "get_tmeasure",
     "is_imaging_wcs",
     "load_custom_wcs",
@@ -52,7 +52,7 @@ def load_custom_wcs(asdf_wcs_file, output_shape=None):
 
     Returns
     -------
-    wcs : WCS
+    wcs : ~gwcs.wcs.WCS
         The output WCS to resample into.
 
     """
@@ -121,7 +121,7 @@ def get_tmeasure(model):
 def bytes2human(n):
     """Convert bytes to human-readable format
 
-    Taken from the `psutil` library which references
+    Taken from the ``psutil`` library which references
     http://code.activestate.com/recipes/578019
 
     Parameters
@@ -161,8 +161,36 @@ def is_imaging_wcs(wcs):
     return imaging
 
 
-def compute_wcs_pixel_area(wcs, shape=None):
-    """ Computes pixel area in steradians.
+def compute_mean_pixel_area(wcs, shape=None):
+    """ Computes the average pixel area (in steradians) of the input WCS,
+    using pixels within either the bounding box (if available) or the entire
+    data array, whose extent is defined by either ``wcs.array_shape`` or the
+    ``shape`` argument.
+
+    Parameters
+    ----------
+    wcs : WCS object
+        World coordinate system of the image for which the average pixel
+        area is computed.
+
+    shape : tuple, optional
+        Shape of the region over which the average pixel area will be
+        computed. When not provided, the average will be estimated over a
+        region defined by ``wcs.array_shape``.
+
+    Returns
+    -------
+    pix_area : float
+        Pixel area in steradians.
+
+    Notes
+    -----
+
+    This function takes the outline of the region in which the average is
+    computed (a rectangle defined by the bounding box, ``wcs.array_shape``,
+    or the ``shape`` argument) and projects it to world coordinates.
+    It then uses ``spherical_geometry`` to compute the area of the polygon
+    defined by this outline on the sky. In order to minimize errors due to
+    distortions in the ``wcs``, the code defines the outline using pixels
+    spaced no more than 15 pixels apart along the border of the rectangle
+    in which the average is computed.
+
     """
     if (shape := (shape or wcs.array_shape)) is None:
         raise ValueError(
@@ -182,15 +210,16 @@ def compute_wcs_pixel_area(wcs, shape=None):
     else:
         ((xmin, xmax), (ymin, ymax)) = wcs.bounding_box
 
-    xmin = max(0, int(xmin + 0.5))
-    xmax = min(nx - 1, int(xmax - 0.5))
-    ymin = max(0, int(ymin + 0.5))
-    ymax = min(ny - 1, int(ymax - 0.5))
     if xmin > xmax:
         (xmin, xmax) = (xmax, xmin)
     if ymin > ymax:
         (ymin, ymax) = (ymax, ymin)
 
+    xmin = max(0, int(xmin + 0.5))
+    xmax = min(nx - 1, int(xmax - 0.5))
+    ymin = max(0, int(ymin + 0.5))
+    ymax = min(ny - 1, int(ymax - 0.5))
+
     k = 0
     dxy = [1, -1, -1, 1]
 
@@ -249,11 +278,74 @@ def compute_wcs_pixel_area(wcs, shape=None):
 def _get_boundary_points(xmin, xmax, ymin, ymax, dx=None, dy=None,
                          shrink=0):  # noqa: E741
     """
-    xmin, xmax, ymin, ymax - integer coordinates of pixel boundaries
-    step - distance between points along an edge
-    shrink - number of pixels by which to reduce `shape`
+    Creates a list of ``x`` and ``y`` coordinates of points along the perimeter
+    of the rectangle defined by ``xmin``, ``xmax``, ``ymin``, ``ymax``, and
+    ``shrink`` in counter-clockwise order.
+
+    Parameters
+    ----------
+
+    xmin : int
+        X-coordinate of the left edge of a rectangle.
+
+    xmax : int
+        X-coordinate of the right edge of a rectangle.
+
+    ymin : int
+        Y-coordinate of the bottom edge of a rectangle.
+
+    ymax : int
+        Y-coordinate of the top edge of a rectangle.
+
+    dx : int, float, None, optional
+        Desired spacing between adjacent points along horizontal edges of
+        the rectangle.
+
+    dy : int, float, None, optional
+        Desired spacing between adjacent points along vertical edges of
+        the rectangle.
+
+    shrink : int, optional
+        Number of pixels by which to shrink the rectangle defined by
+        ``xmin``, ``xmax``, ``ymin``, and ``ymax``.
+
+    Returns
+    -------
+
+    x : numpy.ndarray
+        An array of X-coordinates of points along the perimeter
+        of the rectangle defined by ``xmin``, ``xmax``, ``ymin``, ``ymax``, and
+        ``shrink`` in counter-clockwise order.
+
+    y : numpy.ndarray
+        An array of Y-coordinates of points along the perimeter
+        of the rectangle defined by ``xmin``, ``xmax``, ``ymin``, ``ymax``, and
+        ``shrink`` in counter-clockwise order.
+
+    area : float
+        Area, in square pixels, of the region defined by ``xmin``, ``xmax``,
+        ``ymin``, ``ymax``, and ``shrink``.
+
+    center : tuple
+        A tuple of pixel coordinates at the center of the rectangle defined
+        by ``xmin``, ``xmax``, ``ymin``, ``ymax``.
+
+    bottom : slice
+        A `slice` object that allows selection of pixels from ``x`` and ``y``
+        arrays along the bottom edge of the rectangle.
+
+    right : slice
+        A `slice` object that allows selection of pixels from ``x`` and ``y``
+        arrays along the right edge of the rectangle.
+
+    top : slice
+        A `slice` object that allows selection of pixels from ``x`` and ``y``
+        arrays along the top edge of the rectangle.
+
+    left : slice
+        A `slice` object that allows selection of pixels from ``x`` and ``y``
+        arrays along the left edge of the rectangle.
 
-    Returns a list of points and the area of the rectangle
     """
     nx = xmax - xmin + 1
     ny = ymax - ymin + 1
@@ -278,21 +370,21 @@ def _get_boundary_points(xmin, xmax, ymin, ymax, dx=None, dy=None,
     x = np.empty(size)
     y = np.empty(size)
 
-    b = np.s_[0:sx]  # bottom edge
-    r = np.s_[sx:sx + sy]  # right edge
-    t = np.s_[sx + sy:2 * sx + sy]  # top edge
-    l = np.s_[2 * sx + sy:2 * sx + 2 * sy]  # noqa: E741  left edge
+    bottom = np.s_[0:sx]  # bottom edge
+    right = np.s_[sx:sx + sy]  # right edge
+    top = np.s_[sx + sy:2 * sx + sy]  # top edge
+    left = np.s_[2 * sx + sy:2 * sx + 2 * sy]  # noqa: E741  left edge
 
-    x[b] = np.linspace(xmin, xmax, sx, False)
-    y[b] = ymin
-    x[r] = xmax
-    y[r] = np.linspace(ymin, ymax, sy, False)
-    x[t] = np.linspace(xmax, xmin, sx, False)
-    y[t] = ymax
-    x[l] = xmin
-    y[l] = np.linspace(ymax, ymin, sy, False)
+    x[bottom] = np.linspace(xmin, xmax, sx, False)
+    y[bottom] = ymin
+    x[right] = xmax
+    y[right] = np.linspace(ymin, ymax, sy, False)
+    x[top] = np.linspace(xmax, xmin, sx, False)
+    y[top] = ymax
+    x[left] = xmin
+    y[left] = np.linspace(ymax, ymin, sy, False)
 
     area = (xmax - xmin) * (ymax - ymin)
     center = (0.5 * (xmin + xmax), 0.5 * (ymin + ymax))
 
-    return x, y, area, center, b, r, t, l
+    return x, y, area, center, bottom, right, top, left
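To illustrate the approach used by ``compute_mean_pixel_area`` and ``_get_boundary_points``, here is a rough, self-contained sketch (not the library code, with the spacing logic simplified): the rectangle border is traced with points spaced no more than ``max_step`` pixels apart, projected to the sky with the WCS, and the area of the resulting spherical polygon is divided by the number of pixels it encloses.

    import numpy as np
    from spherical_geometry.polygon import SphericalPolygon

    def mean_pixel_area_sketch(wcs, xmin, xmax, ymin, ymax, max_step=15):
        nx, ny = xmax - xmin + 1, ymax - ymin + 1
        sx = max(2, int(np.ceil(nx / max_step)))
        sy = max(2, int(np.ceil(ny / max_step)))
        xs = np.linspace(xmin, xmax, sx, endpoint=False)
        ys = np.linspace(ymin, ymax, sy, endpoint=False)
        # counter-clockwise outline: bottom, right, top, left edges
        x = np.concatenate([xs, np.full(sy, xmax), xs[::-1], np.full(sy, xmin)])
        y = np.concatenate([np.full(sx, ymin), ys, np.full(sx, ymax), ys[::-1]])
        ra, dec = wcs(x, y)
        center = wcs(0.5 * (xmin + xmax), 0.5 * (ymin + ymax))
        sky_area = SphericalPolygon.from_radec(ra, dec, center=center).area()
        return sky_area / (nx * ny)  # steradians per pixel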

From beff4362cb85c1b9b08cf060bd3aede9671d3591 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Sun, 15 Dec 2024 15:51:20 -0500
Subject: [PATCH 12/35] Fix type warning

---
 src/stcal/resample/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 35498c23d..6a13d5053 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -4,7 +4,7 @@
 import asdf
 import numpy as np
 from astropy.nddata.bitmask import interpret_bit_flags
-from spherical_geometry.polygon import SphericalPolygon
+from spherical_geometry.polygon import SphericalPolygon  # type: ignore[import-untyped]
 
 
 __all__ = [

From e69d752069ee54240b79e7059faea184ad4c01e6 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 16 Dec 2024 01:54:50 -0500
Subject: [PATCH 13/35] Use updated drizzle to deal with gwcs bbox

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index f50baa437..08c2621da 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,7 @@ classifiers = [
 ]
 dependencies = [
     "astropy >=6.0.0",
-    "drizzle >=2.0.0",
+    "drizzle >=2.0.1",
     "scipy >=1.14.1",
     "scikit-image>=0.20.0",
     "numpy >=1.25.0",

From 0c2dc6359e7368482bf399f326e0e4c9d0076dee Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Fri, 31 Jan 2025 10:50:05 -0500
Subject: [PATCH 14/35] extra checks in resample_range

---
 src/stcal/resample/utils.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 6a13d5053..06b32f360 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -23,16 +23,22 @@
 
 def resample_range(data_shape, bbox=None):
     # Find range of input pixels to resample:
+    if len(data_shape) != 2:
+        raise ValueError("Expected 'data_shape' to be of length 2.")
+
     if bbox is None:
         xmin = ymin = 0
-        xmax = data_shape[1] - 1
-        ymax = data_shape[0] - 1
+        xmax = max(data_shape[1] - 1, 0)
+        ymax = max(data_shape[0] - 1, 0)
+
     else:
+        if len(bbox) != 2:
+            raise ValueError("Expected bounding box to be of length 2.")
         ((x1, x2), (y1, y2)) = bbox
         xmin = max(0, int(x1 + 0.5))
         ymin = max(0, int(y1 + 0.5))
-        xmax = min(data_shape[1] - 1, int(x2 + 0.5))
-        ymax = min(data_shape[0] - 1, int(y2 + 0.5))
+        xmax = max(xmin, min(data_shape[1] - 1, int(x2 + 0.5)))
+        ymax = max(ymax, min(data_shape[0] - 1, int(y2 + 0.5)))
 
     return xmin, xmax, ymin, ymax
 

From bd228e396550cd3eeddb7ad9a3e9d1bbec30f6d3 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Fri, 31 Jan 2025 20:15:07 -0500
Subject: [PATCH 15/35] fix a bug in resample_range

---
 src/stcal/resample/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 06b32f360..84e2b2a03 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -38,7 +38,7 @@ def resample_range(data_shape, bbox=None):
         xmin = max(0, int(x1 + 0.5))
         ymin = max(0, int(y1 + 0.5))
         xmax = max(xmin, min(data_shape[1] - 1, int(x2 + 0.5)))
-        ymax = max(ymax, min(data_shape[0] - 1, int(y2 + 0.5)))
+        ymax = max(ymin, min(data_shape[0] - 1, int(y2 + 0.5)))
 
     return xmin, xmax, ymin, ymax
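With the clipping added in the previous patch and the bug fix above, ``resample_range`` keeps the limits inside the data array and never returns inverted ranges. The expected values below come directly from the unit tests added in the next patch:

    from stcal.resample.utils import resample_range

    # a bounding box partially outside a (750, 470) array is clipped:
    print(resample_range((750, 470), ((300, 700), (600, 800))))
    # -> (300, 469, 600, 749)

    # a bounding box entirely outside the array collapses to zeros:
    print(resample_range((750, 470), ((-5, -1), (-800, -600))))
    # -> (0, 0, 0, 0)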
 

From 5dc480c55f70894bd93009051018a94740c25ead Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 09:59:36 -0500
Subject: [PATCH 16/35] Add unit tests

---
 tests/resample/__init__.py            |   0
 tests/resample/conftest.py            |  34 ++++
 tests/resample/helpers.py             | 114 ++++++++++++++
 tests/resample/test_resample.py       |  57 +++++++
 tests/resample/test_resample_utils.py | 213 ++++++++++++++++++++++++++
 5 files changed, 418 insertions(+)
 create mode 100644 tests/resample/__init__.py
 create mode 100644 tests/resample/conftest.py
 create mode 100644 tests/resample/helpers.py
 create mode 100644 tests/resample/test_resample.py
 create mode 100644 tests/resample/test_resample_utils.py

diff --git a/tests/resample/__init__.py b/tests/resample/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/resample/conftest.py b/tests/resample/conftest.py
new file mode 100644
index 000000000..fb7c34ddc
--- /dev/null
+++ b/tests/resample/conftest.py
@@ -0,0 +1,34 @@
+""" Fixtures for resample tests """
+import pytest
+
+from astropy import wcs as fitswcs
+import numpy as np
+
+from . helpers import make_gwcs
+
+
+@pytest.fixture(scope='module')
+def wcs_gwcs():
+    crval = (150.0, 2.0)
+    crpix = (500.0, 500.0)
+    shape = (1000, 1000)
+    pscale = 0.06 / 3600
+    return make_gwcs(crpix, crval, pscale, shape)
+
+
+@pytest.fixture(scope='module')
+def wcs_fitswcs(wcs_gwcs):
+    fits_wcs = fitswcs.WCS(wcs_gwcs.to_fits_sip())
+    fits_wcs.pixel_area = wcs_gwcs.pixel_area
+    fits_wcs.pixel_scale = wcs_gwcs.pixel_scale
+    return fits_wcs
+
+
+@pytest.fixture(scope='module')
+def wcs_slicedwcs(wcs_gwcs):
+    xmin, xmax = 100, 500
+    slices = (slice(xmin, xmax), slice(xmin, xmax))
+    sliced_wcs = fitswcs.wcsapi.SlicedLowLevelWCS(wcs_gwcs, slices)
+    sliced_wcs.pixel_area = wcs_gwcs.pixel_area
+    sliced_wcs.pixel_scale = wcs_gwcs.pixel_scale
+    return sliced_wcs
diff --git a/tests/resample/helpers.py b/tests/resample/helpers.py
new file mode 100644
index 000000000..78ad9a230
--- /dev/null
+++ b/tests/resample/helpers.py
@@ -0,0 +1,114 @@
+from astropy.nddata.bitmask import BitFlagNameMap
+from astropy import coordinates as coord
+from astropy.modeling import models as astmodels
+
+from gwcs import coordinate_frames as cf
+from gwcs.wcstools import wcs_from_fiducial
+import numpy as np
+
+from stcal.alignment import compute_s_region_imaging
+
+
+class JWST_DQ_FLAG_DEF(BitFlagNameMap):
+    DO_NOT_USE = 1
+    SATURATED = 2
+    JUMP_DET = 4
+
+
+def make_gwcs(crpix, crval, pscale, shape):
+    """ Simulate a gwcs from FITS WCS parameters.
+
+    crpix - tuple of floats
+    crval - tuple of floats (RA, DEC)
+    pscale - pixel scale in degrees
+    shape - array shape (numpy's convention)
+
+    """
+    prj = astmodels.Pix2Sky_TAN()
+    fiducial = np.array(crval)
+
+    pc = np.array([[-1., 0.], [0., 1.]])
+    pc_matrix = astmodels.AffineTransformation2D(pc, name='pc_rotation_matrix')
+    scale = (astmodels.Scale(pscale, name='cdelt1') &
+             astmodels.Scale(pscale, name='cdelt2'))
+    transform = pc_matrix | scale
+
+    out_frame = cf.CelestialFrame(
+        name='world',
+        axes_names=('lon', 'lat'),
+        reference_frame=coord.ICRS()
+    )
+    input_frame = cf.Frame2D(name="detector")
+    wnew = wcs_from_fiducial(
+        fiducial,
+        coordinate_frame=out_frame,
+        projection=prj,
+        transform=transform,
+        input_frame=input_frame
+    )
+
+    output_bounding_box = (
+        (-0.5, float(shape[1]) - 0.5),
+        (-0.5, float(shape[0]) - 0.5)
+    )
+    offset1, offset2 = crpix
+    offsets = (astmodels.Shift(-offset1, name='crpix1') &
+               astmodels.Shift(-offset2, name='crpix2'))
+
+    wnew.insert_transform('detector', offsets, after=True)
+    wnew.bounding_box = output_bounding_box
+
+    tr = wnew.pipeline[0].transform
+    pix_area = (
+        np.deg2rad(tr['cdelt1'].factor.value) *
+        np.deg2rad(tr['cdelt2'].factor.value)
+    )
+
+    wnew.pixel_area = pix_area
+    wnew.pixel_scale = pscale
+    wnew.pixel_shape = shape[::-1]
+    wnew.array_shape = shape
+
+    return wnew
+
+
+def make_input_model(crpix, crval, pscale, shape, group_id=1, exptime=1):
+    w = make_gwcs(
+        crpix=crpix,
+        crval=crval,
+        pscale=pscale,
+        shape=shape
+    )
+
+    model = {
+        "data": np.zeros(shape, dtype=np.float32),
+        "dq": np.zeros(shape, dtype=np.int32),
+
+        # meta:
+        "filename": "",
+        "group_id": group_id,
+        "s_region": compute_s_region_imaging(w),
+        "wcs": w,
+        "bunit_data": "MJy",
+
+        "exposure_time": exptime,
+        "start_time": 0.0,
+        "end_time": exptime,
+        "duration": exptime,
+        "measurement_time": exptime,
+        "effective_exposure_time": exptime,
+        "elapsed_exposure_time": exptime,
+
+        "pixelarea_steradians": w.pixel_area,
+        "pixelarea_arcsecsq": w.pixel_area * (np.rad2deg(1) * 3600)**2,
+
+        "level": 0.0,  # sky level
+        "subtracted": False,
+    }
+
+    for arr in ["var_flat", "var_rnoise", "var_poisson"]:
+        model[arr] = np.ones(shape, dtype=np.float32)
+
+    model["err"] = np.sqrt(3.0) * np.ones(shape, dtype=np.float32)
+
+    return model
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
new file mode 100644
index 000000000..f6e8e1c50
--- /dev/null
+++ b/tests/resample/test_resample.py
@@ -0,0 +1,57 @@
+from stcal.resample import Resample
+
+import numpy as np
+
+from . helpers import make_gwcs, make_input_model, JWST_DQ_FLAG_DEF
+
+
+def test_resample_defaults():
+    crval = (150.0, 2.0)
+    crpix = (500.0, 500.0)
+    shape = (1000, 1000)
+    pscale = 0.06 / 3600
+
+    output_wcs = make_gwcs(
+        crpix=(600, 600),
+        crval=crval,
+        pscale=pscale,
+        shape=(1200, 1200)
+    )
+
+    nmodels = 4
+
+    resample = Resample(n_input_models=nmodels, output_wcs=output_wcs)
+    resample.dq_flag_name_map = JWST_DQ_FLAG_DEF
+
+    influx = 0.0
+    ttime = 0.0
+
+    for k in range(nmodels):
+        im = make_input_model(
+            crpix=tuple(i - 6 * k for i in crpix),
+            crval=crval,
+            pscale=pscale,
+            shape=shape,
+            group_id=k + 1,
+            exptime=k + 1
+        )
+        im["data"][:, :] = k + 0.5
+        influx += k + 0.5
+        ttime += k + 1
+
+        resample.add_model(im)
+
+    resample.finalize()
+
+    odata = resample.output_model["data"]
+    oweight = resample.output_model["wht"]
+
+    assert resample.output_model["pointings"] == nmodels
+    assert resample.output_model["exposure_time"] == ttime
+
+    # next assert assumes constant IVM
+    assert abs(np.sum(odata * oweight) - influx * np.prod(shape)) < 1.0e-6
+
+    assert np.nansum(resample.output_model["var_flat"]) > 0.0
+    assert np.nansum(resample.output_model["var_poisson"]) > 0.0
+    assert np.nansum(resample.output_model["var_rnoise"]) > 0.0
diff --git a/tests/resample/test_resample_utils.py b/tests/resample/test_resample_utils.py
new file mode 100644
index 000000000..4da667340
--- /dev/null
+++ b/tests/resample/test_resample_utils.py
@@ -0,0 +1,213 @@
+""" Test various utility functions """
+import asdf
+from asdf_astropy.testing.helpers import assert_model_equal
+
+from gwcs import coordinate_frames as cf
+from numpy.testing import assert_array_equal
+import numpy as np
+import pytest
+
+from stcal.resample.utils import (
+    build_mask,
+    bytes2human,
+    compute_mean_pixel_area,
+    get_tmeasure,
+    is_imaging_wcs,
+    resample_range,
+    load_custom_wcs,
+)
+
+from . helpers import JWST_DQ_FLAG_DEF
+
+
+DQ = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
+BITVALUES = 2**0 + 2**2
+BITVALUES_STR = f'{2**0}, {2**2}'
+BITVALUES_INV_STR = f'~{2**0}, {2**2}'
+JWST_NAMES = 'DO_NOT_USE,JUMP_DET'
+JWST_NAMES_INV = '~' + JWST_NAMES
+
+
+def _assert_frame_equal(a, b):
+    """ Copied from `gwcs`'s test_wcs.py """
+    __tracebackhide__ = True
+
+    assert type(a) is type(b)
+
+    if a is None:
+        return
+
+    if not isinstance(a, cf.CoordinateFrame):
+        return a == b
+
+    assert a.name == b.name  # nosec
+    assert a.axes_order == b.axes_order  # nosec
+    assert a.axes_names == b.axes_names  # nosec
+    assert a.unit == b.unit  # nosec
+    assert a.reference_frame == b.reference_frame  # nosec
+
+
+def _assert_wcs_equal(a, b):
+    """ Based on corresponding function from `gwcs`'s test_wcs.py """
+    assert a.name == b.name  # nosec
+
+    assert a.pixel_shape == b.pixel_shape
+    assert a.array_shape == b.array_shape
+    if a.array_shape is not None:
+        assert a.array_shape == b.pixel_shape[::-1]
+
+    assert len(a.available_frames) == len(b.available_frames)  # nosec
+    for a_step, b_step in zip(a.pipeline, b.pipeline):
+        _assert_frame_equal(a_step.frame, b_step.frame)
+        assert_model_equal(a_step.transform, b_step.transform)
+
+
+@pytest.mark.parametrize(
+    'dq, bitvalues, expected', [
+        (DQ, 0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0])),
+        (DQ, BITVALUES, np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])),
+        (DQ, BITVALUES_STR, np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])),
+        (DQ, BITVALUES_INV_STR, np.array([1, 0, 1, 0, 0, 0, 0, 0, 1])),
+        (DQ, JWST_NAMES, np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])),
+        (DQ, JWST_NAMES_INV, np.array([1, 0, 1, 0, 0, 0, 0, 0, 1])),
+        (DQ, None, np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])),
+    ]
+)
+def test_build_mask(dq, bitvalues, expected):
+    """ Test logic of mask building
+
+    Parameters
+    ----------
+    dq: numpy.array
+        The input data quality array
+
+    bitvalues: int or str
+        The bitvalues to match against
+
+    expected: numpy.array
+        Expected mask array
+    """
+    result = build_mask(dq, bitvalues, flag_name_map=JWST_DQ_FLAG_DEF)
+    assert_array_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data_shape, bbox, exception, truth",
+    [
+        ((1, 2, 3), ((1, 500), (0, 350)), True, None),
+        ((1, 2, 3), None, True, None),
+        ((1, ), ((1, 500), (0, 350)), True, None),
+        ((1, ), None, True, None),
+        ((1000, 800), ((1, 500), ), True, None),
+        ((1000, 800), ((1, 500), (0, 350), (0, 350)), True, None),
+        ((1, ), ((1, 500), (0, 350)), True, None),
+        ((1200, 1400), ((700, 300), (600, 800)), False, (700, 700, 600, 800)),
+        ((1200, 1400), ((600, 800), (700, 300)), False, (600, 800, 700, 700)),
+        ((1200, 1400), ((300, 700), (600, 800)), False, (300, 700, 600, 800)),
+        ((750, 470), ((300, 700), (600, 800)), False, (300, 469, 600, 749)),
+        ((750, 470), ((-5, -1), (-800, -600)), False, (0, 0, 0, 0)),
+        ((750, 470), None, False, (0, 469, 0, 749)),
+        ((-750, -470), None, False, (0, 0, 0, 0)),
+    ]
+)
+def test_resample_range(data_shape, bbox, exception, truth):
+    if exception:
+        with pytest.raises(ValueError):
+            resample_range(data_shape, bbox)
+        return
+
+    xyminmax = resample_range(data_shape, bbox)
+    assert np.allclose(xyminmax, truth, rtol=0, atol=1e-12)
+
+
+def test_load_custom_wcs_no_shape(tmpdir, wcs_gwcs):
+    """
+    Test loading a WCS from an asdf file.
+    """
+    wcs_file = str(tmpdir / "wcs.asdf")
+    wcs_gwcs.pixel_shape = None
+    wcs_gwcs.array_shape = None
+    wcs_gwcs.bounding_box = None
+
+    with asdf.AsdfFile({"wcs": wcs_gwcs}) as af:
+        af.write_to(wcs_file)
+
+    with pytest.raises(ValueError):
+        load_custom_wcs(wcs_file, output_shape=None)
+
+
+@pytest.mark.parametrize(
+    "array_shape, pixel_shape, output_shape, expected",
+    [
+        # (None, None, None, (1000, 1000)),  # from the bounding box
+        # # (None, (123, 456), None, (456, 123)),  # fails
+        # ((456, 123), None, None, (456, 123)),
+        # ((456, 123), None, (567, 890), (890, 567)),
+        ((456, 123), (123, 456), (567, 890), (890, 567)),
+        ((456, 123), (123, 456), None, (890, 567)),
+    ]
+)
+def test_load_custom_wcs(tmpdir, wcs_gwcs, array_shape, pixel_shape,
+                         output_shape, expected):
+    """
+    Test loading a WCS from an asdf file. `expected` is expected
+    ``wcs.array_shape``.
+
+    """
+    wcs_file = str(tmpdir / "wcs.asdf")
+
+    wcs_gwcs.pixel_shape = pixel_shape
+    wcs_gwcs.array_shape = array_shape
+
+    with asdf.AsdfFile({"wcs": wcs_gwcs}) as af:
+        af.write_to(wcs_file)
+
+    if output_shape is not None:
+        wcs_gwcs.array_shape = output_shape[::-1]
+
+    wcs_read = load_custom_wcs(wcs_file, output_shape=output_shape)
+
+    assert wcs_read.array_shape == expected
+
+    _assert_wcs_equal(wcs_gwcs, wcs_read)
+
+
+def test_get_tmeasure():
+    model = {
+        "measurement_time": 12.34,
+        "exposure_time": 23.45,
+    }
+
+    assert get_tmeasure(model) == (12.34, True)
+
+    model["measurement_time"] = None
+    assert get_tmeasure(model) == (23.45, False)
+
+    del model["measurement_time"]
+    assert get_tmeasure(model) == (23.45, False)
+
+    del model["exposure_time"]
+    with pytest.raises(KeyError):
+        get_tmeasure(model)
+
+
+@pytest.mark.parametrize(
+        "n, readable",
+        [
+            (10000, "9.8K"),
+            (100001221, "95.4M")
+        ]
+)
+def test_bytes2human(n, readable):
+    assert bytes2human(n) == readable
+
+
+def test_is_imaging_wcs(wcs_gwcs):
+    assert is_imaging_wcs(wcs_gwcs)
+
+
+def test_compute_mean_pixel_area(wcs_gwcs):
+    area = np.deg2rad(wcs_gwcs.pixel_scale)**2
+    assert abs(
+        compute_mean_pixel_area(wcs_gwcs) / area - 1.0
+    ) < 1e-5
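The parametrized ``test_build_mask`` cases above boil down to the following semantics, shown as a quick sketch (the import path for the test helpers assumes the tests are run from the repository root): pixels whose DQ contains only the "good" flags stay at 1, anything carrying another flag is zeroed.

    import numpy as np
    from stcal.resample.utils import build_mask
    from tests.resample.helpers import JWST_DQ_FLAG_DEF

    dq = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
    # DO_NOT_USE (1) and JUMP_DET (4) are listed as good bits, so only pixels
    # with other flags set (e.g. SATURATED=2) are rejected.
    np.testing.assert_array_equal(
        build_mask(dq, "DO_NOT_USE,JUMP_DET", flag_name_map=JWST_DQ_FLAG_DEF),
        [1, 1, 0, 0, 1, 1, 0, 0, 0],
    )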

From a9307731a8e595a25b1c5d20f1a0f887c53de34f Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 12:42:49 -0500
Subject: [PATCH 17/35] address reviewers comments

---
 docs/stcal/resample/description.rst | 26 +++++++++++++-------------
 src/stcal/resample/resample.py      | 19 ++++++++++---------
 2 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/docs/stcal/resample/description.rst b/docs/stcal/resample/description.rst
index fd5c396f4..d16d2cd94 100644
--- a/docs/stcal/resample/description.rst
+++ b/docs/stcal/resample/description.rst
@@ -31,7 +31,7 @@ with each input model are resampled individually, then combined with a weighted
 sum.  The process is:
 
 #. For each input model, take the square root of each of the read noise variance
-   array to make an error image.
+   arrays to make an error image.
 
 #. Drizzle the read noise error image onto the output WCS, with drizzle
    parameters matching those used for the science data.
@@ -41,7 +41,7 @@ sum.  The process is:
    a. If the resampling `weight_type` is an inverse variance map (`ivm`), weight
       the resampled variance by the square of its own inverse.
 
-   #. If the `weight_type` is the exposure time (`exptime`), weight the
+   b. If the `weight_type` is the exposure time (`exptime`), weight the
       resampled variance by the square of the exposure time for the image.
 
 #. Add the weighted, resampled read noise variance to a running sum across all
@@ -78,21 +78,21 @@ In addition to image data, resample step also creates a "context image" stored
 in the ``con`` attribute in the output data model . Each pixel in the context
 image is a bit field that encodes
 information about which input image has contributed to the corresponding
-pixel in the resampled data array. Context image uses 32 bit integers to encode
-this information and hence it can keep track of only 32 input images.
-First bit corresponds to the first input image, second bit corrsponds to the
-second input image, and so on. If the number of input images is larger than 32,
-then it is necessary to have multiple context images ("planes") to hold
-information about all input images
+pixel in the resampled data array. The context image uses 32 bit integers to
+encode this information, and hence it can keep track of only 32 input images.
+The first bit corresponds to the first input image, the second bit corresponds
+to the second input image, and so on. If the number of input images is larger
+than 32, then it is necessary to have multiple context images ("planes")
+to hold information about all input images
 with the first plane encoding which of the first 32 images contributed
-to the output data pixel, second plane representing next 32 input images
+to the output data pixel, the second plane representing next 32 input images
 (number 33-64), etc. For this reason, context array is a 3D array of the type
 `numpy.int32` and shape ``(np, ny, nx)`` where ``nx`` and ``ny``
-are dimensions of image's data. ``np`` is the number of "planes" equal to
-``(number of input images - 1) // 32 + 1``. If a bit at position ``k`` in a
-pixel with coordinates ``(p, y, x)`` is 0 then input image number
+are the dimensions of the image data. ``np`` is the number of "planes" computed
+as ``(number of input images - 1) // 32 + 1``. If a bit at position ``k`` in a
+pixel with coordinates ``(p, y, x)`` is 0, then input image number
 ``32 * p + k`` (0-indexed) did not contribute to the output data pixel
-with array coordinates ``(y, x)`` and if that bit is 1 then input image number
+with array coordinates ``(y, x)`` and if that bit is 1, then input image number
 ``32 * p + k`` did contribute to the pixel ``(y, x)`` in the resampled image.
 
 As an example, let's assume we have 8 input images. Then, when ``'CON'`` pixel
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 8c7930f1e..10a1d4b66 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -50,9 +50,9 @@ class Resample:
        scale factors needed to obtain correctly convert resampled counts to
        fluxes.
     3. For each input image computes coordinate transformations (``pixmap``)
-       from coordinate system of the input image to the coordinate system of
-       the output image.
-    4. For each input image computes weight image.
+       from the coordinate system of the input image to the coordinate system
+       of the output image.
+    4. Computes the weight image for each input image.
     5. Calls :py:class:`~drizzle.resample.Drizzle` methods to resample and
        combine input images and their variance/error arrays.
     6. Keeps track of total exposure time and other time-related quantities.
@@ -344,17 +344,18 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
     def get_input_model_pixel_area(self, model):
         """
         Computes or retrieves pixel area of an input model. Currently,
-        this is the average pixel area of input model's pixels within either
-        the bounding box (if available) or the entire data array.
+        this is the average pixel area of the input model's pixels within
+        either the bounding box (if available) or the entire data array.
 
         This value is used to compute a scale factor that will be applied
         to input image data. This scale factor takes into account the
-        difference in the definition of the pixel area reported in model's
-        ``meta`` and the pixel area at the location used to construct
+        difference in the definition of the pixel area reported in
+        ``model.meta`` and the pixel area at the location used to construct
         output WCS from the WCS of input models using ``pixel_scale_ratio``.
 
-        Intensity scale factor is computed elsewhere as the ratio of the value
-        of the pixel area in the meta to the area returned by this function.
+        The intensity scale factor is computed elsewhere as the ratio of the
+        value of the pixel area in the meta to the area returned by this
+        function.
 
         Subclasses can override this method to return the most appropriate
         pixel area value.

From 8f1558b52d960017462b1708f2d6687478fa0cb1 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 13:02:18 -0500
Subject: [PATCH 18/35] address reviewers comments

---
 docs/stcal/resample/description.rst | 2 +-
 src/stcal/resample/resample.py      | 9 ++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/docs/stcal/resample/description.rst b/docs/stcal/resample/description.rst
index d16d2cd94..f692c2879 100644
--- a/docs/stcal/resample/description.rst
+++ b/docs/stcal/resample/description.rst
@@ -83,7 +83,7 @@ encode this information, and hence it can keep track of only 32 input images.
 The first bit corresponds to the first input image, the second bit corresponds
 to the second input image, and so on. If the number of input images is larger
 than 32, then it is necessary to have multiple context images ("planes")
-to hold information about all input images
+to hold information about all input images,
 with the first plane encoding which of the first 32 images contributed
 to the output data pixel, the second plane representing next 32 input images
 (number 33-64), etc. For this reason, context array is a 3D array of the type
diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 10a1d4b66..21e66ddae 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -47,9 +47,8 @@ class Resample:
 
     1. Sets up output arrays based on arguments used at initialization.
     2. Based on information about the input images and user arguments, computes
-       scale factors needed to obtain correctly convert resampled counts to
-       fluxes.
-    3. For each input image computes coordinate transformations (``pixmap``)
+       scale factors needed to convert resampled counts to fluxes.
+    3. For each input image, computes coordinate transformations (``pixmap``)
        from the coordinate system of the input image to the coordinate system
        of the output image.
     4. Computes the weight image for each input image.
@@ -349,8 +348,8 @@ def get_input_model_pixel_area(self, model):
 
         This value is used to compute a scale factor that will be applied
         to input image data. This scale factor takes into account the
-        difference in the definition of the pixel area reported in
-        ``model.meta`` and the pixel area at the location used to construct
+        difference in the definition of the pixel area reported in model's
+        ``meta`` and the pixel area at the location used to construct
         output WCS from the WCS of input models using ``pixel_scale_ratio``.
 
         The intensity scale factor is computed elsewhere as the ratio of the

From 1fc7736d2513440f5f6deeab1270ad3b07169c10 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 14:52:32 -0500
Subject: [PATCH 19/35] attempt to fix conflicts in oldeps

---
 pyproject.toml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 08c2621da..187165627 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,7 +18,6 @@ dependencies = [
     "scipy >=1.14.1",
     "scikit-image>=0.20.0",
     "numpy >=1.25.0",
-    "astropy >=5.0.4",
     "opencv-python-headless >=4.6.0.66",
     "asdf >=3.3.0",
     "gwcs >=0.22.0",

From 5d88c35e085c988a3dde77e1d9268be4cfe476d2 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 14:58:40 -0500
Subject: [PATCH 20/35] fix failing test

---
 tests/resample/test_resample.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index f6e8e1c50..d2106c833 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -50,7 +50,9 @@ def test_resample_defaults():
     assert resample.output_model["exposure_time"] == ttime
 
     # next assert assumes constant IVM
-    assert abs(np.sum(odata * oweight) - influx * np.prod(shape)) < 1.0e-6
+    assert abs(
+        np.sum(odata * oweight, dtype=float) - influx * np.prod(shape)
+    ) < 1.0e-6
 
     assert np.nansum(resample.output_model["var_flat"]) > 0.0
     assert np.nansum(resample.output_model["var_poisson"]) > 0.0

From fdd495b5533655f389e4a0cfeb358d2ca605f1a1 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 15:06:39 -0500
Subject: [PATCH 21/35] fix failing test

---
 tests/resample/test_resample.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index d2106c833..317be4a28 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -50,9 +50,12 @@ def test_resample_defaults():
     assert resample.output_model["exposure_time"] == ttime
 
     # next assert assumes constant IVM
-    assert abs(
-        np.sum(odata * oweight, dtype=float) - influx * np.prod(shape)
-    ) < 1.0e-6
+    assert np.allclose(
+        np.sum(odata * oweight, dtype=float),
+        influx * np.prod(shape),
+        atol=0,
+        rtol=1e-6,
+    )
 
     assert np.nansum(resample.output_model["var_flat"]) > 0.0
     assert np.nansum(resample.output_model["var_poisson"]) > 0.0

From 3ba3a253ffdcd82e118122cef32f9d254a4acb51 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Mon, 3 Feb 2025 23:57:11 -0500
Subject: [PATCH 22/35] remove load_custom_wcs; clean docs/conf.py

---
 docs/conf.py                          |  5 +-
 src/stcal/resample/utils.py           | 53 ----------------
 tests/resample/conftest.py            |  6 +-
 tests/resample/test_resample_utils.py | 91 ---------------------------
 4 files changed, 5 insertions(+), 150 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 659bc9c7e..a4594f209 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -45,8 +45,6 @@
     ),
 }
 
-nitpick_ignore = [("py:class", "optional"), ("py:class", "np.ndarray")]
-
 extensions = [
     "pytest_doctestplus.sphinx.doctestplus",
     "sphinx.ext.autodoc",
@@ -82,5 +80,6 @@
 nitpicky = False
 
 nitpick_ignore = [
-    ('py:obj', 'type'),
+    ("py:class", "optional"),
+    ("py:class", "np.ndarray"),
 ]
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 84e2b2a03..370067cdd 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -1,7 +1,5 @@
-from copy import deepcopy
 import logging
 
-import asdf
 import numpy as np
 from astropy.nddata.bitmask import interpret_bit_flags
 from spherical_geometry.polygon import SphericalPolygon  # type: ignore[import-untyped]
@@ -13,7 +11,6 @@
     "compute_mean_pixel_area",
     "get_tmeasure",
     "is_imaging_wcs",
-    "load_custom_wcs",
     "resample_range",
 ]
 
@@ -43,56 +40,6 @@ def resample_range(data_shape, bbox=None):
     return xmin, xmax, ymin, ymax
 
 
-def load_custom_wcs(asdf_wcs_file, output_shape=None):
-    """ Load a custom output WCS from an ASDF file.
-
-    Parameters
-    ----------
-    asdf_wcs_file : str
-        Path to an ASDF file containing a GWCS structure.
-
-    output_shape : tuple of int, optional
-        Array shape (in ``[x, y]`` order) for the output data. If not provided,
-        the custom WCS must specify one of: pixel_shape,
-        array_shape, or bounding_box.
-
-    Returns
-    -------
-    wcs : ~gwcs.wcs.WCS
-        The output WCS to resample into.
-
-    """
-    if not asdf_wcs_file:
-        return None
-
-    with asdf.open(asdf_wcs_file) as af:
-        wcs = deepcopy(af.tree["wcs"])
-        wcs.pixel_area = af.tree.get("pixel_area", None)
-        wcs.pixel_shape = af.tree.get("pixel_shape", None)
-        wcs.array_shape = af.tree.get("array_shape", None)
-
-    if output_shape is not None:
-        wcs.array_shape = output_shape[::-1]
-        wcs.pixel_shape = output_shape
-    elif wcs.pixel_shape is not None:
-        wcs.array_shape = wcs.pixel_shape[::-1]
-    elif wcs.array_shape is not None:
-        wcs.pixel_shape = wcs.array_shape[::-1]
-    elif wcs.bounding_box is not None:
-        wcs.array_shape = tuple(
-            int(axs[1] + 0.5)
-            for axs in wcs.bounding_box.bounding_box(order="C")
-        )
-    else:
-        raise ValueError(
-            "Step argument 'output_shape' is required when custom WCS "
-            "does not have neither of 'array_shape', 'pixel_shape', or "
-            "'bounding_box' attributes set."
-        )
-
-    return wcs
-
-
 def build_mask(dqarr, bitvalue, flag_name_map=None):
     """Build a bit mask from an input DQ array and a bitvalue flag
 
diff --git a/tests/resample/conftest.py b/tests/resample/conftest.py
index fb7c34ddc..08d396b0e 100644
--- a/tests/resample/conftest.py
+++ b/tests/resample/conftest.py
@@ -7,7 +7,7 @@
 from . helpers import make_gwcs
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope='function')
 def wcs_gwcs():
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
@@ -16,7 +16,7 @@ def wcs_gwcs():
     return make_gwcs(crpix, crval, pscale, shape)
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope='function')
 def wcs_fitswcs(wcs_gwcs):
     fits_wcs = fitswcs.WCS(wcs_gwcs.to_fits_sip())
     fits_wcs.pixel_area = wcs_gwcs.pixel_area
@@ -25,7 +25,7 @@ def wcs_fitswcs(wcs_gwcs):
 
 
-@pytest.fixture(scope='module')
+@pytest.fixture(scope='function')
 def wcs_slicedwcs(wcs_gwcs):
     xmin, xmax = 100, 500
     slices = (slice(xmin, xmax), slice(xmin, xmax))
     sliced_wcs = fitswcs.wcsapi.SlicedLowLevelWCS(wcs_gwcs, slices)
diff --git a/tests/resample/test_resample_utils.py b/tests/resample/test_resample_utils.py
index 4da667340..a947bacda 100644
--- a/tests/resample/test_resample_utils.py
+++ b/tests/resample/test_resample_utils.py
@@ -1,8 +1,4 @@
 """ Test various utility functions """
-import asdf
-from asdf_astropy.testing.helpers import assert_model_equal
-
-from gwcs import coordinate_frames as cf
 from numpy.testing import assert_array_equal
 import numpy as np
 import pytest
@@ -14,7 +10,6 @@
     get_tmeasure,
     is_imaging_wcs,
     resample_range,
-    load_custom_wcs,
 )
 
 from . helpers import JWST_DQ_FLAG_DEF
@@ -28,40 +23,6 @@
 JWST_NAMES_INV = '~' + JWST_NAMES
 
 
-def _assert_frame_equal(a, b):
-    """ Copied from `gwcs`'s test_wcs.py """
-    __tracebackhide__ = True
-
-    assert type(a) is type(b)
-
-    if a is None:
-        return
-
-    if not isinstance(a, cf.CoordinateFrame):
-        return a == b
-
-    assert a.name == b.name  # nosec
-    assert a.axes_order == b.axes_order  # nosec
-    assert a.axes_names == b.axes_names  # nosec
-    assert a.unit == b.unit  # nosec
-    assert a.reference_frame == b.reference_frame  # nosec
-
-
-def _assert_wcs_equal(a, b):
-    """ Based on corresponding function from `gwcs`'s test_wcs.py """
-    assert a.name == b.name  # nosec
-
-    assert a.pixel_shape == b.pixel_shape
-    assert a.array_shape == b.array_shape
-    if a.array_shape is not None:
-        assert a.array_shape == b.pixel_shape[::-1]
-
-    assert len(a.available_frames) == len(b.available_frames)  # nosec
-    for a_step, b_step in zip(a.pipeline, b.pipeline):
-        _assert_frame_equal(a_step.frame, b_step.frame)
-        assert_model_equal(a_step.transform, b_step.transform)
-
-
 @pytest.mark.parametrize(
     'dq, bitvalues, expected', [
         (DQ, 0, np.array([1, 0, 0, 0, 0, 0, 0, 0, 0])),
@@ -120,58 +81,6 @@ def test_resample_range(data_shape, bbox, exception, truth):
     assert np.allclose(xyminmax, truth, rtol=0, atol=1e-12)
 
 
-def test_load_custom_wcs_no_shape(tmpdir, wcs_gwcs):
-    """
-    Test loading a WCS from an asdf file.
-    """
-    wcs_file = str(tmpdir / "wcs.asdf")
-    wcs_gwcs.pixel_shape = None
-    wcs_gwcs.array_shape = None
-    wcs_gwcs.bounding_box = None
-
-    with asdf.AsdfFile({"wcs": wcs_gwcs}) as af:
-        af.write_to(wcs_file)
-
-    with pytest.raises(ValueError):
-        load_custom_wcs(wcs_file, output_shape=None)
-
-
-@pytest.mark.parametrize(
-    "array_shape, pixel_shape, output_shape, expected",
-    [
-        # (None, None, None, (1000, 1000)),  # from the bounding box
-        # # (None, (123, 456), None, (456, 123)),  # fails
-        # ((456, 123), None, None, (456, 123)),
-        # ((456, 123), None, (567, 890), (890, 567)),
-        ((456, 123), (123, 456), (567, 890), (890, 567)),
-        ((456, 123), (123, 456), None, (890, 567)),
-    ]
-)
-def test_load_custom_wcs(tmpdir, wcs_gwcs, array_shape, pixel_shape,
-                         output_shape, expected):
-    """
-    Test loading a WCS from an asdf file. `expected` is expected
-    ``wcs.array_shape``.
-
-    """
-    wcs_file = str(tmpdir / "wcs.asdf")
-
-    wcs_gwcs.pixel_shape = pixel_shape
-    wcs_gwcs.array_shape = array_shape
-
-    with asdf.AsdfFile({"wcs": wcs_gwcs}) as af:
-        af.write_to(wcs_file)
-
-    if output_shape is not None:
-        wcs_gwcs.array_shape = output_shape[::-1]
-
-    wcs_read = load_custom_wcs(wcs_file, output_shape=output_shape)
-
-    assert wcs_read.array_shape == expected
-
-    _assert_wcs_equal(wcs_gwcs, wcs_read)
-
-
 def test_get_tmeasure():
     model = {
         "measurement_time": 12.34,

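A side note on the fixture-scope change in conftest.py above: a module-scoped fixture hands the same object to every test in the module, so a test that mutates the WCS in place (as the removed load_custom_wcs tests did with pixel_shape, array_shape, and bounding_box) leaks that state into later tests. A minimal, self-contained sketch of the difference; the names below are illustrative only and not part of the patch:

    import pytest

    @pytest.fixture(scope="function")      # a fresh object for every test
    def wcs_like():
        # stand-in for the gwcs object built by make_gwcs() in the real fixtures
        class _W:
            pixel_shape = (1000, 1000)
        return _W()

    def test_can_mutate_freely(wcs_like):
        wcs_like.pixel_shape = None        # does not leak into other tests

    def test_sees_pristine_value(wcs_like):
        assert wcs_like.pixel_shape == (1000, 1000)
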
From 70353a8d9873f3c0aad962d41ad9560d810654cc Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 4 Feb 2025 00:00:19 -0500
Subject: [PATCH 23/35] clean docs/conf.py

---
 docs/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/conf.py b/docs/conf.py
index a4594f209..41092cec0 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -77,7 +77,7 @@
 html_use_index = True
 
 # Enable nitpicky mode - which ensures that all references in the docs resolve.
-nitpicky = False
+nitpicky = False  # True does not work with enumerated parameter values
 
 nitpick_ignore = [
     ("py:class", "optional"),

From deee180c1ec41844dee7e65549e0632416f75cf0 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 4 Feb 2025 01:56:40 -0500
Subject: [PATCH 24/35] Add more tests; remove unused fixtures; fix bugs

---
 src/stcal/resample/resample.py  | 30 +++++++----
 tests/resample/conftest.py      | 21 --------
 tests/resample/helpers.py       | 47 +++++++++++++++++
 tests/resample/test_resample.py | 94 ++++++++++++++++++++++++++++++---
 4 files changed, 153 insertions(+), 39 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 21e66ddae..0d9c18ab2 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -291,6 +291,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                 accumulate=accumulate,
                 enable_ctx=enable_ctx,
                 enable_var=enable_var,
+                compute_err=compute_err,
             )
             self._output_model = output_model
             self._output_wcs = output_model["wcs"]
@@ -545,7 +546,7 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
                 )
 
     def validate_output_model(self, output_model, accumulate,
-                              enable_ctx, enable_var):
+                              enable_ctx, enable_var, compute_err):
         """ Checks that ``output_model`` dictionary has all the required
         keywords that the code expects it to have based on the values
         of ``accumulate``, ``enable_ctx``, ``enable_var``. It will raise
@@ -586,6 +587,7 @@ def validate_output_model(self, output_model, accumulate,
             accumulate=accumulate,
             enable_ctx=enable_ctx,
             enable_var=enable_var,
+            compute_err=compute_err,
         )
 
         for attr in required_attributes:
@@ -597,7 +599,10 @@ def validate_output_model(self, output_model, accumulate,
         model_wcs = output_model["wcs"]
         self.check_output_wcs(model_wcs, estimate_output_shape=False)
         wcs_shape = model_wcs.array_shape
-        ref_shape = output_model["data"].shape
+        if output_model["data"] is None:
+            ref_shape = wcs_shape
+        else:
+            ref_shape = output_model["data"].shape
         if accumulate and wcs_shape is None:
             raise ValueError(
                 "Output model's 'wcs' must have 'array_shape' attribute "
@@ -611,12 +616,15 @@ def validate_output_model(self, output_model, accumulate,
             )
 
         for attr in required_attributes.difference(["data", "wcs"]):
-            if (isinstance(output_model[attr], np.ndarray) and
-                    not np.array_equiv(output_model[attr].shape, ref_shape)):
-                raise ValueError(
-                    "'output_wcs.array_shape' value is not consistent "
-                    f"with the shape of the '{attr}' array."
-                )
+            if isinstance(output_model[attr], np.ndarray):
+                model_shape = output_model[attr].shape
+                if attr == "con":
+                    model_shape = model_shape[1:]
+                if not np.array_equiv(model_shape, ref_shape):
+                    raise ValueError(
+                        "'output_wcs.array_shape' value is not consistent "
+                        f"with the shape of the '{attr}' array."
+                    )
 
         # TODO: also check "pixfrac", "kernel", "fillval", "weight_type"
         # with initializer parameters. log a warning if different.
@@ -805,7 +813,7 @@ def _get_intensity_scale(self, model):
                     # update output model if "pixel_scale_ratio" was never
                     # set previously:
                     if (self._output_model is not None and
-                            self._output_model["pixel_scale_ratio"] is None):
+                            self._output_model.get("pixel_scale_ratio") is None):
                         self._output_model["pixel_scale_ratio"] = self._pixel_scale_ratio
 
                 iscale = math.sqrt(photom_pixel_area / input_pixel_area)
@@ -1549,12 +1557,12 @@ def init_time_counters(self):
         if (start_time := self.output_model.get("start_time", None)) is None:
             self._exptime_start = []
         else:
-            self._exptime_start[start_time]
+            self._exptime_start = [start_time]
 
         if (end_time := self.output_model.get("end_time", None)) is None:
             self._exptime_end = []
         else:
-            self._exptime_end[end_time]
+            self._exptime_end = [end_time]
 
         self._measurement_time_success = []
 
diff --git a/tests/resample/conftest.py b/tests/resample/conftest.py
index 08d396b0e..0b279eddd 100644
--- a/tests/resample/conftest.py
+++ b/tests/resample/conftest.py
@@ -1,9 +1,6 @@
 """ Test various utility functions """
 import pytest
 
-from astropy import wcs as fitswcs
-import numpy as np
-
 from . helpers import make_gwcs
 
 
@@ -14,21 +11,3 @@ def wcs_gwcs():
     shape = (1000, 1000)
     pscale = 0.06 / 3600
     return make_gwcs(crpix, crval, pscale, shape)
-
-
-@pytest.fixture(scope='function')
-def wcs_fitswcs(wcs_gwcs):
-    fits_wcs = fitswcs.WCS(wcs_gwcs.to_fits_sip())
-    fits_wcs.pixel_area = wcs_gwcs.pixel_area
-    fits_wcs.pixel_scale = wcs_gwcs.pixel_scale
-    return fits_wcs
-
-
-@pytest.fixture(scope='function')
-def wcs_slicedwcs(wcs_gwcs):
-    xmin, xmax = 100, 500
-    slices = (slice(xmin, xmax), slice(xmin, xmax))
-    sliced_wcs = fitswcs.wcsapi.SlicedLowLevelWCS(wcs_gwcs, slices)
-    sliced_wcs.pixel_area = wcs_gwcs.pixel_area
-    sliced_wcs.pixel_scale = wcs_gwcs.pixel_scale
-    return sliced_wcs
diff --git a/tests/resample/helpers.py b/tests/resample/helpers.py
index 78ad9a230..f873e956a 100644
--- a/tests/resample/helpers.py
+++ b/tests/resample/helpers.py
@@ -112,3 +112,50 @@ def make_input_model(crpix, crval, pscale, shape, group_id=1, exptime=1):
     model["err"] = np.sqrt(3.0) * np.ones(shape, dtype=np.float32)
 
     return model
+
+
+def make_output_model(crpix, crval, pscale, shape):
+    w = make_gwcs(
+        crpix=crpix,
+        crval=crval,
+        pscale=pscale,
+        shape=shape
+    )
+
+    model = {
+        # WCS:
+        "wcs": w,
+        "pixelarea_steradians": w.pixel_area,
+        "pixelarea_arcsecsq": w.pixel_area * (np.rad2deg(1) * 3600)**2,
+
+        # main arrays:
+        "data": None,
+        "wht": None,
+        "con": None,
+
+        # error arrays:
+        "var_rnoise": None,
+        "var_flat": None,
+        "var_poisson": None,
+        "err": None,
+
+        # accumulate-specific:
+        "n_coadds": 0,
+
+        # drizzle info:
+        "pointings": 0,
+
+        # exposure time:
+        "exposure_time": 0.0,
+        "measurement_time": None,
+        "start_time": None,
+        "end_time": None,
+        "duration": 0.0,
+
+        # other meta:
+        "filename": "",
+        "s_region": compute_s_region_imaging(w),
+        "bunit_data": "MJy",
+    }
+
+    return model
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index 317be4a28..5d1d0406c 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -1,11 +1,19 @@
+import pytest
+
 from stcal.resample import Resample
 
 import numpy as np
 
-from . helpers import make_gwcs, make_input_model, JWST_DQ_FLAG_DEF
+from . helpers import (
+    make_gwcs,
+    make_input_model,
+    make_output_model,
+    JWST_DQ_FLAG_DEF,
+)
 
 
-def test_resample_defaults():
+@pytest.mark.parametrize("weight_type", ["ivm", "exptime"])
+def test_resample_defaults(weight_type):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -20,24 +28,96 @@ def test_resample_defaults():
 
     nmodels = 4
 
-    resample = Resample(n_input_models=nmodels, output_wcs=output_wcs)
+    resample = Resample(
+        n_input_models=nmodels,
+        output_wcs=output_wcs,
+        weight_type=weight_type
+    )
+    resample.dq_flag_name_map = JWST_DQ_FLAG_DEF
+
+    influx = 0.0
+    ttime = 0.0
+
+    for k in range(nmodels):
+        exptime = k + 1
+        im = make_input_model(
+            crpix=tuple(i - 6 * k for i in crpix),
+            crval=crval,
+            pscale=pscale,
+            shape=shape,
+            group_id=k + 1,
+            exptime=exptime
+        )
+        data_val = k + 0.5
+        im["data"][:, :] = data_val
+
+        influx += data_val * (exptime if weight_type == "exptime" else 1)
+        ttime += exptime
+
+        resample.add_model(im)
+
+    resample.finalize()
+
+    odata = resample.output_model["data"]
+    oweight = resample.output_model["wht"]
+
+    assert resample.output_model["pointings"] == nmodels
+    assert resample.output_model["exposure_time"] == ttime
+
+    # next assert assumes constant IVM
+    assert np.allclose(
+        np.sum(odata * oweight, dtype=float),
+        influx * np.prod(shape),
+        atol=0,
+        rtol=1e-6,
+    )
+
+    assert np.nansum(resample.output_model["var_flat"]) > 0.0
+    assert np.nansum(resample.output_model["var_poisson"]) > 0.0
+    assert np.nansum(resample.output_model["var_rnoise"]) > 0.0
+
+
+def test_resample_output_model():
+    crval = (150.0, 2.0)
+    crpix = (500.0, 500.0)
+    shape = (1000, 1000)
+    out_shape = (1200, 1200)
+    pscale = 0.06 / 3600
+
+    nmodels = 4
+
+    output_model = make_output_model(
+        crpix=(600, 600),
+        crval=crval,
+        pscale=pscale,
+        shape=out_shape,
+    )
+
+    resample = Resample(
+        n_input_models=nmodels,
+        output_model=output_model,
+        weight_type="exptime"
+    )
     resample.dq_flag_name_map = JWST_DQ_FLAG_DEF
 
     influx = 0.0
     ttime = 0.0
 
     for k in range(nmodels):
+        exptime = k + 1
         im = make_input_model(
             crpix=tuple(i - 6 * k for i in crpix),
             crval=crval,
             pscale=pscale,
             shape=shape,
             group_id=k + 1,
-            exptime=k + 1
+            exptime=exptime
         )
-        im["data"][:, :] = k + 0.5
-        influx += k + 0.5
-        ttime += k + 1
+        data_val = k + 0.5
+        im["data"][:, :] = data_val
+
+        influx += data_val * exptime
+        ttime += exptime
 
         resample.add_model(im)
 

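For reference, the totals asserted in test_resample_defaults above follow directly from the loop: input k carries constant data k + 0.5 and exposure time k + 1, so for four inputs the bookkeeping is just (a worked check of the test arithmetic):

    nmodels = 4
    ttime = sum(k + 1 for k in range(nmodels))                         # 1 + 2 + 3 + 4 = 10
    influx_ivm = sum(k + 0.5 for k in range(nmodels))                  # 0.5 + 1.5 + 2.5 + 3.5 = 6.0
    influx_exptime = sum((k + 0.5) * (k + 1) for k in range(nmodels))  # 0.5 + 3.0 + 7.5 + 14.0 = 25.0

    # With a constant per-pixel weight, the test then expects
    # sum(data * wht) over the output to equal influx * (number of input pixels).
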
From 9f5c85fe05bb15216866300f190b085689b75d6e Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 4 Feb 2025 02:15:58 -0500
Subject: [PATCH 25/35] Add more tests

---
 tests/resample/test_resample.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index 5d1d0406c..828610286 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -77,7 +77,8 @@ def test_resample_defaults(weight_type):
     assert np.nansum(resample.output_model["var_rnoise"]) > 0.0
 
 
-def test_resample_output_model():
+@pytest.mark.parametrize("compute_err", ["from_var", "driz_err"])
+def test_resample_output_model(compute_err):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -92,11 +93,13 @@ def test_resample_output_model():
         pscale=pscale,
         shape=out_shape,
     )
+    output_model["wht"] = np.zeros(out_shape, dtype=np.float32)
 
     resample = Resample(
         n_input_models=nmodels,
         output_model=output_model,
-        weight_type="exptime"
+        weight_type="exptime",
+        compute_err=compute_err,
     )
     resample.dq_flag_name_map = JWST_DQ_FLAG_DEF
 

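As far as the surrounding resample.py code suggests, the two compute_err modes parametrized above differ in where the output "err" array comes from: "driz_err" drizzles the input "err" arrays directly (the self._driz_error.out_img branch in add_model), while "from_var" derives "err" from the resampled variance components during finalize. A hedged usage sketch, assuming output_model was built by the make_output_model test helper:

    resample = Resample(
        n_input_models=4,
        output_model=output_model,
        weight_type="exptime",
        compute_err="driz_err",   # or "from_var" to build "err" from the variance arrays
    )
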
From 2afcae97dc7a01ae4cab0b0eda974a35a0ca379b Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 4 Feb 2025 02:38:52 -0500
Subject: [PATCH 26/35] increase coverage

---
 src/stcal/resample/resample.py  |  4 ++--
 tests/resample/test_resample.py | 14 ++++++++++----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 0d9c18ab2..c137e5693 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -256,7 +256,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self.fillval = fillval
         self.good_bits = good_bits
 
-        if weight_type in ["ivm", "exptime"]:
+        if weight_type.startswith("ivm") or weight_type == "exptime":
             self.weight_type = weight_type
         else:
             raise ValueError("Unexpected weight type: '{self.weight_type}'")
@@ -1235,7 +1235,7 @@ def resample_variance_arrays(self, model, pixmap, iscale,
                 mask = np.full_like(rn_var, False)
 
             # Set the weight for the image from the weight type
-            if self.weight_type == "ivm" and rn_var is not None:
+            if self.weight_type.startswith("ivm") and rn_var is not None:
                 weight = np.ones(self.output_array_shape)
                 weight[mask] = np.reciprocal(rn_var[mask])
 
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index 828610286..27f457fb1 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -77,8 +77,14 @@ def test_resample_defaults(weight_type):
     assert np.nansum(resample.output_model["var_rnoise"]) > 0.0
 
 
-@pytest.mark.parametrize("compute_err", ["from_var", "driz_err"])
-def test_resample_output_model(compute_err):
+@pytest.mark.parametrize(
+    "compute_err,weight_type",
+    [
+        ("from_var", "ivm-smed"),
+        ("driz_err", "ivm-med5")
+    ]
+)
+def test_resample_output_model(compute_err, weight_type):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -98,7 +104,7 @@ def test_resample_output_model(compute_err):
     resample = Resample(
         n_input_models=nmodels,
         output_model=output_model,
-        weight_type="exptime",
+        weight_type=weight_type,
         compute_err=compute_err,
     )
     resample.dq_flag_name_map = JWST_DQ_FLAG_DEF
@@ -119,7 +125,7 @@ def test_resample_output_model(compute_err):
         data_val = k + 0.5
         im["data"][:, :] = data_val
 
-        influx += data_val * exptime
+        influx += data_val
         ttime += exptime
 
         resample.add_model(im)

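The ivm-* weight types accepted above encode an optional median-filter kernel size in their suffix ("ivm-med5" filters the whole inverse-variance map with a 5-pixel kernel, while "ivm-smed" applies a selective median with the default 3-pixel kernel at partially saturated pixels). A small sketch of the suffix parsing as written in build_driz_weight; the helper name _ivm_kernel_size is illustrative only:

    def _ivm_kernel_size(weight_type):
        # mirrors the parsing in build_driz_weight:
        #   "ivm-smedN" -> selective median, kernel size N
        #   "ivm-medN"  -> full median filter, kernel size N
        selective = weight_type.startswith("ivm-smed")
        ksz = weight_type[8 if selective else 7:]
        return int(ksz) if ksz else 3      # defaults to 3; must be an odd integer

    assert _ivm_kernel_size("ivm-med5") == 5
    assert _ivm_kernel_size("ivm-smed") == 3
    assert _ivm_kernel_size("ivm-smed7") == 7
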
From 9203a33d4c3a04e48a95addebebfc4194eda33c0 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Tue, 4 Feb 2025 02:50:09 -0500
Subject: [PATCH 27/35] increase coverage

---
 tests/resample/test_resample.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index 27f457fb1..898916274 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -12,8 +12,11 @@
 )
 
 
-@pytest.mark.parametrize("weight_type", ["ivm", "exptime"])
-def test_resample_defaults(weight_type):
+@pytest.mark.parametrize(
+    "weight_type,wcs_dict",
+    [("ivm", True), ("exptime", False)]
+)
+def test_resample_defaults(weight_type, wcs_dict):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -25,6 +28,11 @@ def test_resample_defaults(weight_type):
         pscale=pscale,
         shape=(1200, 1200)
     )
+    if wcs_dict:
+        output_wcs = {
+            "wcs": output_wcs,
+            "pixel_scale": pscale * 3600,
+        }
 
     nmodels = 4
 

From e30d6a969029f4cdc05de8c1c8a37a4fe73ee63f Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 5 Feb 2025 02:41:18 -0500
Subject: [PATCH 28/35] Move build_driz_weight out of Resample to utils module

---
 src/stcal/resample/resample.py        | 168 +------------------------
 src/stcal/resample/utils.py           | 172 +++++++++++++++++++++++++-
 tests/resample/helpers.py             |   3 +-
 tests/resample/test_resample.py       |   8 +-
 tests/resample/test_resample_utils.py |  44 ++++++-
 5 files changed, 225 insertions(+), 170 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index c137e5693..5fcfd90d2 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -4,21 +4,16 @@
 import sys
 
 import numpy as np
-from scipy.ndimage import median_filter
 
-from astropy import units as u
-from astropy.nddata.bitmask import (
-    bitfield_to_boolean_mask,
-    interpret_bit_flags,
-)
 from drizzle.utils import calc_pixmap
 from drizzle.resample import Drizzle
 
-
 from stcal.resample.utils import (
+    build_driz_weight,
     compute_mean_pixel_area,
     get_tmeasure,
     resample_range,
+    is_flux_density,
 )
 
 
@@ -71,7 +66,7 @@ class Resample:
         "err": np.float32,
     }
 
-    dq_flag_name_map = {}  # type: dict[str, int]
+    dq_flag_name_map = None
 
     def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                  fillval=0.0, weight_type="ivm", good_bits=0,
@@ -785,7 +780,7 @@ def _get_intensity_scale(self, model):
 
                 # If input image is in flux density units, correct the
                 # flux for the user-specified change to the spatial dimension
-                if _is_flux_density(model["bunit_data"]):
+                if is_flux_density(model["bunit_data"]):
                     iscale = 1.0 / math.sqrt(self.pixel_scale_ratio)
                 else:
                     iscale = 1.0
@@ -1010,10 +1005,11 @@ def add_model(self, model):
 
         log.info("Resampling science and variance data")
 
-        weight = self.build_driz_weight(
+        weight = build_driz_weight(
             model,
             weight_type=self.weight_type,
             good_bits=self.good_bits,
+            flag_name_map=self.dq_flag_name_map
         )
 
         # apply sky subtraction
@@ -1414,137 +1410,6 @@ def _resample_one_variance_array(self, name, model, iscale,
 
         return driz.out_img ** 2
 
-    def build_driz_weight(self, model, weight_type=None, good_bits=None):
-        """ Create a weight map that is used for weighting input images when
-        they are co-added to the ouput model.
-
-        Parameters
-        ----------
-        model : dict
-            Input model: a dictionar of relevant keywords and values.
-
-        weight_type : {"exptime", "ivm"}, optional
-            The weighting type for adding models' data. For
-            ``weight_type="ivm"`` (the default), the weighting will be
-            determined per-pixel using the inverse of the read noise
-            (VAR_RNOISE) array stored in each input image. If the
-            ``VAR_RNOISE`` array does not exist,
-            the variance is set to 1 for all pixels (i.e., equal weighting).
-            If ``weight_type="exptime"``, the weight will be set equal
-            to the measurement time (``TMEASURE``) when available and to
-            the exposure time (``EFFEXPTM``) otherwise.
-
-        good_bits : int, str, None, optional
-            An integer bit mask, `None`, a Python list of bit flags, a comma-,
-            or ``'|'``-separated, ``'+'``-separated string list of integer
-            bit flags or mnemonic flag names that indicate what bits in models'
-            DQ bitfield array should be *ignored* (i.e., zeroed).
-
-            See `Resample` for more information.
-
-        """
-        data = model["data"]
-        dq = model["dq"]
-
-        dqmask = bitfield_to_boolean_mask(
-            dq,
-            good_bits,
-            good_mask_value=1,
-            dtype=np.uint8,
-            flag_name_map=self.dq_flag_name_map,
-        )
-
-        if weight_type and weight_type.startswith('ivm'):
-            weight_type = weight_type.strip()
-            selective_median = weight_type.startswith('ivm-smed')
-            bitvalue = interpret_bit_flags(
-                good_bits,
-                flag_name_map=self.dq_flag_name_map
-            )
-            if bitvalue is None:
-                bitvalue = 0
-
-            # disable selective median if SATURATED flag is included
-            # in good_bits:
-            try:
-                saturation = self.dq_flag_name_map["SATURATED"]
-                if selective_median and not (bitvalue & saturation):
-                    selective_median = False
-                    weight_type = 'ivm'
-            except AttributeError:
-                pass
-
-            var_rnoise = model["var_rnoise"]
-            if (var_rnoise is not None and var_rnoise.shape == data.shape):
-                with np.errstate(divide="ignore", invalid="ignore"):
-                    inv_variance = var_rnoise**-1
-
-                inv_variance[~np.isfinite(inv_variance)] = 1
-
-                if weight_type != 'ivm':
-                    ny, nx = data.shape
-
-                    # apply a median filter to smooth the weight at saturated
-                    # (or high read-out noise) single pixels. keep kernel size
-                    # small to still give lower weight to extended CRs, etc.
-                    ksz = weight_type[8 if selective_median else 7:]
-                    if ksz:
-                        kernel_size = int(ksz)
-                        if not (kernel_size % 2):
-                            raise ValueError(
-                                'Kernel size of the median filter in IVM '
-                                'weighting must be an odd integer.'
-                            )
-                    else:
-                        kernel_size = 3
-
-                    ivm_copy = inv_variance.copy()
-
-                    if selective_median:
-                        # apply median filter selectively only at
-                        # points of partially saturated sources:
-                        jumps = np.where(
-                            np.logical_and(dq & saturation, dqmask)
-                        )
-                        w2 = kernel_size // 2
-                        for r, c in zip(*jumps):
-                            x1 = max(0, c - w2)
-                            x2 = min(nx, c + w2 + 1)
-                            y1 = max(0, r - w2)
-                            y2 = min(ny, r + w2 + 1)
-                            data = ivm_copy[y1:y2, x1:x2][dqmask[y1:y2, x1:x2]]
-                            if data.size:
-                                inv_variance[r, c] = np.median(data)
-                            # else: leave it as is
-
-                    else:
-                        # apply median to the entire inv-var array:
-                        inv_variance = median_filter(
-                            inv_variance,
-                            size=kernel_size
-                        )
-                    bad_dqmask = np.logical_not(dqmask)
-                    inv_variance[bad_dqmask] = ivm_copy[bad_dqmask]
-
-            else:
-                warnings.warn(
-                    "var_rnoise array not available. "
-                    "Setting drizzle weight map to 1",
-                    RuntimeWarning
-                )
-                inv_variance = 1.0
-
-            weight = inv_variance * dqmask
-
-        elif weight_type == "exptime":
-            t, _ = get_tmeasure(model)
-            weight = np.full(data.shape, t)
-            weight *= dqmask
-
-        else:
-            weight = np.ones(data.shape, dtype=data.dtype) * dqmask
-
-        return weight.astype(np.float32)
 
     def init_time_counters(self):
         """ Initialize variables/arrays needed to process exposure time. """
@@ -1645,24 +1510,3 @@ def _get_model_name(model):
     if model_name is None or not model_name.strip():
         model_name = "Unknown"
     return model_name
-
-
-def _is_flux_density(bunit):
-    """
-    Differentiate between surface brightness and flux density data units.
-
-    Parameters
-    ----------
-    bunit : str or `~astropy.units.Unit`
-       Data units, e.g. 'MJy' (is flux density) or 'MJy/sr' (is not).
-
-    Returns
-    -------
-    bool
-        True if the units are equivalent to flux density units.
-    """
-    try:
-        flux_density = u.Unit(bunit).is_equivalent(u.Jy)
-    except (ValueError, TypeError):
-        flux_density = False
-    return flux_density
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 370067cdd..c61061051 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -1,15 +1,23 @@
 import logging
+import warnings
 
 import numpy as np
-from astropy.nddata.bitmask import interpret_bit_flags
+from scipy.ndimage import median_filter
+from astropy.nddata.bitmask import (
+    bitfield_to_boolean_mask,
+    interpret_bit_flags,
+)
+from astropy import units as u
 from spherical_geometry.polygon import SphericalPolygon  # type: ignore[import-untyped]
 
 
 __all__ = [
+    "build_driz_weight",
     "build_mask",
     "bytes2human",
     "compute_mean_pixel_area",
     "get_tmeasure",
+    "is_flux_density",
     "is_imaging_wcs",
     "resample_range",
 ]
@@ -52,6 +60,147 @@ def build_mask(dqarr, bitvalue, flag_name_map=None):
     return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)
 
 
+def build_driz_weight(model, weight_type=None, good_bits=None,
+                      flag_name_map=None):
+    """ Create a weight map that is used for weighting input images when
+    they are co-added to the output model.
+
+    Parameters
+    ----------
+    model : dict
+        Input model: a dictionar of relevant keywords and values.
+
+    weight_type : {"exptime", "ivm"}, optional
+        The weighting type for adding models' data. For
+        ``weight_type="ivm"`` (the default), the weighting will be
+        determined per-pixel using the inverse of the read noise
+        (VAR_RNOISE) array stored in each input image. If the
+        ``VAR_RNOISE`` array does not exist,
+        the variance is set to 1 for all pixels (i.e., equal weighting).
+        If ``weight_type="exptime"``, the weight will be set equal
+        to the measurement time (``TMEASURE``) when available and to
+        the exposure time (``EFFEXPTM``) otherwise.
+
+    good_bits : int, str, None, optional
+        An integer bit mask, `None`, a Python list of bit flags, a comma-,
+        or ``'|'``-separated, ``'+'``-separated string list of integer
+        bit flags or mnemonic flag names that indicate what bits in models'
+        DQ bitfield array should be *ignored* (i.e., zeroed).
+
+        See `Resample` for more information.
+
+    flag_name_map : astropy.nddata.BitFlagNameMap, dict, None, optional
+        A `~astropy.nddata.BitFlagNameMap` object or a dictionary that provides
+        a mapping from mnemonic bit flag names to integer bit values, used to
+        translate mnemonic flags to numeric values when ``good_bits`` is a
+        comma- or '+'-separated list of mnemonic bit flag names.
+
+    """
+    data = model["data"]
+    dq = model["dq"]
+
+    dqmask = bitfield_to_boolean_mask(
+        dq,
+        good_bits,
+        good_mask_value=1,
+        dtype=np.uint8,
+        flag_name_map=flag_name_map,
+    )
+
+    if weight_type and weight_type.startswith('ivm'):
+        weight_type = weight_type.strip()
+        selective_median = weight_type.startswith('ivm-smed')
+        bitvalue = interpret_bit_flags(
+            good_bits,
+            flag_name_map=flag_name_map
+        )
+        if bitvalue is None:
+            bitvalue = 0
+
+        # disable selective median if SATURATED flag is included
+        # in good_bits:
+        try:
+            if flag_name_map is not None:
+                saturation = flag_name_map["SATURATED"]
+                if selective_median and not (bitvalue & saturation):
+                    selective_median = False
+                    weight_type = 'ivm'
+        except AttributeError:
+            pass
+
+        var_rnoise = model["var_rnoise"]
+        if (var_rnoise is not None and var_rnoise.shape == data.shape):
+            with np.errstate(divide="ignore", invalid="ignore"):
+                inv_variance = var_rnoise**-1
+
+            inv_variance[~np.isfinite(inv_variance)] = 1
+
+            if weight_type != 'ivm':
+                ny, nx = data.shape
+
+                # apply a median filter to smooth the weight at saturated
+                # (or high read-out noise) single pixels. keep kernel size
+                # small to still give lower weight to extended CRs, etc.
+                ksz = weight_type[8 if selective_median else 7:]
+                if ksz:
+                    kernel_size = int(ksz)
+                    if not (kernel_size % 2):
+                        raise ValueError(
+                            'Kernel size of the median filter in IVM '
+                            'weighting must be an odd integer.'
+                        )
+                else:
+                    kernel_size = 3
+
+                ivm_copy = inv_variance.copy()
+
+                if selective_median:
+                    # apply median filter selectively only at
+                    # points of partially saturated sources:
+                    jumps = np.where(
+                        np.logical_and(dq & saturation, dqmask)
+                    )
+                    w2 = kernel_size // 2
+                    for r, c in zip(*jumps):
+                        x1 = max(0, c - w2)
+                        x2 = min(nx, c + w2 + 1)
+                        y1 = max(0, r - w2)
+                        y2 = min(ny, r + w2 + 1)
+                        data = ivm_copy[y1:y2, x1:x2][dqmask[y1:y2, x1:x2]]
+                        if data.size:
+                            inv_variance[r, c] = np.median(data)
+                        # else: leave it as is
+
+                else:
+                    # apply median to the entire inv-var array:
+                    inv_variance = median_filter(
+                        inv_variance,
+                        size=kernel_size
+                    )
+                bad_dqmask = np.logical_not(dqmask)
+                inv_variance[bad_dqmask] = ivm_copy[bad_dqmask]
+
+        else:
+            warnings.warn(
+                "var_rnoise array not available. "
+                "Setting drizzle weight map to 1",
+                RuntimeWarning
+            )
+            inv_variance = 1.0
+
+        weight = inv_variance * dqmask
+
+    elif weight_type == "exptime":
+        t, _ = get_tmeasure(model)
+        weight = np.full(data.shape, t)
+        weight *= dqmask
+
+    else:
+        weight = np.ones(data.shape, dtype=data.dtype) * dqmask
+
+    return weight.astype(np.float32)
+
+
 def get_tmeasure(model):
     """
     Check if the measurement_time keyword is present in the datamodel
@@ -341,3 +490,24 @@ def _get_boundary_points(xmin, xmax, ymin, ymax, dx=None, dy=None,
     center = (0.5 * (xmin + xmax), 0.5 * (ymin + ymax))
 
     return x, y, area, center, bottom, right, top, left
+
+
+def is_flux_density(bunit):
+    """
+    Differentiate between surface brightness and flux density data units.
+
+    Parameters
+    ----------
+    bunit : str or `~astropy.units.Unit`
+       Data units, e.g. 'MJy' (is flux density) or 'MJy/sr' (is not).
+
+    Returns
+    -------
+    bool
+        True if the units are equivalent to flux density units.
+    """
+    try:
+        flux_density = u.Unit(bunit).is_equivalent(u.Jy)
+    except (ValueError, TypeError):
+        flux_density = False
+    return flux_density
diff --git a/tests/resample/helpers.py b/tests/resample/helpers.py
index f873e956a..d8b231a22 100644
--- a/tests/resample/helpers.py
+++ b/tests/resample/helpers.py
@@ -72,7 +72,8 @@ def make_gwcs(crpix, crval, pscale, shape):
     return wnew
 
 
-def make_input_model(crpix, crval, pscale, shape, group_id=1, exptime=1):
+def make_input_model(shape, crpix=(0, 0), crval=(0, 0), pscale=2.0e-5,
+                     group_id=1, exptime=1):
     w = make_gwcs(
         crpix=crpix,
         crval=crval,
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index 898916274..a194c7d71 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -16,7 +16,7 @@
     "weight_type,wcs_dict",
     [("ivm", True), ("exptime", False)]
 )
-def test_resample_defaults(weight_type, wcs_dict):
+def test_resample_mostly_defaults(weight_type, wcs_dict):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -49,10 +49,10 @@ def test_resample_defaults(weight_type, wcs_dict):
     for k in range(nmodels):
         exptime = k + 1
         im = make_input_model(
+            shape=shape,
             crpix=tuple(i - 6 * k for i in crpix),
             crval=crval,
             pscale=pscale,
-            shape=shape,
             group_id=k + 1,
             exptime=exptime
         )
@@ -92,7 +92,7 @@ def test_resample_defaults(weight_type, wcs_dict):
         ("driz_err", "ivm-med5")
     ]
 )
-def test_resample_output_model(compute_err, weight_type):
+def test_resample_custom_output_model(compute_err, weight_type):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -123,10 +123,10 @@ def test_resample_output_model(compute_err, weight_type):
     for k in range(nmodels):
         exptime = k + 1
         im = make_input_model(
+            shape=shape,
             crpix=tuple(i - 6 * k for i in crpix),
             crval=crval,
             pscale=pscale,
-            shape=shape,
             group_id=k + 1,
             exptime=exptime
         )
diff --git a/tests/resample/test_resample_utils.py b/tests/resample/test_resample_utils.py
index a947bacda..4eb1cda9e 100644
--- a/tests/resample/test_resample_utils.py
+++ b/tests/resample/test_resample_utils.py
@@ -4,17 +4,19 @@
 import pytest
 
 from stcal.resample.utils import (
+    build_driz_weight,
     build_mask,
     bytes2human,
     compute_mean_pixel_area,
     get_tmeasure,
+    is_flux_density,
     is_imaging_wcs,
     resample_range,
 )
 
-from . helpers import JWST_DQ_FLAG_DEF
-
+from . helpers import make_input_model, JWST_DQ_FLAG_DEF
 
+GOOD = 0
 DQ = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
 BITVALUES = 2**0 + 2**2
 BITVALUES_STR = f'{2**0}, {2**2}'
@@ -120,3 +122,41 @@ def test_compute_mean_pixel_area(wcs_gwcs):
     assert abs(
         compute_mean_pixel_area(wcs_gwcs) / area - 1.0
     ) < 1e-5
+
+
+@pytest.mark.parametrize('unit,result',
+                         [('Jy', True), ('MJy', True),
+                          ('MJy/sr', False), ('DN/s', False),
+                          ('bad_unit', False), (None, False)])
+def test_is_flux_density(unit, result):
+    assert is_flux_density(unit) is result
+
+
+@pytest.mark.parametrize("weight_type", ["ivm", "exptime"])
+def test_build_driz_weight(weight_type):
+    """Check that correct weight map is returned of different weight types"""
+
+    model = make_input_model((10, 10))
+
+    model["dq"][0] = JWST_DQ_FLAG_DEF.DO_NOT_USE
+    model["measurement_time"] = 10.0
+    model["var_rnoise"] /= 10.0
+
+    weight_map = build_driz_weight(
+        model,
+        weight_type=weight_type,
+        good_bits=GOOD
+    )
+    assert_array_equal(weight_map[0], 0)
+    assert_array_equal(weight_map[1:], 10.0)
+    assert weight_map.dtype == np.float32
+
+
+@pytest.mark.parametrize("weight_type", ["ivm", None])
+def test_build_driz_weight_zeros(weight_type):
+    """Check that zero or not finite weight maps get set to 1"""
+    model = make_input_model((10, 10))
+
+    weight_map = build_driz_weight(model, weight_type=weight_type)
+
+    assert_array_equal(weight_map, 1)

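With build_driz_weight now a standalone utility, a typical call mirrors the new test above; make_input_model and JWST_DQ_FLAG_DEF are the helpers from tests/resample/helpers.py, and the numbers follow the test setup:

    from stcal.resample.utils import build_driz_weight

    model = make_input_model((10, 10))
    model["dq"][0] = JWST_DQ_FLAG_DEF.DO_NOT_USE   # flag the first row as bad
    model["measurement_time"] = 10.0               # used when weight_type="exptime"
    model["var_rnoise"] /= 10.0                    # per the test, the IVM weight becomes 10

    wht = build_driz_weight(
        model,
        weight_type="ivm",                         # or "exptime"
        good_bits=0,
        flag_name_map=JWST_DQ_FLAG_DEF,
    )
    # flagged pixels get weight 0; all others get 1/var_rnoise
    # (or the measurement time when weight_type="exptime")
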
From cf7c56e15a70dea8ad1b09fd608c88801cbc42f3 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 5 Feb 2025 02:49:25 -0500
Subject: [PATCH 29/35] fix docstring

---
 src/stcal/resample/resample.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 5fcfd90d2..d2df3bf82 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -1165,14 +1165,14 @@ def resample_variance_arrays(self, model, pixmap, iscale,
             (``out_img``) coordinates. ``pixmap`` must be an array of shape
             ``(Ny, Nx, 2)`` where ``(Ny, Nx)`` is the shape of the input image.
             ``pixmap[..., 0]`` forms a 2D array of X-coordinates of input
-            pixels in the ouput frame and ``pixmap[..., 1]`` forms a 2D array of
-            Y-coordinates of input pixels in the ouput coordinate frame.
+            pixels in the output frame and ``pixmap[..., 1]`` forms a 2D array
+            of Y-coordinates of input pixels in the output coordinate frame.
 
         iscale : float
             The scale to apply to the input variance data before drizzling.
 
-        weight_map : 2D array, None, optional
-            A 2D numpy array containing the pixel by pixel weighting.
+        weight_map : numpy.ndarray, None, optional
+            A 2D ``numpy`` array containing the pixel by pixel weighting.
             Must have the same dimensions as ``data``.
 
             When ``weight_map`` is `None`, the weight of input data pixels will

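For reference, the pixmap described in the corrected docstring is the array produced by drizzle.utils.calc_pixmap, which resample.py already imports; a short sketch, assuming wcs_in and wcs_out are the input and output WCS objects with array_shape set:

    from drizzle.utils import calc_pixmap

    pixmap = calc_pixmap(wcs_in, wcs_out)   # shape (Ny, Nx, 2) for an (Ny, Nx) input image
    x_out = pixmap[..., 0]                  # X coordinates of input pixels in the output frame
    y_out = pixmap[..., 1]                  # Y coordinates of input pixels in the output frame
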
From 001b9ca6d0ba40e55a1ab629a2d8f3bdf2d71ce3 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Thu, 6 Feb 2025 16:17:13 -0500
Subject: [PATCH 30/35] address reviewers' comments; clarify finalized; add
 locked flag

---
 src/stcal/resample/resample.py        | 72 +++++++++++++++++++--------
 src/stcal/resample/utils.py           | 44 ++--------------
 tests/resample/test_resample.py       |  4 ++
 tests/resample/test_resample_utils.py | 12 -----
 4 files changed, 58 insertions(+), 74 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index d2df3bf82..a0d26b7f9 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -115,8 +115,8 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             (VAR_RNOISE) array stored in each input image. If the
             ``VAR_RNOISE`` array does not exist, the variance is set to 1 for
             all pixels (i.e., equal weighting). If ``weight_type="exptime"``,
-            the weight will be set equal to the measurement time (``TMEASURE``)
-            when available and to the exposure time (``EFFEXPTM``) otherwise.
+            the weight will be set equal to the measurement time
+            when available and to the exposure time otherwise.
 
         good_bits : int, str, None, optional
             An integer bit mask, `None`, a Python list of bit flags, a comma-,
@@ -227,10 +227,10 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
 
         """
         # to see if setting up arrays and drizzle is needed
+        self._locked = False
         self._finalized = False
         self._n_res_models = 0
 
-        self._n_predicted_input_models = n_input_models
         self._output_model = output_model
         self._create_new_output_model = output_model is not None
 
@@ -819,16 +819,30 @@ def _get_intensity_scale(self, model):
         return iscale
 
     @property
-    def finalized(self):
-        """ Indicates whether the output model was "finalized". """
-        return self._finalized
+    def is_locked(self):
+        """ Indicates whether the output model has been "locked". No further
+        calls to :py:meth:`add_model` if output model is locked.
+        """
+        return self._locked
+
+    def lock(self):
+        """ Sets "locked" flag to `True` indicating that intermediate
+        arrays have been freed and therefore no further addition of models
+        to the output model via :py:meth:`add_model` is possible.
+
+        Subclasses should call this method whenever they perform an operation
+        that would prevent further addition of input models to the output
+        model.
+        """
+        self._locked = True
 
     def reset_arrays(self, reset_output=True, n_input_models=None):
         """ Initialize/reset `Drizzle` objects, output model and arrays,
-        and time counters. Output WCS and shape are not modified from
-        `Resample` object initialization. This method needs to be called
-        before calling :py:meth:`add_model` for the first time if
-        :py:meth:`finalize` was previously called.
+        and time counters, and clear the "locked" flag. Output WCS and shape
+        are not modified from `Resample` object initialization. This method
+        needs to be called before calling :py:meth:`add_model` for the first
+        time if :py:meth:`finalize` was previously called and locked the output
+        model.
 
         Parameters
         ----------
@@ -884,6 +898,7 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
 
         self.init_time_counters()
 
+        self._locked = False
         self._finalized = False
 
     def validate_input_model(self, model):
@@ -953,8 +968,9 @@ def add_model(self, model):
         is `True`) , and error data (if ``enable_err`` is `True`), and adds
         them to the corresponding
         arrays of the output model using appropriate weighting.
-        It also updates the weight array and context array (if ``enable_ctx`` is `True`)
-        of the resampled data, as well as relevant metadata such as "n_coadds".
+        It also updates the weight array and context array (if ``enable_ctx``
+        is `True`) of the resampled data, as well as relevant metadata such as
+        "n_coadds".
 
         Whenever ``model`` has a unique group ID that was never processed
         before, the "pointings" value of the output model is incremented and
@@ -969,12 +985,13 @@ def add_model(self, model):
             and values of actual models used by pipelines.
 
         """
-        if self._finalized:
+        if self._locked:
             raise RuntimeError(
-                "Resampling has been finalized and intermediate arrays have "
+                "Resampling has been locked and intermediate arrays have "
                 "been freed. Unable to add new models. Call 'reset_arrays' "
                 "to initialize a new output model and associated arrays."
             )
+        self._finalized = False
         self.validate_input_model(model)
         self._n_res_models += 1
 
@@ -1055,8 +1072,16 @@ def add_model(self, model):
             # use resampled error
             self.output_model["err"] = self._driz_error.out_img
 
+    def is_finalized(self):
+        """ Indicates whether all attributes of the ``output_model`` have been
+        computed from intermediate (running) values.
+        """
+        return self._finalized
+
     def finalize(self, free_memory=True):
-        """ Finalizes all computations and frees temporary objects.
+        """ Performs final computations from any intermediate values,
+        sets output model values, and optionally frees temporary/intermediate
+        objects.
 
         ``finalize`` calls :py:meth:`~Resample.finalize_resample_variance` and
         :py:meth:`~Resample.finalize_time_info`.
@@ -1066,13 +1091,21 @@ def finalize(self, free_memory=True):
           with ``free_memory=True`` then intermediate arrays holding variance
           weights will be lost and so continuing adding new models after
           a call to :py:meth:`~Resample.finalize` will result in incorrect
-          variance.
+          variance. In this case `finalize` will set the locked flag to `True`.
+
+        .. note::
+          If `Resample` is subclassed and :py:meth:`~Resample.finalize`
+          overridden, make sure to call :py:meth:`~Resample.lock` if an
+          irreversible operation was performed (such as freeing intermediate
+          arrays) that would prevent further addition of input models to the
+          output model.
 
         """
         if self._finalized:
             # can't finalize twice
             return
-        self._finalized = free_memory
+
+        self._finalized = True
 
         self._output_model["pointings"] = len(self.group_ids)
 
@@ -1120,10 +1153,7 @@ def finalize(self, free_memory=True):
 
             del var_components
 
-        self._finalized = True
-
         self.finalize_time_info()
-
         return
 
     def init_variance_arrays(self):
@@ -1343,7 +1373,7 @@ def finalize_resample_variance(self, output_model, free_memory=True):
             output_model["var_flat"] = output_variance
 
         if free_memory:
-            self._finalized = True
+            self.lock()
             del (
                 self._var_rnoise_wsum,
                 self._var_poisson_wsum,
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index c61061051..62669845e 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -14,7 +14,6 @@
 __all__ = [
     "build_driz_weight",
     "build_mask",
-    "bytes2human",
     "compute_mean_pixel_area",
     "get_tmeasure",
     "is_flux_density",
@@ -68,7 +67,7 @@ def build_driz_weight(model, weight_type=None, good_bits=None,
     Parameters
     ----------
     model : dict
-        Input model: a dictionar of relevant keywords and values.
+        Input model: a dictionary of relevant keywords and values.
 
     weight_type : {"exptime", "ivm"}, optional
         The weighting type for adding models' data. For
@@ -78,8 +77,8 @@ def build_driz_weight(model, weight_type=None, good_bits=None,
         ``VAR_RNOISE`` array does not exist,
         the variance is set to 1 for all pixels (i.e., equal weighting).
         If ``weight_type="exptime"``, the weight will be set equal
-        to the measurement time (``TMEASURE``) when available and to
-        the exposure time (``EFFEXPTM``) otherwise.
+        to the measurement time when available and to
+        the exposure time otherwise.
 
     good_bits : int, str, None, optional
         An integer bit mask, `None`, a Python list of bit flags, a comma-,
@@ -218,43 +217,6 @@ def get_tmeasure(model):
         return tmeasure, True
 
 
-# FIXME: temporarily copied here to avoid this import:
-# from stdatamodels.jwst.library.basic_utils import bytes2human
-def bytes2human(n):
-    """Convert bytes to human-readable format
-
-    Taken from the ``psutil`` library which references
-    http://code.activestate.com/recipes/578019
-
-    Parameters
-    ----------
-    n : int
-        Number to convert
-
-    Returns
-    -------
-    readable : str
-        A string with units attached.
-
-    Examples
-    --------
-    >>> bytes2human(10000)
-        '9.8K'
-
-    >>> bytes2human(100001221)
-        '95.4M'
-    """
-    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
-    prefix = {}
-    for i, s in enumerate(symbols):
-        prefix[s] = 1 << (i + 1) * 10
-    for s in reversed(symbols):
-        if n >= prefix[s]:
-            value = float(n) / prefix[s]
-            return '%.1f%s' % (value, s)
-    return "%sB" % n
-
-
 def is_imaging_wcs(wcs):
     """ Returns `True` if ``wcs`` is an imaging WCS and `False` otherwise. """
     imaging = all(
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index a194c7d71..df1c6947b 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -66,6 +66,10 @@ def test_resample_mostly_defaults(weight_type, wcs_dict):
 
     resample.finalize()
 
+    # test cannot add another model after finalize():
+    with pytest.raises(RuntimeError):
+        resample.add_model(im)
+
     odata = resample.output_model["data"]
     oweight = resample.output_model["wht"]
 
diff --git a/tests/resample/test_resample_utils.py b/tests/resample/test_resample_utils.py
index 4eb1cda9e..6dae6c547 100644
--- a/tests/resample/test_resample_utils.py
+++ b/tests/resample/test_resample_utils.py
@@ -6,7 +6,6 @@
 from stcal.resample.utils import (
     build_driz_weight,
     build_mask,
-    bytes2human,
     compute_mean_pixel_area,
     get_tmeasure,
     is_flux_density,
@@ -102,17 +101,6 @@ def test_get_tmeasure():
         get_tmeasure(model)
 
 
-@pytest.mark.parametrize(
-        "n, readable",
-        [
-            (10000, "9.8K"),
-            (100001221, "95.4M")
-        ]
-)
-def test_bytes2human(n, readable):
-    assert bytes2human(n) == readable
-
-
 def test_is_imaging_wcs(wcs_gwcs):
     assert is_imaging_wcs(wcs_gwcs)
 

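Taken together, the locking changes above give roughly the following life cycle; a hedged sketch in which w (an output gwcs wrapped in the output_wcs dictionary) and models (a list of input-model dictionaries) are assumptions:

    from stcal.resample import Resample

    resample = Resample(output_wcs={"wcs": w})
    for model in models:
        resample.add_model(model)

    resample.finalize()             # computes the final output arrays; with the default
                                    # free_memory=True this also locks the object
    assert resample.is_finalized()
    assert resample.is_locked       # property; add_model() would now raise RuntimeError

    resample.reset_arrays()         # clears the locked flag so a new coadd can start
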
From 3d3178cdfe29d19e71f786fdbcb870e77cb54273 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Thu, 6 Feb 2025 22:55:43 -0500
Subject: [PATCH 31/35] Limit support for output_wcs only to dict

---
 src/stcal/resample/resample.py  | 18 +++++++-----------
 tests/resample/test_resample.py | 16 +++++++---------
 2 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index a0d26b7f9..f5c781862 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -176,8 +176,8 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             For more details, see documentation for
             `astropy.nddata.bitmask.extend_bit_flag_map`.
 
-        output_wcs : dict, WCS object, None
-            Specifies output WCS either directly as a WCS or a dictionary
+        output_wcs : dict, None
+            Specifies output WCS as a dictionary
             with keys ``'wcs'`` (WCS object) and ``'pixel_scale'``
             (pixel scale in arcseconds). ``'pixel_scale'``, when provided,
             will be used for computation of drizzle scaling factor. When it is
@@ -269,15 +269,11 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
                     "'output_wcs' parameter or the 'output_model' parameter. "
                 )
             else:
-                if isinstance(output_wcs, dict):
-                    self._output_pixel_scale = output_wcs.get("pixel_scale")
-                    self._pixel_scale_ratio = output_wcs.get(
-                        "pixel_scale_ratio"
-                    )
-                    self._output_wcs = output_wcs.get("wcs")
-                else:
-                    self._output_wcs = output_wcs
-
+                self._output_pixel_scale = output_wcs.get("pixel_scale")
+                self._pixel_scale_ratio = output_wcs.get(
+                    "pixel_scale_ratio"
+                )
+                self._output_wcs = output_wcs.get("wcs")
                 self.check_output_wcs(self._output_wcs)
 
         else:
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index df1c6947b..fd23724f9 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -13,26 +13,24 @@
 
 
 @pytest.mark.parametrize(
-    "weight_type,wcs_dict",
-    [("ivm", True), ("exptime", False)]
+    "weight_type", ["ivm", "exptime"]
 )
-def test_resample_mostly_defaults(weight_type, wcs_dict):
+def test_resample_mostly_defaults(weight_type):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
     pscale = 0.06 / 3600
 
-    output_wcs = make_gwcs(
+    w = make_gwcs(
         crpix=(600, 600),
         crval=crval,
         pscale=pscale,
         shape=(1200, 1200)
     )
-    if wcs_dict:
-        output_wcs = {
-            "wcs": output_wcs,
-            "pixel_scale": pscale * 3600,
-        }
+    output_wcs = {
+        "wcs": w,
+        "pixel_scale": pscale * 3600,
+    }
 
     nmodels = 4
 

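After this change the only accepted form of output_wcs is the dictionary, as in the updated test; the values below mirror the test, with make_gwcs being the helper from tests/resample/helpers.py and 'pixel_scale' given in arcseconds (optional, estimated from the WCS when omitted):

    w = make_gwcs(crpix=(600, 600), crval=(150.0, 2.0),
                  pscale=0.06 / 3600, shape=(1200, 1200))

    resample = Resample(
        n_input_models=4,
        output_wcs={"wcs": w, "pixel_scale": 0.06},
    )
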
From 32c15ffed33f8c22645b438201f9493b26069b65 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Fri, 7 Feb 2025 00:18:15 -0500
Subject: [PATCH 32/35] Remove support for resuming resampling onto an existing
 output model

---
 src/stcal/resample/resample.py  | 239 ++++----------------------------
 tests/resample/helpers.py       |   3 -
 tests/resample/test_resample.py |   4 +-
 3 files changed, 30 insertions(+), 216 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index f5c781862..e8cfa88cc 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -68,14 +68,20 @@ class Resample:
 
     dq_flag_name_map = None
 
-    def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
-                 fillval=0.0, weight_type="ivm", good_bits=0,
-                 output_wcs=None, output_model=None,
-                 accumulate=False, enable_ctx=True, enable_var=True,
-                 compute_err=None):
+    def __init__(self, output_wcs, n_input_models=None, pixfrac=1.0,
+                 kernel="square", fillval=0.0, weight_type="ivm", good_bits=0,
+                 enable_ctx=True, enable_var=True, compute_err=None):
         """
         Parameters
         ----------
+        output_wcs : dict
+            Specifies output WCS as a dictionary
+            with keys ``'wcs'`` (WCS object) and ``'pixel_scale'``
+            (pixel scale in arcseconds). ``'pixel_scale'``, when provided,
+            will be used for computation of drizzle scaling factor. When it is
+            not provided, output pixel scale will be *estimated* from the
+            provided WCS object.
+
         n_input_models : int, None, optional
             Number of input models expected to be resampled. When provided,
             this is used to estimate memory requirements and optimize memory
@@ -176,32 +182,6 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
             For more details, see documentation for
             `astropy.nddata.bitmask.extend_bit_flag_map`.
 
-        output_wcs : dict, None
-            Specifies output WCS as a dictionary
-            with keys ``'wcs'`` (WCS object) and ``'pixel_scale'``
-            (pixel scale in arcseconds). ``'pixel_scale'``, when provided,
-            will be used for computation of drizzle scaling factor. When it is
-            not provided, output pixel scale will be *estimated* from the
-            provided WCS object. ``output_wcs`` object is required when
-            ``output_model`` is `None`. ``output_wcs`` is ignored when
-            ``output_model`` is provided.
-
-        output_model : dict, None, optional
-            A dictionary containing data arrays and other attributes that
-            will be used to add new models to. use
-            :py:meth:`Resample.output_model_attributes` to get the list of
-            keywords that must be present. When ``accumulate`` is `False`,
-            only the WCS object of the model will be used. When ``accumulate``
-            is `True`, new models will be added to the existing data in the
-            ``output_model``.
-
-            When ``output_model`` is `None`, a new model will be created.
-
-        accumulate : bool, optional
-            Indicates whether resampled models should be added to the
-            provided ``output_model`` data or if new arrays should be
-            created.
-
         enable_ctx : bool, optional
             Indicates whether to create a context image. If ``disable_ctx``
             is set to `True`, parameters ``out_ctx``, ``begin_ctx_id``, and
@@ -231,13 +211,9 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self._finalized = False
         self._n_res_models = 0
 
-        self._output_model = output_model
-        self._create_new_output_model = output_model is not None
-
         self._enable_ctx = enable_ctx
         self._enable_var = enable_var
         self._compute_err = compute_err
-        self._accumulate = accumulate
 
         # these attributes are used only for informational purposes
         # and are added to created the output_model only if they are
@@ -262,36 +238,18 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         self._group_ids = []
 
         # determine output WCS and set up output model if needed:
-        if output_model is None:
-            if output_wcs is None:
-                raise ValueError(
-                    "Output WCS must be provided either through the "
-                    "'output_wcs' parameter or the 'output_model' parameter. "
-                )
-            else:
-                self._output_pixel_scale = output_wcs.get("pixel_scale")
-                self._pixel_scale_ratio = output_wcs.get(
-                    "pixel_scale_ratio"
-                )
-                self._output_wcs = output_wcs.get("wcs")
-                self.check_output_wcs(self._output_wcs)
-
+        if output_wcs is None:
+            raise ValueError(
+                "Output WCS must be provided either through the "
+                "'output_wcs' parameter or the 'output_model' parameter. "
+            )
         else:
-            self.validate_output_model(
-                output_model=output_model,
-                accumulate=accumulate,
-                enable_ctx=enable_ctx,
-                enable_var=enable_var,
-                compute_err=compute_err,
+            self._output_pixel_scale = output_wcs.get("pixel_scale")
+            self._pixel_scale_ratio = output_wcs.get(
+                "pixel_scale_ratio"
             )
-            self._output_model = output_model
-            self._output_wcs = output_model["wcs"]
-            self._output_pixel_scale = output_model.get("pixel_scale")
-            if output_wcs:
-                log.warning(
-                    "'output_wcs' will be ignored. Using the 'wcs' supplied "
-                    "by the 'output_model' instead."
-                )
+            self._output_wcs = output_wcs.get("wcs")
+            self.check_output_wcs(self._output_wcs)
 
         if self._output_pixel_scale is None:
             self._output_pixel_scale = 3600.0 * np.rad2deg(
@@ -327,8 +285,7 @@ def __init__(self, n_input_models=None, pixfrac=1.0, kernel="square",
         )
 
         # set up an empty output model (don't allocate arrays at this time):
-        if self._output_model is None:
-            self._output_model = self.create_output_model()
+        self._output_model = self.create_output_model()
 
         self.reset_arrays(reset_output=False, n_input_models=n_input_models)
 
@@ -395,8 +352,7 @@ def get_output_model_pixel_area(self, model):
         return pixel_area
 
     @classmethod
-    def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
-                                compute_err):
+    def output_model_attributes(cls, enable_ctx, enable_var, compute_err):
         """
         Returns a set of string keywords that must be present in an
         'output_model' that is provided as input at the class initialization.
@@ -404,11 +360,6 @@ def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
         Parameters
         ----------
 
-        accumulate : bool, optional
-            Indicates whether resampled models should be added to the
-            provided ``output_model`` data or if new arrays should be
-            created.
-
         enable_ctx : bool, optional
             Indicates whether to create a context image. If ``disable_ctx``
             is set to `True`, parameters ``out_ctx``, ``begin_ctx_id``, and
@@ -455,32 +406,6 @@ def output_model_attributes(cls, accumulate, enable_ctx, enable_var,
             attributes.update(
                 ["var_rnoise", "var_poisson", "var_flat"]
             )
-            # TODO: if we want to support adding more data to
-            # existing output models, we need to also store weights
-            # for variance arrays:
-            # var_rnoise_weight
-            # var_flat_weight
-            # var_poisson_weight
-        if accumulate:
-            if enable_ctx:
-                attributes.add("n_coadds")
-
-            # additional attributes required for input parameter 'output_model'
-            # when data and weight arrays are not None:
-            attributes.update(
-                {
-                    "pixfrac",
-                    "kernel",
-                    "fillval",
-                    "weight_type",
-                    "pointings",
-                    "exposure_time",
-                    "measurement_time",
-                    "start_time",
-                    "end_time",
-                    "duration",
-                }
-            )
 
         return attributes
 
@@ -536,90 +461,6 @@ def check_output_wcs(self, output_wcs, estimate_output_shape=True):
                     "inputs."
                 )
 
-    def validate_output_model(self, output_model, accumulate,
-                              enable_ctx, enable_var, compute_err):
-        """ Checks that ``output_model`` dictionary has all the required
-        keywords that the code expects it to have based on the values
-        of ``accumulate``, ``enable_ctx``, ``enable_var``. It will raise
-        `ValueError` if `output_model` is missing required keywords/values.
-
-        Parameters
-        ----------
-
-        output_model : dict
-            A dictionary representing data and meta values from a data model.
-
-        accumulate : bool
-            Indicates whether resampled models should be added to the
-            provided ``output_model`` data or if new arrays should be
-            created.
-
-        enable_ctx : bool
-            Indicates whether to create a context image. If ``disable_ctx``
-            is set to `True`, parameters ``out_ctx``, ``begin_ctx_id``, and
-            ``max_ctx_id`` will be ignored.
-
-        enable_var : bool
-            Indicates whether to resample variance arrays.
-
-        compute_err : {"from_var", "driz_err"}, None
-            A string indicating how error array for the resampled image should
-            be computed. See `Resample.__init__` for more details.
-
-        """
-        if output_model is None:
-            if accumulate:
-                raise ValueError(
-                    "'output_model' must be defined when 'accumulate' is True."
-                )
-            return
-
-        required_attributes = self.output_model_attributes(
-            accumulate=accumulate,
-            enable_ctx=enable_ctx,
-            enable_var=enable_var,
-            compute_err=compute_err,
-        )
-
-        for attr in required_attributes:
-            if attr not in output_model:
-                raise ValueError(
-                    f"'output_model' dictionary must have '{attr}' set."
-                )
-
-        model_wcs = output_model["wcs"]
-        self.check_output_wcs(model_wcs, estimate_output_shape=False)
-        wcs_shape = model_wcs.array_shape
-        if output_model["data"] is None:
-            ref_shape = wcs_shape
-        else:
-            ref_shape = output_model["data"].shape
-        if accumulate and wcs_shape is None:
-            raise ValueError(
-                "Output model's 'wcs' must have 'array_shape' attribute "
-                "set when 'accumulate' parameter is True."
-            )
-
-        if not np.array_equiv(wcs_shape, ref_shape):
-            raise ValueError(
-                "Output model's 'wcs.array_shape' value is not consistent "
-                "with the shape of the data array."
-            )
-
-        for attr in required_attributes.difference(["data", "wcs"]):
-            if isinstance(output_model[attr], np.ndarray):
-                model_shape = output_model[attr].shape[1:]
-                if attr == "con":
-                    model_shape = model_shape[1:]
-                if not np.array_equiv(model_shape, ref_shape):
-                    raise ValueError(
-                        "'output_wcs.array_shape' value is not consistent "
-                        f"with the shape of the '{attr}' array."
-                    )
-
-        # TODO: also check "pixfrac", "kernel", "fillval", "weight_type"
-        # with initializer parameters. log a warning if different.
-
     def create_output_model(self):
         """ Create a new "output model": a dictionary of data and meta fields.
 
@@ -654,9 +495,6 @@ def create_output_model(self):
             "fillval": self.fillval,
             "weight_type": self.weight_type,
 
-            # accumulate-specific:
-            "n_coadds": 0,
-
             # pixel scale:
             "pixelarea_steradians": pix_area / np.rad2deg(3600)**2,
             "pixelarea_arcsecsq": pix_area,
@@ -736,13 +574,6 @@ def compute_err(self):
         """
         return self._compute_err
 
-    @property
-    def is_in_accumulate_mode(self):
-        """ Indicates whether resample is continuing adding to previous
-        co-adds.
-        """
-        return self._accumulate
-
     def _get_intensity_scale(self, model):
         """
         Compute an intensity scale from the input and output pixel area.
@@ -860,11 +691,10 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
 
         om = self._output_model
 
-        begin_ctx_id = om.get("n_coadds", 0)
         if n_input_models is None:
             max_ctx_id = None
         else:
-            max_ctx_id = begin_ctx_id + n_input_models - 1
+            max_ctx_id = n_input_models - 1
 
         self._driz = Drizzle(
             kernel=self.kernel,
@@ -874,7 +704,7 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
             out_wht=om["wht"],
             out_ctx=om["con"],
             exptime=om["exposure_time"],
-            begin_ctx_id=begin_ctx_id,
+            begin_ctx_id=0,
             max_ctx_id=max_ctx_id,
         )
 
@@ -965,8 +795,7 @@ def add_model(self, model):
         them to the corresponding
         arrays of the output model using appropriate weighting.
         It also updates the weight array and context array (if ``enable_ctx``
-        is `True`) of the resampled data, as well as relevant metadata such as
-        "n_coadds".
+        is `True`) of the resampled data, as well as relevant metadata.
 
         Whenever ``model`` has a unique group ID that was never processed
         before, the "pointings" value of the output model is incremented and
@@ -1000,8 +829,6 @@ def add_model(self, model):
                 f"Input model '{model['filename']}' is not a 2D image."
             )
 
-        self._output_model["n_coadds"] += 1
-
         if (group_id := model["group_id"]) not in self._group_ids:
             self.update_time(model)
             self._group_ids.append(group_id)
@@ -1159,18 +986,8 @@ def init_variance_arrays(self):
 
         for noise_type in ["var_rnoise", "var_flat", "var_poisson"]:
             var_dtype = self.output_array_types[noise_type]
-            kwd = f"{noise_type}_weight"
-            if self._accumulate:
-                wsum = self._output_model.get(noise_type)
-                wt = self._output_model.get(kwd)
-                if wsum is None or wt is None:
-                    wsum = np.full(shape, np.nan, dtype=var_dtype)
-                    wt = np.zeros(shape, dtype=var_dtype)
-                else:
-                    wsum = wsum * (wt * wt)
-            else:
-                wsum = np.full(shape, np.nan, dtype=var_dtype)
-                wt = np.zeros(shape, dtype=var_dtype)
+            wsum = np.full(shape, np.nan, dtype=var_dtype)
+            wt = np.zeros(shape, dtype=var_dtype)
 
             setattr(self, f"_{noise_type}_wsum", wsum)
             setattr(self, f"_{noise_type}_weight", wt)
diff --git a/tests/resample/helpers.py b/tests/resample/helpers.py
index d8b231a22..81f88a354 100644
--- a/tests/resample/helpers.py
+++ b/tests/resample/helpers.py
@@ -140,9 +140,6 @@ def make_output_model(crpix, crval, pscale, shape):
         "var_poisson": None,
         "err": None,
 
-        # accumulate-specific:
-        "n_coadds": 0,
-
         # drizzle info:
         "pointings": 0,
 
diff --git a/tests/resample/test_resample.py b/tests/resample/test_resample.py
index fd23724f9..585a2cd25 100644
--- a/tests/resample/test_resample.py
+++ b/tests/resample/test_resample.py
@@ -94,7 +94,7 @@ def test_resample_mostly_defaults(weight_type):
         ("driz_err", "ivm-med5")
     ]
 )
-def test_resample_custom_output_model(compute_err, weight_type):
+def test_resample_compute_error_mode(compute_err, weight_type):
     crval = (150.0, 2.0)
     crpix = (500.0, 500.0)
     shape = (1000, 1000)
@@ -113,7 +113,7 @@ def test_resample_custom_output_model(compute_err, weight_type):
 
     resample = Resample(
         n_input_models=nmodels,
-        output_model=output_model,
+        output_wcs=output_model,
         weight_type=weight_type,
         compute_err=compute_err,
     )

From 68ead9477ef9c47850b1c4ceeaa5a66120348563 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Sat, 8 Feb 2025 03:50:18 -0500
Subject: [PATCH 33/35] Simplify interface: remove lock, fix bugs, and address
 reviewer comments
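
After this change the lifecycle of a ``Resample`` object is simply
add -> finalize -> (optionally) reset; input model dictionaries must now
carry a "filename" key, while "s_region" is no longer required. A rough
sketch of the intended usage (the output WCS object ``w`` and the ``models``
list are placeholders):

    resample = Resample(output_wcs={"wcs": w}, n_input_models=len(models))
    for model in models:
        resample.add_model(model)
    resample.finalize()
    result = resample.output_model            # dict with "data", "wht", ...

    resample.add_model(extra_model)           # now raises RuntimeError
    resample.reset_arrays(n_input_models=1)   # re-initialize before adding more
    resample.add_model(extra_model)           # accepted again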

---
 src/stcal/resample/resample.py | 120 +++++++++------------------------
 1 file changed, 33 insertions(+), 87 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index e8cfa88cc..58b56e90d 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -207,7 +207,6 @@ def __init__(self, output_wcs, n_input_models=None, pixfrac=1.0,
 
         """
         # to see if setting up arrays and drizzle is needed
-        self._locked = False
         self._finalized = False
         self._n_res_models = 0
 
@@ -285,9 +284,7 @@ def __init__(self, output_wcs, n_input_models=None, pixfrac=1.0,
         )
 
         # set up an empty output model (don't allocate arrays at this time):
-        self._output_model = self.create_output_model()
-
-        self.reset_arrays(reset_output=False, n_input_models=n_input_models)
+        self.reset_arrays(n_input_models=n_input_models)
 
     def get_input_model_pixel_area(self, model):
         """
@@ -359,7 +356,6 @@ def output_model_attributes(cls, enable_ctx, enable_var, compute_err):
 
         Parameters
         ----------
-
         enable_ctx : bool, optional
             Indicates whether to create a context image. If ``disable_ctx``
             is set to `True`, parameters ``out_ctx``, ``begin_ctx_id``, and
@@ -615,7 +611,7 @@ def _get_intensity_scale(self, model):
                 input_pixel_area = self.get_input_model_pixel_area(model)
 
                 if input_pixel_area is None:
-                    model_name = model["filename"]
+                    model_name = _get_model_name(model)
                     if not model_name:
                         model_name = "Unknown"
                     raise ValueError(
@@ -645,52 +641,25 @@ def _get_intensity_scale(self, model):
 
         return iscale
 
-    @property
-    def is_locked(self):
-        """ Indicates whether the output model has been "locked". No further
-        calls to :py:meth:`add_model` if output model is locked.
-        """
-        return self._locked
-
-    def lock(self):
-        """ Sets "locked" flag to `True` indicating that intermediate
-        arrays have been freed and therefore no further addition of models
-        to the output model via :py:meth:`add_model` is possible.
-
-        Subclasses should call this method whenever they perform an operation
-        that would prevent further addition of input models to the output
-        model.
-        """
-        self._locked = True
-
-    def reset_arrays(self, reset_output=True, n_input_models=None):
+    def reset_arrays(self, n_input_models=None):
         """ Initialize/reset `Drizzle` objects, output model and arrays,
-        and time counters and clears the "locked" flag. Output WCS and shape
+        and time counters and clears the "finalized" flag. Output WCS and shape
         are not modified from `Resample` object initialization. This method
         needs to be called before calling :py:meth:`add_model` for the first
-        time if :py:meth:`finalize` was previously called and locked the output
-        model.
+        time after :py:meth:`finalize` was called.
 
         Parameters
         ----------
-        reset_output : bool, optional
-            When `True` a new output model will be created. Otherwise new
-            models will be resampled and added to existing output data arrays.
-
         n_input_models : int, None, optional
             Number of input models expected to be resampled. When provided,
             this is used to estimate memory requirements and optimize memory
             allocation for the context array.
 
         """
-        self._n_predicted_input_models = n_input_models
-
         # set up an empty output model (don't allocate arrays at this time):
-        if reset_output or getattr(self, "_output_model", None) is None:
+        if getattr(self, "_output_model", None) is None:
             self._output_model = self.create_output_model()
 
-        om = self._output_model
-
         if n_input_models is None:
             max_ctx_id = None
         else:
@@ -700,10 +669,10 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
             kernel=self.kernel,
             fillval=self.fillval,
             out_shape=self._output_array_shape,
-            out_img=om["data"],
-            out_wht=om["wht"],
-            out_ctx=om["con"],
-            exptime=om["exposure_time"],
+            out_img=self._output_model["data"],
+            out_wht=self._output_model["wht"],
+            out_ctx=self._output_model["con"],
+            exptime=self._output_model["exposure_time"],
             begin_ctx_id=0,
             max_ctx_id=max_ctx_id,
         )
@@ -714,8 +683,8 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
                 kernel=self.kernel,
                 fillval=self.fillval,
                 out_shape=self._output_array_shape,
-                out_img=om["err"],
-                exptime=om["exposure_time"],
+                out_img=self._output_model["err"],
+                exptime=self._output_model["exposure_time"],
                 disable_ctx=True,
             )
 
@@ -724,7 +693,6 @@ def reset_arrays(self, reset_output=True, n_input_models=None):
 
         self.init_time_counters()
 
-        self._locked = False
         self._finalized = False
 
     def validate_input_model(self, model):
@@ -753,8 +721,8 @@ def validate_input_model(self, model):
             "dq",
 
             # meta:
+            "filename",
             "group_id",
-            "s_region",
             "wcs",
 
             "exposure_time",
@@ -762,7 +730,6 @@ def validate_input_model(self, model):
             "end_time",
             "duration",
             "measurement_time",
-            "effective_exposure_time",
             "elapsed_exposure_time",
 
             "pixelarea_steradians",
@@ -772,6 +739,9 @@ def validate_input_model(self, model):
             "subtracted",
         ]
 
+        if 'SPECTRAL' in model["wcs"].output_frame.axes_type:
+            min_attributes.append("bunit_data")
+
         if self._enable_var:
             min_attributes += ["var_rnoise", "var_poisson", "var_flat"]
 
@@ -810,13 +780,13 @@ def add_model(self, model):
             and values of actual models used by pipelines.
 
         """
-        if self._locked:
+        if self._finalized:
             raise RuntimeError(
-                "Resampling has been locked and intermediate arrays have "
-                "been freed. Unable to add new models. Call 'reset_arrays' "
+                "Resampling has been finalized and no new models can be added "
+                "to the resampled output model. Call 'reset_arrays' "
                 "to initialize a new output model and associated arrays."
             )
-        self._finalized = False
+
         self.validate_input_model(model)
         self._n_res_models += 1
 
@@ -826,7 +796,7 @@ def add_model(self, model):
         # Check that input models are 2D images
         if data.ndim != 2:
             raise RuntimeError(
-                f"Input model '{model['filename']}' is not a 2D image."
+                f"Input model '{_get_model_name(model)}' is not a 2D image."
             )
 
         if (group_id := model["group_id"]) not in self._group_ids:
@@ -901,7 +871,7 @@ def is_finalized(self):
         """
         return self._finalized
 
-    def finalize(self, free_memory=True):
+    def finalize(self):
         """ Performs final computations from any intermediate values,
         sets ouput model values, and optionally frees temporary/intermediate
         objects.
@@ -910,18 +880,8 @@ def finalize(self, free_memory=True):
         :py:meth:`~Resample.finalize_time_info`.
 
         .. warning::
-          If ``enable_var=True`` and :py:meth:`~Resample.finalize` is called
-          with ``free_memory=True`` then intermediate arrays holding variance
-          weights will be lost and so continuing adding new models after
-          a call to :py:meth:`~Resample.finalize` will result in incorrect
-          variance. In this case `finalize` will set the locked flag to `True`.
-
-        .. note::
-          If `Resample` is subclassed and :py:meth:`~Resample.finalize`
-          overridden, make sure to call :py:meth:`~Resample.lock` if an
-          irreversible operation was performed (such as freeing intermediate
-          arrays) that would prevent further addition of input models to the
-          output model.
+          Once the resample process has been finalized, adding new models to
+          the resampled output model is not allowed.
 
         """
         if self._finalized:
@@ -940,21 +900,16 @@ def finalize(self, free_memory=True):
             # back to the product's `con` attribute.
             self._output_model["con"] = self._driz.out_ctx
 
-        if free_memory:
-            del self._driz
+        del self._driz
 
         # compute final variances:
         if self._enable_var:
-            self.finalize_resample_variance(
-                self._output_model,
-                free_memory=free_memory
-            )
+            self.finalize_resample_variance(self._output_model)
 
         if self._compute_err == "driz_err":
             # use resampled error
             self.output_model["err"] = self._driz_error.out_img
-            if free_memory:
-                del self._driz_error
+            del self._driz_error
 
         elif self._enable_var:
             # compute error from variance arrays:
@@ -1135,10 +1090,9 @@ def resample_variance_arrays(self, model, pixmap, iscale,
                 )
                 self._var_flat_weight[mask] += weight[mask]
 
-    def finalize_resample_variance(self, output_model, free_memory=True):
+    def finalize_resample_variance(self, output_model):
         """ Compute variance for the resampled image from running sums and
-        weights. Free memory (when ``free_memory=True``) that holds these
-        running sums and weights arrays.
+        weights. Free memory that holds these running sums and weights arrays.
 
         output_model : dict, None
             A dictionary containing data arrays and other attributes that
@@ -1149,11 +1103,6 @@ def finalize_resample_variance(self, output_model, free_memory=True):
             is `True`, new models will be added to the existing data in the
             ``output_model``.
 
-        free_memory : True
-            Indicates whether to free temporary arrays (i.e., weight arrays)
-            that are no longer needed. If this is `True` it will not be
-            possible to continue adding new models to the output model.
-
         """
         # Divide by the total weights, squared, and set in the output model.
         # Zero weight and missing values are NaN in the output.
@@ -1185,8 +1134,6 @@ def finalize_resample_variance(self, output_model, free_memory=True):
             )
             output_model["var_flat"] = output_variance
 
-        if free_memory:
-            self.lock()
             del (
                 self._var_rnoise_wsum,
                 self._var_poisson_wsum,
@@ -1195,6 +1142,7 @@ def finalize_resample_variance(self, output_model, free_memory=True):
                 self._var_poisson_weight,
                 self._var_flat_weight,
             )
+            self._finalized = True
 
     def _resample_one_variance_array(self, name, model, iscale,
                                      weight_map, pixmap,
@@ -1212,14 +1160,14 @@ def _resample_one_variance_array(self, name, model, iscale,
         if variance is None or variance.size == 0:
             log.debug(
                 f"No data for '{name}' for model "
-                f"{repr(model['filename'])}. Skipping ..."
+                f"{repr(_get_model_name(model))}. Skipping ..."
             )
             return
 
         elif variance.shape != model["data"].shape:
             log.warning(
                 f"Data shape mismatch for '{name}' for model "
-                f"{repr(model['filename'])}. Skipping ..."
+                f"{repr(_get_model_name(model))}. Skipping ..."
             )
             return
 
@@ -1309,9 +1257,7 @@ def finalize_time_info(self):
         self._output_model["start_time"] = min(self._exptime_start)
         self._output_model["end_time"] = max(self._exptime_end)
         # Update other exposure time keywords:
-        # XPOSURE (identical to the total effective exposure time,EFFEXPTM)
-        self._output_model["effective_exposure_time"] = self._total_exposure_time
-        # DURATION (identical to TELAPSE, elapsed time)
+        # DURATION (identical to elapsed time)
         self._output_model["duration"] = self._duration
         self._output_model["elapsed_exposure_time"] = self._duration
 

From 909a3648baaf314fe89a76ae3e35b2c9cc149b42 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 12 Feb 2025 10:42:22 -0500
Subject: [PATCH 34/35] Fix a bug in checking when to reset arrays. Other
 clean-up
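
``build_mask`` now delegates to ``astropy.nddata.bitmask`` and
``build_driz_weight`` keeps only the plain "ivm", "exptime" and unity weight
modes. A small sketch of the simplified behavior (array values and the
minimal set of model keys shown here are illustrative assumptions):

    import numpy as np
    from stcal.resample.utils import build_driz_weight, build_mask

    dq = np.array([[0, 1], [2, 0]], dtype=np.uint32)
    # good_bits=0: only pixels with no DQ flags set are good
    build_mask(dq, good_bits=0)      # -> [[1, 0], [0, 1]] as uint8

    model = {
        "data": np.ones((2, 2), dtype=np.float32),
        "dq": dq,
        "var_rnoise": np.full((2, 2), 0.25, dtype=np.float32),
    }
    # weight is 1 / var_rnoise (= 4.0) where DQ is clean, 0 elsewhere
    build_driz_weight(model, weight_type="ivm", good_bits=0)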

---
 src/stcal/resample/resample.py |  13 ++--
 src/stcal/resample/utils.py    | 116 ++++++++++-----------------------
 2 files changed, 37 insertions(+), 92 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index 58b56e90d..ea76e16cb 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -52,9 +52,6 @@ class Resample:
     6. Keeps track of total exposure time and other time-related quantities.
 
     """
-    resample_suffix = 'i2d'
-    resample_file_ext = '.fits'
-
     # supported output arrays (subclasses can add more):
     output_array_types = {
         "data": np.float32,
@@ -657,7 +654,7 @@ def reset_arrays(self, n_input_models=None):
 
         """
         # set up an empty output model (don't allocate arrays at this time):
-        if getattr(self, "_output_model", None) is None:
+        if not hasattr(self, "_output_model") or self.is_finalized():
             self._output_model = self.create_output_model()
 
         if n_input_models is None:
@@ -730,7 +727,6 @@ def validate_input_model(self, model):
             "end_time",
             "duration",
             "measurement_time",
-            "elapsed_exposure_time",
 
             "pixelarea_steradians",
             # "pixelarea_arcsecsq",
@@ -1201,12 +1197,11 @@ def _resample_one_variance_array(self, name, model, iscale,
 
         return driz.out_img ** 2
 
-
     def init_time_counters(self):
         """ Initialize variables/arrays needed to process exposure time. """
-        self._total_exposure_time = self.output_model["exposure_time"]
-        self._duration = self.output_model["duration"]
-        self._total_measurement_time = self.output_model["measurement_time"]
+        self._total_exposure_time = 0
+        self._duration = 0
+        self._total_measurement_time = 0
         if self._total_measurement_time is None:
             self._total_measurement_time = 0.0
 
diff --git a/src/stcal/resample/utils.py b/src/stcal/resample/utils.py
index 62669845e..4faed0702 100644
--- a/src/stcal/resample/utils.py
+++ b/src/stcal/resample/utils.py
@@ -47,16 +47,34 @@ def resample_range(data_shape, bbox=None):
     return xmin, xmax, ymin, ymax
 
 
-def build_mask(dqarr, bitvalue, flag_name_map=None):
+def build_mask(dqarr, good_bits, flag_name_map=None):
     """Build a bit mask from an input DQ array and a bitvalue flag
 
     In the returned bit mask, 1 is good, 0 is bad
     """
-    bitvalue = interpret_bit_flags(bitvalue, flag_name_map=flag_name_map)
+    good_bits = interpret_bit_flags(good_bits, flag_name_map=flag_name_map)
 
-    if bitvalue is None:
-        return np.ones(dqarr.shape, dtype=np.uint8)
-    return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)
+    dqmask = bitfield_to_boolean_mask(
+        dqarr,
+        good_bits,
+        good_mask_value=1,
+        dtype=np.uint8,
+        flag_name_map=flag_name_map,
+    )
+    return dqmask
 
 
 def build_driz_weight(model, weight_type=None, good_bits=None,
@@ -106,98 +124,30 @@ def build_driz_weight(model, weight_type=None, good_bits=None,
         flag_name_map=flag_name_map,
     )
 
-    if weight_type and weight_type.startswith('ivm'):
-        weight_type = weight_type.strip()
-        selective_median = weight_type.startswith('ivm-smed')
-        bitvalue = interpret_bit_flags(
-            good_bits,
-            flag_name_map=flag_name_map
-        )
-        if bitvalue is None:
-            bitvalue = 0
-
-        # disable selective median if SATURATED flag is included
-        # in good_bits:
-        try:
-            if flag_name_map is not None:
-                saturation = flag_name_map["SATURATED"]
-                if selective_median and not (bitvalue & saturation):
-                    selective_median = False
-                    weight_type = 'ivm'
-        except AttributeError:
-            pass
-
+    if weight_type == 'ivm':
         var_rnoise = model["var_rnoise"]
-        if (var_rnoise is not None and var_rnoise.shape == data.shape):
+        if (var_rnoise is not None and
+                var_rnoise.shape == data.shape):
             with np.errstate(divide="ignore", invalid="ignore"):
                 inv_variance = var_rnoise**-1
-
             inv_variance[~np.isfinite(inv_variance)] = 1
-
-            if weight_type != 'ivm':
-                ny, nx = data.shape
-
-                # apply a median filter to smooth the weight at saturated
-                # (or high read-out noise) single pixels. keep kernel size
-                # small to still give lower weight to extended CRs, etc.
-                ksz = weight_type[8 if selective_median else 7:]
-                if ksz:
-                    kernel_size = int(ksz)
-                    if not (kernel_size % 2):
-                        raise ValueError(
-                            'Kernel size of the median filter in IVM '
-                            'weighting must be an odd integer.'
-                        )
-                else:
-                    kernel_size = 3
-
-                ivm_copy = inv_variance.copy()
-
-                if selective_median:
-                    # apply median filter selectively only at
-                    # points of partially saturated sources:
-                    jumps = np.where(
-                        np.logical_and(dq & saturation, dqmask)
-                    )
-                    w2 = kernel_size // 2
-                    for r, c in zip(*jumps):
-                        x1 = max(0, c - w2)
-                        x2 = min(nx, c + w2 + 1)
-                        y1 = max(0, r - w2)
-                        y2 = min(ny, r + w2 + 1)
-                        data = ivm_copy[y1:y2, x1:x2][dqmask[y1:y2, x1:x2]]
-                        if data.size:
-                            inv_variance[r, c] = np.median(data)
-                        # else: leave it as is
-
-                else:
-                    # apply median to the entire inv-var array:
-                    inv_variance = median_filter(
-                        inv_variance,
-                        size=kernel_size
-                    )
-                bad_dqmask = np.logical_not(dqmask)
-                inv_variance[bad_dqmask] = ivm_copy[bad_dqmask]
-
         else:
             warnings.warn(
-                "var_rnoise array not available. "
+                "'var_rnoise' array not available. "
                 "Setting drizzle weight map to 1",
                 RuntimeWarning
             )
             inv_variance = 1.0
+        result = inv_variance * dqmask
 
-        weight = inv_variance * dqmask
-
-    elif weight_type == "exptime":
-        t, _ = get_tmeasure(model)
-        weight = np.full(data.shape, t)
-        weight *= dqmask
+    elif weight_type == 'exptime':
+        exptime, _ = get_tmeasure(model)
+        result = exptime * dqmask
 
     else:
-        weight = np.ones(data.shape, dtype=data.dtype) * dqmask
+        result = np.ones(data.shape, dtype=data.dtype) * dqmask
 
-    return weight.astype(np.float32)
+    return result.astype(np.float32)
 
 
 def get_tmeasure(model):

From 9fba2975cb3dc11adf7f7c214cd311c2e10db261 Mon Sep 17 00:00:00 2001
From: Mihai Cara <mihail.cara@gmail.com>
Date: Wed, 12 Feb 2025 12:18:52 -0500
Subject: [PATCH 35/35] Clean-up code. Address reviewer comments
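
``output_model_attributes`` is dropped together with the last remnants of the
removed ``output_model`` input, and the context plane is now switched off in
the underlying ``drizzle.resample.Drizzle`` object when ``enable_ctx=False``
instead of relying on the default behavior. Roughly what ``reset_arrays``
does in that case (a sketch; the output shape is a placeholder):

    from drizzle.resample import Drizzle

    driz = Drizzle(
        kernel="square",
        fillval=0.0,
        out_shape=(1200, 1200),
        disable_ctx=True,  # no context image is created
    )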

---
 src/stcal/resample/resample.py | 64 ++--------------------------------
 1 file changed, 2 insertions(+), 62 deletions(-)

diff --git a/src/stcal/resample/resample.py b/src/stcal/resample/resample.py
index ea76e16cb..2bf631c9b 100644
--- a/src/stcal/resample/resample.py
+++ b/src/stcal/resample/resample.py
@@ -230,7 +230,6 @@ def __init__(self, output_wcs, n_input_models=None, pixfrac=1.0,
 
         self._output_wcs = output_wcs
 
-        self.input_file_names = []
         self._group_ids = []
 
         # determine output WCS and set up output model if needed:
@@ -345,63 +344,6 @@ def get_output_model_pixel_area(self, model):
         pixel_area = compute_mean_pixel_area(model["wcs"])
         return pixel_area
 
-    @classmethod
-    def output_model_attributes(cls, enable_ctx, enable_var, compute_err):
-        """
-        Returns a set of string keywords that must be present in an
-        'output_model' that is provided as input at the class initialization.
-
-        Parameters
-        ----------
-        enable_ctx : bool, optional
-            Indicates whether to create a context image. If ``disable_ctx``
-            is set to `True`, parameters ``out_ctx``, ``begin_ctx_id``, and
-            ``max_ctx_id`` will be ignored.
-
-        enable_var : bool, optional
-            Indicates whether to resample variance arrays.
-
-        compute_err : {"from_var", "driz_err"}, None, optional
-            - ``"from_var"``: compute output model's error array from
-              all (Poisson, flat, readout) resampled variance arrays.
-              Setting ``compute_err`` to ``"from_var"`` will assume
-              ``enable_var`` was set to `True` regardless of actual
-              value of the parameter ``enable_var``.
-            - ``"driz_err"``: compute output model's error array by drizzling
-              together all input models' error arrays.
-
-            Error array will be assigned to ``'err'`` key of the output model.
-
-            .. note::
-                At this time, output error array is not equivalent to
-                error propagation results.
-
-        Returns
-        -------
-
-        attributes : set
-            A set of attributes that an output model must have when it
-            is provided as an input to `Resample.__init__` initializer.
-
-        """
-        # always required:
-        attributes = {
-            "data",
-            "wcs",
-            "wht",
-        }
-
-        if enable_ctx:
-            attributes.add("con")
-        if compute_err:
-            attributes.add("err")
-        if enable_var:
-            attributes.update(
-                ["var_rnoise", "var_poisson", "var_flat"]
-            )
-
-        return attributes
-
     def check_output_wcs(self, output_wcs, estimate_output_shape=True):
         """
         Check that provided WCS has expected properties and that its
@@ -608,12 +550,9 @@ def _get_intensity_scale(self, model):
                 input_pixel_area = self.get_input_model_pixel_area(model)
 
                 if input_pixel_area is None:
-                    model_name = _get_model_name(model)
-                    if not model_name:
-                        model_name = "Unknown"
                     raise ValueError(
                         "Unable to compute input pixel area from WCS of input "
-                        f"image {repr(model_name)}."
+                        f"image {repr(_get_model_name(model))}."
                     )
 
                 if self._pixel_scale_ratio is None:
@@ -672,6 +611,7 @@ def reset_arrays(self, n_input_models=None):
             exptime=self._output_model["exposure_time"],
             begin_ctx_id=0,
             max_ctx_id=max_ctx_id,
+            disable_ctx=not self._enable_ctx,
         )
 
         # Also make a temporary model to hold error data