Skip to content

Commit

Permalink
Merge branch '404-utilisation-de-mesure-type-max-avec-l-ambiguite' in…
Browse files Browse the repository at this point in the history
…to 'release'

Resolve "Utilisation de mesure type "max" avec l'ambiguité"

See merge request 3d/PandoraBox/pandora!363
  • Loading branch information
lecontm committed Aug 19, 2024
2 parents 1fd5eb7 + d7fff7a commit e589951
Show file tree
Hide file tree
Showing 2 changed files with 90 additions and 7 deletions.
30 changes: 24 additions & 6 deletions pandora/cost_volume_confidence/ambiguity.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,12 +133,15 @@ def confidence_prediction(
)
# Get disparity intervals parameters
disparity_range = cv["disp"].data.astype(np.float32)

type_measure_min = cv.attrs["type_measure"] == "min"

# This silences numba's TBB threading layer warning
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# Computes ambiguity using numba in parallel for memory and computation time optimization
ambiguity = self.compute_ambiguity(
cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range
cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range, type_measure_min
)

# If activated, ambiguity normalization with percentile
Expand Down Expand Up @@ -181,7 +184,7 @@ def normalize_with_percentile(self, ambiguity: np.ndarray) -> np.ndarray:

@staticmethod
@njit(
"f4[:, :](f4[:, :, :], f8[:], i8, i8[:, :, :],f4[:])",
"f4[:, :](f4[:, :, :], f8[:], i8, i8[:, :, :],f4[:], bool_)",
parallel=literal_eval(os.environ.get("PANDORA_NUMBA_PARALLEL", "False")),
cache=True,
)
Expand All @@ -191,6 +194,7 @@ def compute_ambiguity(
nbr_etas: int,
grids: np.ndarray,
disparity_range: np.ndarray,
type_measure_min: bool,
) -> np.ndarray:
"""
Computes ambiguity.
Expand All @@ -205,16 +209,23 @@ def compute_ambiguity(
:type grids: 2D np.ndarray (min, max)
:param disparity_range: array containing disparity range
:type disparity_range: np.ndarray
:param type_measure_min: True for min and False for max
:type type_measure_min: bool
:return: the normalized ambiguity
:rtype: 2D np.ndarray (row, col) dtype = float32
"""

# Minimum and maximum of all costs, useful to normalize the cost volume
min_cost = np.nanmin(cv)
max_cost = np.nanmax(cv)
min_cost = np.nanmin(cv)

n_row, n_col, nb_disps = cv.shape

if type_measure_min:
extremum_cost = min_cost
else:
extremum_cost = max_cost

# Numba does not support the np.tile operation
two_dim_etas = np.repeat(etas, nb_disps).reshape((-1, nb_disps)).T.flatten()

Expand All @@ -226,7 +237,11 @@ def compute_ambiguity(
for row in prange(n_row): # pylint: disable=not-an-iterable
for col in prange(n_col): # pylint: disable=not-an-iterable
# Normalized minimum cost for one point
normalized_min_cost = (np.nanmin(cv[row, col, :]) - min_cost) / diff_cost

if type_measure_min:
normalized_min_cost = (np.nanmin(cv[row, col, :]) - extremum_cost) / diff_cost
else:
normalized_min_cost = (np.nanmax(cv[row, col, :]) - extremum_cost) / diff_cost

# If all costs are at nan, set the maximum value of the ambiguity for this point
if np.isnan(normalized_min_cost):
Expand All @@ -238,7 +253,7 @@ def compute_ambiguity(

normalized_min_cost = np.repeat(normalized_min_cost, nb_disps * nbr_etas)
# Normalized cost volume for one point
normalized_cv = (cv[row, col, :] - min_cost) / diff_cost
normalized_cv = (cv[row, col, :] - extremum_cost) / diff_cost

# Mask nan to -inf to increase the value of the ambiguity if a point contains nan costs
normalized_cv[idx_disp_min:idx_disp_max][
Expand All @@ -250,7 +265,10 @@ def compute_ambiguity(

normalized_cv = np.repeat(normalized_cv, nbr_etas)

ambiguity[row, col] += np.nansum(normalized_cv <= (normalized_min_cost + two_dim_etas))
if type_measure_min:
ambiguity[row, col] += np.nansum(normalized_cv <= (normalized_min_cost + two_dim_etas))
else:
ambiguity[row, col] += np.nansum(normalized_cv >= (normalized_min_cost - two_dim_etas))

return ambiguity

Expand Down
67 changes: 66 additions & 1 deletion tests/test_confidence/test_ambiguity.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ def test_ambiguity(create_img_for_confidence):
{"cost_volume": (["row", "col", "disp"], cv_)}, coords={"row": [0, 1], "col": [0, 1, 2], "disp": [-1, 0, 1]}
)

cv_.attrs["type_measure"] = "min"

ambiguity_ = confidence.AbstractCostVolumeConfidence(
**{"confidence_method": "ambiguity", "eta_max": 0.2, "eta_step": 0.1}
)
Expand Down Expand Up @@ -79,6 +81,8 @@ def test_ambiguity_without_normalization(create_img_for_confidence):
{"cost_volume": (["row", "col", "disp"], cv_)}, coords={"row": [0, 1], "col": [0, 1, 2], "disp": [-1, 0, 1]}
)

cv_.attrs["type_measure"] = "min"

ambiguity_ = confidence.AbstractCostVolumeConfidence(
**{"confidence_method": "ambiguity", "eta_max": 0.2, "eta_step": 0.1, "normalization": False}
)
Expand Down Expand Up @@ -150,7 +154,7 @@ def test_compute_ambiguity_with_variable_disparity(
etas = np.arange(0.0, 0.2, 0.1)
nbr_etas = etas.shape[0]

amb = ambiguity_.compute_ambiguity(cv_, etas, nbr_etas, grids, disparity_range)
amb = ambiguity_.compute_ambiguity(cv_, etas, nbr_etas, grids, disparity_range, type_measure_min=True)

# Ambiguity integral
gt_amb_int = np.array([[6.0, 4.0, 4.0, 4.0], [4.0, 4.0, 4.0, 6.0], [4.0, 4.0, 2.0, 4.0], [4.0, 4.0, 4.0, 4.0]])
Expand Down Expand Up @@ -225,3 +229,64 @@ def test_normalize_with_extremum(create_img_for_confidence):
amb_vt = np.copy(ambiguity) / ((2 - (-2)) * nbr_etas)

np.testing.assert_array_equal(amb_test, amb_vt)


def test_perfect_case_min(
    create_grids_and_disparity_range_with_variable_disparities, create_cv_for_variable_disparities
):
    """
    Ambiguity must give full confidence on a pixel with one sharp minimum
    when the matching cost measure is of type "min".
    """
    grids, disparity_range = create_grids_and_disparity_range_with_variable_disparities
    cost_volume = create_cv_for_variable_disparities

    # Turn pixel (1, 1) into an unambiguous point: a single clear minimum, flat costs elsewhere
    best_value = 0.1
    best_index = np.nanargmin(cost_volume[1, 1, :])
    cost_volume[1, 1, :] = np.full(3, 24)
    cost_volume[1, 1, best_index] = best_value

    ambiguity_ = confidence.AbstractCostVolumeConfidence(
        **{"confidence_method": "ambiguity", "eta_max": 0.2, "eta_step": 0.1}
    )

    raw_ambiguity = ambiguity_.compute_ambiguity(
        cost_volume, ambiguity_._etas, ambiguity_._nbr_etas, grids, disparity_range, type_measure_min=True
    )

    normalized_ambiguity = ambiguity_.normalize_with_percentile(raw_ambiguity)

    # Confidence is the complement of ambiguity; a perfect point must reach 1
    confidence_measure = 1 - normalized_ambiguity

    np.testing.assert_almost_equal(1.0, confidence_measure[1, 1])


def test_perfect_case_max(
    create_grids_and_disparity_range_with_variable_disparities, create_cv_for_variable_disparities
):
    """
    Ambiguity must give full confidence on a pixel with one sharp maximum
    when the matching cost measure is of type "max".
    """
    grids, disparity_range = create_grids_and_disparity_range_with_variable_disparities
    cost_volume = create_cv_for_variable_disparities

    # Turn pixel (1, 1) into an unambiguous point: a single clear maximum, flat costs elsewhere
    best_value = 20
    best_index = np.nanargmax(cost_volume[1, 1, :])
    cost_volume[1, 1, :] = np.full(3, -30)
    cost_volume[1, 1, best_index] = best_value

    ambiguity_ = confidence.AbstractCostVolumeConfidence(
        **{"confidence_method": "ambiguity", "eta_max": 0.2, "eta_step": 0.1}
    )

    raw_ambiguity = ambiguity_.compute_ambiguity(
        cost_volume, ambiguity_._etas, ambiguity_._nbr_etas, grids, disparity_range, type_measure_min=False
    )

    normalized_ambiguity = ambiguity_.normalize_with_percentile(raw_ambiguity)

    # Confidence is the complement of ambiguity; a perfect point must reach 1
    confidence_measure = 1 - normalized_ambiguity

    np.testing.assert_almost_equal(1.0, confidence_measure[1, 1])

0 comments on commit e589951

Please sign in to comment.