
Remove correspondences from translation averaging outliers downstream #755

Merged · 4 commits · Jan 23, 2024
5 changes: 3 additions & 2 deletions gtsfm/averaging/translation/averaging_1dsfm.py
@@ -491,7 +491,7 @@ def run_translation_averaging(
i2Ti1_priors: Dict[Tuple[int, int], PosePrior] = {},
scale_factor: float = 1.0,
gt_wTi_list: List[Optional[Pose3]] = [],
) -> Tuple[List[Optional[Pose3]], Optional[GtsfmMetricsGroup]]:
) -> Tuple[List[Optional[Pose3]], Optional[GtsfmMetricsGroup], Optional[List[Tuple[int, int]]]]:
"""Run the translation averaging.

Args:
@@ -508,6 +508,7 @@ def run_translation_averaging(
may contain `None` where the global translations could not be computed (either underconstrained system
or ill-constrained system).
A GtsfmMetricsGroup of 1DSfM metrics.
List of camera pair indices classified as inliers by 1DSfM.
"""
logger.info("Running translation averaging on %d unit translations", len(i2Ui1_dict))

@@ -565,7 +566,7 @@
ta_metrics.add_metric(GtsfmMetric("outlier_rejection_duration_sec", inlier_computation_time))
ta_metrics.add_metric(GtsfmMetric("optimization_duration_sec", averaging_time))

return wTi_list, ta_metrics
return wTi_list, ta_metrics, list(w_i2Ui1_dict_inliers.keys())


def compute_metrics(
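For context on the new three-value return above, here is a minimal caller sketch. It assumes the class exported by `averaging_1dsfm.py` is `TranslationAveraging1DSFM` with a default constructor, and uses toy, self-consistent inputs purely to show the unpacking; it is not part of this diff.

```python
import numpy as np
from gtsam import Rot3, Unit3

from gtsfm.averaging.translation.averaging_1dsfm import TranslationAveraging1DSFM

# Toy setup: three cameras with identity rotations at (0,0,0), (1,0,0), (0,1,0).
wRi_list = [Rot3(), Rot3(), Rot3()]
# i2Ui1 is the unit translation of camera i1 expressed in camera i2's frame;
# with identity rotations this is just the normalized difference wt_i1 - wt_i2.
i2Ui1_dict = {
    (0, 1): Unit3(np.array([-1.0, 0.0, 0.0])),
    (1, 2): Unit3(np.array([1.0, -1.0, 0.0])),
    (0, 2): Unit3(np.array([0.0, -1.0, 0.0])),
}

ta = TranslationAveraging1DSFM()
# New in this PR: a third return value listing the (i1, i2) pairs kept as inliers.
wTi_list, ta_metrics, inlier_pairs = ta.run_translation_averaging(
    len(wRi_list), i2Ui1_dict, wRi_list
)
print(inlier_pairs)  # e.g. [(0, 1), (0, 2), (1, 2)]
```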
9 changes: 6 additions & 3 deletions gtsfm/averaging/translation/translation_averaging_base.py
@@ -58,7 +58,7 @@ def run_translation_averaging(
i2Ti1_priors: Dict[Tuple[int, int], PosePrior] = {},
scale_factor: float = 1.0,
gt_wTi_list: List[Optional[Pose3]] = [],
) -> Tuple[List[Optional[Pose3]], Optional[GtsfmMetricsGroup]]:
) -> Tuple[List[Optional[Pose3]], Optional[GtsfmMetricsGroup], Optional[List[Tuple[int, int]]]]:
"""Run the translation averaging, and combine the estimated global translations with global rotations.

Args:
@@ -76,6 +76,8 @@
Global camera poses wTi. The number of entries in the list is `num_images`. The list
may contain `None` where the global translations could not be computed (either underconstrained system
or ill-constrained system).
A GtsfmMetricsGroup with translation averaging metrics.
Indices of inlier measurements (list of camera pair indices).
"""

def create_computation_graph(
@@ -89,7 +91,7 @@ def create_computation_graph(
i2Ti1_priors: Dict[Tuple[int, int], PosePrior] = {},
scale_factor: float = 1.0,
gt_wTi_list: List[Optional[Pose3]] = [],
) -> Tuple[Delayed, Delayed]:
) -> Tuple[Delayed, Delayed, Delayed]:
"""Create the computation graph for performing translation averaging.

Args:
Expand All @@ -106,8 +108,9 @@ def create_computation_graph(
Returns:
Global poses wrapped as Delayed.
A GtsfmMetricsGroup with translation averaging metrics wrapped as Delayed.
Indices of inlier measurements (List[Tuple[int, int]]) after running 1DSfM, wrapped as Delayed.
"""
return dask.delayed(self.run_translation_averaging, nout=2)(
return dask.delayed(self.run_translation_averaging, nout=3)(
num_images=num_images,
i2Ui1_dict=i2Ui1_graph,
wRi_list=wRi_graph,
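The `nout=3` change above is what lets callers unpack three separate `Delayed` objects at graph-construction time. A self-contained sketch of that dask pattern (the stand-in function below is illustrative, not GTSFM code):

```python
import dask


def _fake_run_translation_averaging():
    # Stand-in with the same return shape as run_translation_averaging:
    # (global poses, metrics group, inlier camera-pair indices).
    return ["wT0", "wT1"], "metrics", [(0, 1)]


# nout=3 tells dask the delayed call returns a 3-tuple, so it can be unpacked
# into three Delayed objects before any computation runs.
wTi_graph, metrics_graph, inlier_idx_graph = dask.delayed(
    _fake_run_translation_averaging, nout=3
)()

with dask.config.set(scheduler="single-threaded"):
    wTi, metrics, inlier_idxs = dask.compute(wTi_graph, metrics_graph, inlier_idx_graph)
print(inlier_idxs)  # [(0, 1)]
```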
20 changes: 18 additions & 2 deletions gtsfm/multi_view_optimizer.py
@@ -128,7 +128,7 @@ def create_computation_graph(
)
tracks2d_graph = dask.delayed(get_2d_tracks)(viewgraph_v_corr_idxs_graph, keypoints_list)

wTi_graph, ta_metrics = self.trans_avg_module.create_computation_graph(
wTi_graph, ta_metrics, ta_inlier_idx_i1_i2 = self.trans_avg_module.create_computation_graph(
num_images,
pruned_i2Ui1_graph,
delayed_wRi,
@@ -138,13 +138,16 @@
relative_pose_priors,
gt_wTi_list=gt_wTi_list,
)
ta_v_corr_idxs_graph = dask.delayed(filter_corr_by_idx)(viewgraph_v_corr_idxs_graph, ta_inlier_idx_i1_i2)
ta_inlier_tracks_2d_graph = dask.delayed(get_2d_tracks)(ta_v_corr_idxs_graph, keypoints_list)
# TODO(akshay-krishnan): update pose priors also with the same inlier indices, right now these are unused.

init_cameras_graph = dask.delayed(init_cameras)(wTi_graph, all_intrinsics)

ba_input_graph, data_assoc_metrics_graph = self.data_association_module.create_computation_graph(
num_images,
init_cameras_graph,
tracks2d_graph,
ta_inlier_tracks_2d_graph,
cameras_gt,
relative_pose_priors,
images,
@@ -196,3 +199,16 @@ def get_2d_tracks(
) -> List[SfmTrack2d]:
tracks_estimator = CppDsfTracksEstimator()
return tracks_estimator.run(corr_idxs_dict, keypoints_list)


def filter_corr_by_idx(correspondences: Dict[Tuple[int, int], np.ndarray], idxs: List[Tuple[int, int]]):
"""Filter correspondences by indices.

Args:
correspondences: Correspondences as a dictionary.
idxs: Indices to filter by.

Returns:
Filtered correspondences.
"""
return {k: v for k, v in correspondences.items() if k in idxs}
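A quick usage sketch for the new helper above, with toy correspondence arrays (shape (N, 2) keypoint-index pairs) standing in for real GTSFM data:

```python
import numpy as np

from gtsfm.multi_view_optimizer import filter_corr_by_idx

correspondences = {
    (0, 1): np.array([[0, 3], [1, 4]]),  # keypoint-index pairs between images 0 and 1
    (1, 2): np.array([[2, 5]]),
    (0, 2): np.array([[7, 9]]),
}
ta_inlier_pairs = [(0, 1), (1, 2)]  # pairs kept by translation-averaging outlier rejection

filtered = filter_corr_by_idx(correspondences, ta_inlier_pairs)
assert set(filtered.keys()) == {(0, 1), (1, 2)}
```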
8 changes: 4 additions & 4 deletions tests/averaging/translation/test_averaging_1dsfm.py
@@ -129,7 +129,7 @@ def __execute_test(
) -> None:
"""Helper function to run the averagaing and assert w/ expected."""

wTi_computed, _ = self.obj.run_translation_averaging(len(wRi_input), i2Ui1_input, wRi_input)
wTi_computed, _, _ = self.obj.run_translation_averaging(len(wRi_input), i2Ui1_input, wRi_input)
wTi_expected = [Pose3(wRi, wti) for wRi, wti in zip(wRi_input, wti_expected)]
self.assertTrue(
geometry_comparisons.compare_global_poses(
@@ -194,7 +194,7 @@ def test_computation_graph(self):
i2Ui1_dict[(i1, i2)] = Unit3(expected_wTi_list[i2].between(expected_wTi_list[i1]).translation())

# use the `run` API to get expected results, ignore the metrics
wTi_expected, _ = self.obj.run_translation_averaging(len(wRi_list), i2Ui1_dict, wRi_list)
wTi_expected, _, _ = self.obj.run_translation_averaging(len(wRi_list), i2Ui1_dict, wRi_list)

# Form computation graph and execute
i2Ui1_graph = dask.delayed(i2Ui1_dict)
@@ -205,7 +205,7 @@
wRi_graph,
)
with dask.config.set(scheduler="single-threaded"):
wTi_computed, _ = dask.compute(computation_graph)[0]
wTi_computed, _, _ = dask.compute(computation_graph)[0]

self.assertTrue(
geometry_comparisons.compare_global_poses(
@@ -285,7 +285,7 @@ def test_outlier_case_missing_value(self) -> None:
(3, 4): np.array([0.994791, -0.033332, -0.0963361]),
}
i2Ui1_input = {(i, j): Unit3(t) for (i, j), t in i2Ui1_input.items()}
wTi_computed, _ = self.obj.run_translation_averaging(len(wRi_input), i2Ui1_input, wRi_input)
wTi_computed, _, _ = self.obj.run_translation_averaging(len(wRi_input), i2Ui1_input, wRi_input)

assert len(wTi_computed) == 5
assert wTi_computed[-1] is None