From 9450b5d0c1e7c0cef794de2c14dddad9628ae0a3 Mon Sep 17 00:00:00 2001
From: B1ueber2y
Date: Sat, 19 Oct 2024 15:42:37 +0200
Subject: [PATCH 1/2] upgrade black to 24.10.0

---
 .github/workflows/format-ubuntu.yml | 3 ++-
 scripts/format/black.sh             | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/format-ubuntu.yml b/.github/workflows/format-ubuntu.yml
index e3bc1879..f6b1118c 100644
--- a/.github/workflows/format-ubuntu.yml
+++ b/.github/workflows/format-ubuntu.yml
@@ -37,7 +37,8 @@ jobs:
             exit 0
           fi
           set +x -euo pipefail
-          sudo apt-get update && sudo apt-get install -y clang-format-14 black
+          sudo apt-get update && sudo apt-get install -y clang-format-14
+          python -m pip install black==24.10.0
           ./scripts/format/clang_format.sh
           ./scripts/format/black.sh
           git diff --name-only
diff --git a/scripts/format/black.sh b/scripts/format/black.sh
index ea0566a5..51d58e7e 100755
--- a/scripts/format/black.sh
+++ b/scripts/format/black.sh
@@ -4,7 +4,7 @@
 
 # Check version
 version_string=$(black --version | sed -E 's/^.*(\d+\.\d+-.*).*$/\1/')
-expected_version_string='21.12'
+expected_version_string='24.10.0'
 if [[ "$version_string" =~ "$expected_version_string" ]]; then
     echo "black version '$version_string' matches '$expected_version_string'"
 else

From f4ba99766c74608de188711c7eb941c51a635a14 Mon Sep 17 00:00:00 2001
From: B1ueber2y
Date: Sat, 19 Oct 2024 15:43:26 +0200
Subject: [PATCH 2/2] formatting.

---
 limap/base/unit_test.py                    |  4 ++--
 .../_pl_estimate_absolute_pose.py          | 15 ++++++---------
 limap/features/models/s2dnet.py            |  4 ++--
 limap/features/models/vggnet.py            |  2 +-
 limap/line2d/LineTR/line_attention.py      |  2 +-
 limap/line2d/LineTR/line_process.py        |  4 ++--
 limap/line2d/LineTR/line_transformer.py    |  2 +-
 limap/line2d/LineTR/linetr_pipeline.py     | 18 +++++++++---------
 limap/line2d/SOLD2/model/line_detection.py |  6 ++----
 limap/line2d/SOLD2/model/loss.py           |  2 +-
 limap/line2d/SOLD2/train.py                |  4 ++--
 limap/line2d/line_utils/merge_lines.py     |  4 ++--
 limap/point2d/superglue/superglue.py       |  2 +-
 limap/pointsfm/database.py                 |  2 +-
 limap/util/geometry.py                     |  6 +++---
 runners/inloc/localization.py              |  4 +---
 16 files changed, 37 insertions(+), 44 deletions(-)

diff --git a/limap/base/unit_test.py b/limap/base/unit_test.py
index 004f2f87..81235345 100644
--- a/limap/base/unit_test.py
+++ b/limap/base/unit_test.py
@@ -34,10 +34,10 @@ def report_error(imagecols_pred, imagecols):
         R_error = (
             imagecols_pred.camimage(img_id).R() - imagecols.camimage(img_id).R()
         )
-        R_error = np.sqrt(np.sum(R_error ** 2))
+        R_error = np.sqrt(np.sum(R_error**2))
         T_error = (
             imagecols_pred.camimage(img_id).T() - imagecols.camimage(img_id).T()
         )
-        T_error = np.sqrt(np.sum(T_error ** 2))
+        T_error = np.sqrt(np.sum(T_error**2))
         pose_errors.append(np.array([R_error, T_error]))
     print("pose_error: (R, T)", np.array(pose_errors).mean(0))
diff --git a/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py b/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py
index b66e7ae7..d0f53e48 100644
--- a/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py
+++ b/limap/estimators/absolute_pose/_pl_estimate_absolute_pose.py
@@ -110,15 +110,12 @@ def _pl_estimate_absolute_pose(
     ransac_options.data_type_weights_ = np.array(
         [ransac_cfg["weight_point"], ransac_cfg["weight_line"]]
     )
-    ransac_options.data_type_weights_ *= (
-        np.array(
-            [
-                ransac_options.squared_inlier_thresholds_[1],
-                ransac_options.squared_inlier_thresholds_[0],
-            ]
-        )
-        / np.sum(ransac_options.squared_inlier_thresholds_)
-    )
+    ransac_options.data_type_weights_ *= np.array(
+        [
+            ransac_options.squared_inlier_thresholds_[1],
+            ransac_options.squared_inlier_thresholds_[0],
+        ]
+    ) / np.sum(ransac_options.squared_inlier_thresholds_)
     ransac_options.min_num_iterations_ = ransac_cfg["min_num_iterations"]
     ransac_options.final_least_squares_ = ransac_cfg["final_least_squares"]
 
diff --git a/limap/features/models/s2dnet.py b/limap/features/models/s2dnet.py
index 6bc26f95..0b2378ee 100644
--- a/limap/features/models/s2dnet.py
+++ b/limap/features/models/s2dnet.py
@@ -66,7 +66,7 @@ def print_gpu_memory():
     a = torch.cuda.memory_allocated(0)
     f = r - a  # free inside reserved
 
-    print(np.array([t, r, a, f]) / 2 ** 30)
+    print(np.array([t, r, a, f]) / 2**30)
 
 
 class AdapLayers(nn.Module):
@@ -130,7 +130,7 @@ def _init(self, conf):
             if isinstance(layer, torch.nn.MaxPool2d):
                 current_scale += 1
             if i in self.hypercolumn_indices:
-                self.scales.append(2 ** current_scale)
+                self.scales.append(2**current_scale)
 
         self.adaptation_layers = AdapLayers(
             conf.hypercolumn_layers, conf.output_dim
diff --git a/limap/features/models/vggnet.py b/limap/features/models/vggnet.py
index 35b58563..f4ab7134 100644
--- a/limap/features/models/vggnet.py
+++ b/limap/features/models/vggnet.py
@@ -31,7 +31,7 @@ def _init(self, conf=default_conf):
            if isinstance(layer, torch.nn.MaxPool2d):
                current_scale += 1
            if i in self.hypercolumn_indices:
-                self.scales.append(2 ** current_scale)
+                self.scales.append(2**current_scale)
 
     def _forward(self, data):
         image = data  # data['image']
diff --git a/limap/line2d/LineTR/line_attention.py b/limap/line2d/LineTR/line_attention.py
index 786a96ba..7fcc79e9 100755
--- a/limap/line2d/LineTR/line_attention.py
+++ b/limap/line2d/LineTR/line_attention.py
@@ -38,7 +38,7 @@ def __init__(self, n_heads: int, d_feature: int, dropout=0.1):
         self.w_vs = nn.Linear(d_feature, n_heads * dim, bias=True)
         self.fc = nn.Linear(n_heads * dim, d_feature, bias=True)
 
-        self.attention = ScaledDotProduct(scale=dim ** 0.5)
+        self.attention = ScaledDotProduct(scale=dim**0.5)
         self.dropout = nn.Dropout(dropout)
         self.layer_norm = nn.LayerNorm(d_feature, eps=1e-6)
 
diff --git a/limap/line2d/LineTR/line_process.py b/limap/line2d/LineTR/line_process.py
index 8298fbc6..fabefc57 100755
--- a/limap/line2d/LineTR/line_process.py
+++ b/limap/line2d/LineTR/line_process.py
@@ -54,7 +54,7 @@ def point_on_line(line, dist_px):
     vec = ep - sp
     if vec[0] != 0:
         m = vec[1] / vec[0]
-        x = np.sqrt(dist_px ** 2 / (1 + m ** 2))
+        x = np.sqrt(dist_px**2 / (1 + m**2))
         y = m * x
     else:
         x = 0
@@ -275,7 +275,7 @@ def change_cv2_T_np(klines_cv):
         kline_ep = [sp_x, sp_y]
 
         # linelength = math.sqrt((kline_ep[0]-kline_sp[0])**2 +(kline_ep[1]-kline_sp[1])**2)
-        linelength = line.lineLength * (2 ** line.octave)
+        linelength = line.lineLength * (2**line.octave)
 
         klines_sp.append(kline_sp)
         klines_ep.append(kline_ep)
diff --git a/limap/line2d/LineTR/line_transformer.py b/limap/line2d/LineTR/line_transformer.py
index bb54125c..6860a319 100755
--- a/limap/line2d/LineTR/line_transformer.py
+++ b/limap/line2d/LineTR/line_transformer.py
@@ -186,7 +186,7 @@ def forward(
 def attention(query, key, value):
     dim = query.shape[1]
     scores = (
-        torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
+        torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
     )  # [3, 64, 4, 512] -> [3, 4, 512, 512]
     prob = torch.nn.functional.softmax(scores, dim=-1)
     return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob
diff --git a/limap/line2d/LineTR/linetr_pipeline.py b/limap/line2d/LineTR/linetr_pipeline.py
index 678ece39..cb1fa231 100755
--- a/limap/line2d/LineTR/linetr_pipeline.py
+++ b/limap/line2d/LineTR/linetr_pipeline.py
@@ -308,21 +308,21 @@ def process_siamese(data, i):
         assert match_mat.shape[0] == 1
         bool_match_mat = match_mat[0] > 0
         pred["line_matches0"] = np.argmax(bool_match_mat, axis=1)
-        pred["line_matches0"][
-            ~np.any(bool_match_mat, axis=1)
-        ] = UNMATCHED_FEATURE
+        pred["line_matches0"][~np.any(bool_match_mat, axis=1)] = (
+            UNMATCHED_FEATURE
+        )
         pred["line_matches1"] = np.argmax(bool_match_mat, axis=0)
-        pred["line_matches1"][
-            ~np.any(bool_match_mat, axis=0)
-        ] = UNMATCHED_FEATURE
+        pred["line_matches1"][~np.any(bool_match_mat, axis=0)] = (
+            UNMATCHED_FEATURE
+        )
         pred["line_matches0"] = torch.from_numpy(pred["line_matches0"])[None]
         pred["line_matches1"] = torch.from_numpy(pred["line_matches1"])[None]
         lmatch_scores = torch.from_numpy(
             distance_matrix[(0,) + np.where(match_mat[0] > 0)]
         )
-        pred["line_match_scores0"] = pred[
-            "line_match_scores1"
-        ] = -lmatch_scores[None]
+        pred["line_match_scores0"] = pred["line_match_scores1"] = (
+            -lmatch_scores[None]
+        )
         return pred
 
     def loss(self, pred, data):
diff --git a/limap/line2d/SOLD2/model/line_detection.py b/limap/line2d/SOLD2/model/line_detection.py
index 90456c19..79d911bb 100644
--- a/limap/line2d/SOLD2/model/line_detection.py
+++ b/limap/line2d/SOLD2/model/line_detection.py
@@ -178,9 +178,7 @@ def detect(self, junctions, heatmap, device=torch.device("cpu")):
                 dim=-1,
             )
         )
-        normalized_seg_length = segments_length / (
-            ((H ** 2) + (W ** 2)) ** 0.5
-        )
+        normalized_seg_length = segments_length / (((H**2) + (W**2)) ** 0.5)
 
         # Perform local max search
         num_cand = cand_h.shape[0]
@@ -552,7 +550,7 @@ def detect_local_max(
         """Detection by local maximum search."""
         # Compute the distance threshold
         dist_thresh = (
-            0.5 * (2 ** 0.5) + self.lambda_radius * normalized_seg_length
+            0.5 * (2**0.5) + self.lambda_radius * normalized_seg_length
         )
         # Make it N x 64
         dist_thresh = torch.repeat_interleave(
diff --git a/limap/line2d/SOLD2/model/loss.py b/limap/line2d/SOLD2/model/loss.py
index 4c5e7525..2a3f2354 100644
--- a/limap/line2d/SOLD2/model/loss.py
+++ b/limap/line2d/SOLD2/model/loss.py
@@ -154,7 +154,7 @@ def space_to_depth(input_tensor, grid_size):
     # (N, bs, bs, C, H//bs, W//bs)
     x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
     # (N, C*bs^2, H//bs, W//bs)
-    x = x.view(N, C * (grid_size ** 2), H // grid_size, W // grid_size)
+    x = x.view(N, C * (grid_size**2), H // grid_size, W // grid_size)
     return x
 
 
diff --git a/limap/line2d/SOLD2/train.py b/limap/line2d/SOLD2/train.py
index 206a700b..10af3b80 100644
--- a/limap/line2d/SOLD2/train.py
+++ b/limap/line2d/SOLD2/train.py
@@ -373,7 +373,7 @@ def train_single_epoch(
         results = metric_func.metric_results
         average = average_meter.average()
         # Get gpu memory usage in GB
-        gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
+        gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
         if compute_descriptors:
             print(
                 "Epoch [%d / %d] Iter [%d / %d] loss=%.4f (%.4f), junc_loss=%.4f (%.4f), heatmap_loss=%.4f (%.4f), descriptor_loss=%.4f (%.4f), gpu_mem=%.4fGB"
@@ -734,7 +734,7 @@ def record_train_summaries(writer, global_step, scalars, images):
 
     # GPU memory part
     # Get gpu memory usage in GB
-    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024 ** 3)
+    gpu_mem_usage = torch.cuda.max_memory_allocated() / (1024**3)
     writer.add_scalar("GPU/GPU_memory_usage", gpu_mem_usage, global_step)
 
     # Loss part
diff --git a/limap/line2d/line_utils/merge_lines.py b/limap/line2d/line_utils/merge_lines.py
index d3cc93ed..2184ec78 100644
--- a/limap/line2d/line_utils/merge_lines.py
+++ b/limap/line2d/line_utils/merge_lines.py
@@ -104,8 +104,8 @@ def merge_line_cluster(lines):
     if b == 0:
         u = np.array([1, 0]) if a >= c else np.array([0, 1])
     else:
-        m = (c - a + np.sqrt((a - c) ** 2 + 4 * b ** 2)) / (2 * b)
-        u = np.array([1, m]) / np.sqrt(1 + m ** 2)
+        m = (c - a + np.sqrt((a - c) ** 2 + 4 * b**2)) / (2 * b)
+        u = np.array([1, m]) / np.sqrt(1 + m**2)
 
     # Get the center of gravity of all endpoints
     cross = np.mean(points, axis=0)
diff --git a/limap/point2d/superglue/superglue.py b/limap/point2d/superglue/superglue.py
index 27f33a8d..b83ee5bf 100644
--- a/limap/point2d/superglue/superglue.py
+++ b/limap/point2d/superglue/superglue.py
@@ -91,7 +91,7 @@ def attention(
     query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
 ) -> Tuple[torch.Tensor, torch.Tensor]:
     dim = query.shape[1]
-    scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim ** 0.5
+    scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5
     prob = torch.nn.functional.softmax(scores, dim=-1)
     return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob
 
diff --git a/limap/pointsfm/database.py b/limap/pointsfm/database.py
index c7f49914..9c21e4df 100644
--- a/limap/pointsfm/database.py
+++ b/limap/pointsfm/database.py
@@ -38,7 +38,7 @@
 
 IS_PYTHON3 = sys.version_info[0] >= 3
 
-MAX_IMAGE_ID = 2 ** 31 - 1
+MAX_IMAGE_ID = 2**31 - 1
 
 CREATE_CAMERAS_TABLE = """CREATE TABLE IF NOT EXISTS cameras (
     camera_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
diff --git a/limap/util/geometry.py b/limap/util/geometry.py
index 2620050e..d446a83b 100644
--- a/limap/util/geometry.py
+++ b/limap/util/geometry.py
@@ -48,13 +48,13 @@ def rotation_from_quaternion(quad):
     quad = quad / norm
     qr, qi, qj, qk = quad[0], quad[1], quad[2], quad[3]
     rot_mat = np.zeros((3, 3))
-    rot_mat[0, 0] = 1 - 2 * (qj ** 2 + qk ** 2)
+    rot_mat[0, 0] = 1 - 2 * (qj**2 + qk**2)
     rot_mat[0, 1] = 2 * (qi * qj - qk * qr)
     rot_mat[0, 2] = 2 * (qi * qk + qj * qr)
     rot_mat[1, 0] = 2 * (qi * qj + qk * qr)
-    rot_mat[1, 1] = 1 - 2 * (qi ** 2 + qk ** 2)
+    rot_mat[1, 1] = 1 - 2 * (qi**2 + qk**2)
     rot_mat[1, 2] = 2 * (qj * qk - qi * qr)
     rot_mat[2, 0] = 2 * (qi * qk - qj * qr)
     rot_mat[2, 1] = 2 * (qj * qk + qi * qr)
-    rot_mat[2, 2] = 1 - 2 * (qi ** 2 + qj ** 2)
+    rot_mat[2, 2] = 1 - 2 * (qi**2 + qj**2)
     return rot_mat
diff --git a/runners/inloc/localization.py b/runners/inloc/localization.py
index cfc3e2de..4e0b9938 100644
--- a/runners/inloc/localization.py
+++ b/runners/inloc/localization.py
@@ -98,9 +98,7 @@ def parse_config():
     # Output folder for LIMAP linetracks (in tmp)
     if cfg["output_folder"] is None:
         cfg["output_folder"] = "finaltracks"
-    cfg[
-        "inloc_dataset"
-    ] = (
+    cfg["inloc_dataset"] = (
         args.dataset
     )  # For reading camera poses for estimating 3D lines fron depth
     return cfg, args
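
Almost every hunk in PATCH 2/2 follows from two rules that black promoted to its stable style between 21.12 and 24.10.0: spaces around the power operator are dropped when both operands are simple (stable since 22.1.0), and an assignment that overflows the line length is split by parenthesizing its right-hand side instead of breaking inside the subscript (stable since the 24.x series). A minimal sketch of both rewrites, using identifiers borrowed from the hunks above with placeholder inputs for illustration:

    import numpy as np

    # Placeholder inputs, for illustration only.
    UNMATCHED_FEATURE = -1
    bool_match_mat = np.zeros((4, 4), dtype=bool)
    pred = {"line_matches0": np.argmax(bool_match_mat, axis=1)}
    R_error = np.zeros((3, 3))

    # Simple power operands lose their surrounding spaces:
    # 21.12:  R_error = np.sqrt(np.sum(R_error ** 2))
    # 24.10:  R_error = np.sqrt(np.sum(R_error**2))
    R_error = np.sqrt(np.sum(R_error**2))

    # For an assignment that exceeds the line length at its original
    # indentation, black now parenthesizes the right-hand side rather
    # than splitting inside the subscript:
    pred["line_matches0"][~np.any(bool_match_mat, axis=1)] = (
        UNMATCHED_FEATURE
    )

Installing the pinned formatter with "python -m pip install black==24.10.0" and then running ./scripts/format/black.sh, exactly as the updated workflow does, should reproduce PATCH 2/2 without further hand edits.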