Fix some linting issues (#90)
* Fix SIM113

* All SIM errors fixed

* fix B007.

* fix E402.

* fix E741.

* minor.

* move B904 to file-specific.

* fix B028.

* file-specific UP031.

* fix E722.

* minor.

* minor.

* fix s2dnet and vggnet linting.

* fix s2dnet linting.

* specific.

---------

Co-authored-by: Yifan Yu <[email protected]>
B1ueber2y and MarkYu98 authored Oct 19, 2024
1 parent 542a9b2 commit 64530fc
Showing 31 changed files with 129 additions and 128 deletions.
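
For readers unfamiliar with the rule codes in the commit message: SIM113 flags manual counters that should be enumerate(), B007 flags unused loop control variables, E402 flags module-level imports that are not at the top of the file, E741 flags ambiguous single-letter names such as l, B904 asks that exceptions re-raised inside an except block use "raise ... from err", B028 asks for an explicit stacklevel in warnings.warn, UP031 flags printf-style % formatting, and E722 flags bare except clauses. As background for the two rules the message moves to file-specific handling rather than fixing globally (B904 and UP031), here is a minimal sketch not taken from the repository:

    def parse_scale(text):
        # UP031: prefer f-strings over printf-style "%s" formatting.
        # B904: re-raise with "from err" so the original traceback is kept.
        try:
            return float(text)
        except ValueError as err:
            raise ValueError(f"invalid scale: {text!r}") from err

    print(parse_scale("0.5"))
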
4 changes: 2 additions & 2 deletions limap/base/align.py
@@ -130,8 +130,8 @@ def read_trans(fname):
         lines = f.readlines()
         mat = []
         for idx in range(4):
-            l = lines[idx].strip("\n").split()
-            mat.append([float(k) for k in l])
+            line = lines[idx].strip("\n").split()
+            mat.append([float(k) for k in line])
         mat = np.array(mat)
         assert np.all(mat[3, :] == np.array([0, 0, 0, 1]))
         return mat[:3, :]
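
The rename above is the E741 pattern: a single-letter l reads like 1 or I. A self-contained sketch of the same before/after, using made-up input rows rather than the repository's transform files:

    rows = ["1 0 0 0", "0 1 0 0", "0 0 1 0", "0 0 0 1"]  # hypothetical 4x4 transform

    mat = []
    for idx in range(4):
        line = rows[idx].strip("\n").split()  # previously: l = ...
        mat.append([float(k) for k in line])
    print(mat[3])  # [0.0, 0.0, 0.0, 1.0]
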
7 changes: 3 additions & 4 deletions limap/features/models/s2dnet.py
@@ -70,13 +70,13 @@ def __init__(self, hypercolumn_layers: List[str], output_dim: int = 128):
             hypercolumn_layers: The list of the hypercolumn layer names.
             output_dim: The output channel dimension.
         """
-        super(AdapLayers, self).__init__()
+        super().__init__()
         self.layers = []
         channel_sizes = [vgg16_layers[name] for name in hypercolumn_layers]
         print(channel_sizes)
-        for i, l in enumerate(channel_sizes):
+        for i, ll in enumerate(channel_sizes):
             layer = nn.Sequential(
-                nn.Conv2d(l, 64, kernel_size=1, stride=1, padding=0),
+                nn.Conv2d(ll, 64, kernel_size=1, stride=1, padding=0),
                 nn.ReLU(),
                 nn.Conv2d(64, output_dim, kernel_size=5, stride=1, padding=2),
                 nn.BatchNorm2d(output_dim),
@@ -134,7 +134,6 @@ def _init(self, conf):
         self.download_s2dnet_model(path)
         logging.info(f"Loading S2DNet checkpoint at {path}.")
         state_dict = torch.load(path, map_location="cpu")["state_dict"]
-        params = self.state_dict()
         state_dict = {k: v for k, v in state_dict.items()}
         self.load_state_dict(state_dict, strict=False)

3 changes: 2 additions & 1 deletion limap/features/models/vggnet.py
@@ -2,7 +2,8 @@
 import torch.nn as nn
 import torchvision.models as models

-from limap.features.models.s2dnet import *
+from .base_model import BaseModel
+from .s2dnet import vgg16_layers


 class VGGNet(BaseModel):
3 changes: 1 addition & 2 deletions limap/line2d/L2D2/RAL_net_cov.py
@@ -68,8 +68,7 @@ def weights_init(m):
         nn.init.orthogonal_(m.weight.data, gain=0.6)
         try:
             nn.init.constant_(m.bias.data, 0.01)
-
-        except:
+        except AttributeError:
             pass
     return

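
The change above replaces a bare except (E722) with the one exception this initializer can realistically hit: layers created with bias=False have their bias set to None, so .data raises AttributeError. A minimal sketch of the pattern under that assumption; the isinstance guard is illustrative, not the repository's exact initializer:

    import torch.nn as nn

    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            nn.init.orthogonal_(m.weight.data, gain=0.6)
            try:
                nn.init.constant_(m.bias.data, 0.01)
            except AttributeError:  # bias is None for Conv2d(..., bias=False)
                pass

    weights_init(nn.Conv2d(3, 8, kernel_size=3, bias=False))
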
4 changes: 2 additions & 2 deletions limap/line2d/L2D2/extractor.py
@@ -123,8 +123,8 @@ def compute_descinfo(self, img, segs):
             return {"line_descriptors": np.empty((0, 128))}

         patches, line_desc = [], []
-        for i, l in enumerate(lines):
-            patches.append(self.get_patch(img, l))
+        for i, line in enumerate(lines):
+            patches.append(self.get_patch(img, line))

             if (i + 1) % self.mini_batch == 0 or i == len(lines) - 1:
                 # Extract the descriptors
6 changes: 3 additions & 3 deletions limap/line2d/LBD/extractor.py
@@ -18,7 +18,7 @@ def process_pyramid(
     cur_sigma2 = 1.0
     pyramid = []
     multiscale_segs = []
-    for i in range(n_levels):
+    for _ in range(n_levels):
         increase_sigma = np.sqrt(cur_sigma2 - pre_sigma2)
         blurred = cv2.GaussianBlur(
             octave_img, (5, 5), increase_sigma, borderType=cv2.BORDER_REPLICATE
@@ -47,8 +47,8 @@ def process_pyramid(

 def to_multiscale_lines(lines):
     ms_lines = []
-    for l in lines.reshape(-1, 4):
-        ll = np.append(l, [0, np.linalg.norm(l[:2] - l[2:4])])
+    for line in lines.reshape(-1, 4):
+        ll = np.append(line, [0, np.linalg.norm(line[:2] - line[2:4])])
         ms_lines.append(
             [(0, ll)] + [(i, ll / (i * np.sqrt(2))) for i in range(1, 5)]
         )
7 changes: 4 additions & 3 deletions limap/line2d/LineTR/line_transformer.py
@@ -210,10 +210,10 @@ def __init__(self, num_heads: int, d_model: int):
     def forward(self, query, key, value):
         batch_dim = query.size(0)
         query, key, value = (
-            l(x).view(
+            layer(x).view(
                 batch_dim, self.dim, self.num_heads, -1
             )  # [3, 64, 4, 512]
-            for l, x in zip(self.proj, (query, key, value))
+            for layer, x in zip(self.proj, (query, key, value))
         )
         x, prob = attention(query, key, value)
         return (
@@ -257,7 +257,8 @@ def __init__(self, feature_dim: int, layer_names: list):
     def forward(self, kline_desc):
         d_desc_kline = kline_desc.size(2)

-        for layer, name in zip(self.layers, self.names):
+        for layer in self.layers:
+            # for layer, name in zip(self.layers, self.names):
             delta, _ = layer(kline_desc, kline_desc)
             kline_desc = kline_desc + delta

4 changes: 2 additions & 2 deletions limap/line2d/SOLD2/model/metrics.py
@@ -577,15 +577,15 @@ def nms_fast(in_corners, H, W, dist_thresh):
         out = np.vstack((rcorners, in_corners[2])).reshape(3, 1)
         return out, np.zeros(1).astype(int)
     # Initialize the grid.
-    for i, rc in enumerate(rcorners.T):
+    for i, _ in enumerate(rcorners.T):
         grid[rcorners[1, i], rcorners[0, i]] = 1
         inds[rcorners[1, i], rcorners[0, i]] = i
     # Pad the border of the grid, so that we can NMS points near the border.
     pad = dist_thresh
     grid = np.pad(grid, ((pad, pad), (pad, pad)), mode="constant")
     # Iterate through points, highest to lowest conf, suppress neighborhood.
     count = 0
-    for i, rc in enumerate(rcorners.T):
+    for rc in rcorners.T:
         # Account for top and left padding.
         pt = (rc[0] + pad, rc[1] + pad)
         if grid[pt[1], pt[0]] == 1:  # If not yet suppressed.
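
Both loops above are B007 fixes: a loop variable that is never read is renamed to _ (or the enumerate is dropped entirely). A standalone sketch of the first pattern with a made-up 2 x N corner array (x and y rows):

    import numpy as np

    rcorners = np.array([[10, 20, 30], [5, 6, 7]])  # hypothetical x/y corner coords
    grid = np.zeros((50, 50), dtype=int)
    inds = np.zeros((50, 50), dtype=int)

    for i, _ in enumerate(rcorners.T):  # the column itself is unused here
        grid[rcorners[1, i], rcorners[0, i]] = 1
        inds[rcorners[1, i], rcorners[0, i]] = i
    print(int(grid.sum()))  # 3
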
6 changes: 3 additions & 3 deletions limap/line2d/SOLD2/model/nets/lcnn_hourglass.py
@@ -81,15 +81,15 @@ def __init__(self, block, num_blocks, planes, depth):

     def _make_residual(self, block, num_blocks, planes):
         layers = []
-        for i in range(0, num_blocks):
+        for _ in range(0, num_blocks):
             layers.append(block(planes * block.expansion, planes))
         return nn.Sequential(*layers)

     def _make_hour_glass(self, block, num_blocks, planes, depth):
         hg = []
         for i in range(depth):
             res = []
-            for j in range(3):
+            for _ in range(3):
                 res.append(self._make_residual(block, num_blocks, planes))
             if i == 0:
                 res.append(self._make_residual(block, num_blocks, planes))
@@ -183,7 +183,7 @@ def _make_residual(self, block, planes, blocks, stride=1):
         layers = []
         layers.append(block(self.inplanes, planes, stride, downsample))
         self.inplanes = planes * block.expansion
-        for i in range(1, blocks):
+        for _ in range(1, blocks):
             layers.append(block(self.inplanes, planes))

         return nn.Sequential(*layers)
2 changes: 1 addition & 1 deletion limap/line2d/SOLD2/train.py
@@ -40,7 +40,7 @@ def restore_weights(model, state_dict, strict=True):
     try:
         model.load_state_dict(state_dict, strict=strict)
     # Deal with some version compatibility issue (catch version incompatible)
-    except:
+    except KeyError:
         err = model.load_state_dict(state_dict, strict=False)

         # missing keys are those in model but not in state_dict
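
The narrowed exception above keeps the fallback to a non-strict load. A minimal sketch of that fallback; note that torch's load_state_dict typically raises RuntimeError for mismatched keys, so the tuple below is an assumption that covers both cases rather than the repository's exact handling:

    import torch
    import torch.nn as nn

    def restore_weights(model, state_dict, strict=True):
        try:
            model.load_state_dict(state_dict, strict=strict)
        except (KeyError, RuntimeError):
            # Version-compatibility fallback: load whatever keys do match.
            model.load_state_dict(state_dict, strict=False)
        return model

    model = nn.Linear(4, 2)
    restore_weights(model, {"weight": torch.zeros(2, 4)}, strict=True)  # bias missing
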
5 changes: 0 additions & 5 deletions limap/optimize/functions.py
@@ -1,11 +1,6 @@
-import os
-
-import cv2
 import numpy as np
-from tqdm import tqdm
-
 import limap.base as _base
 import limap.visualize as limapvis


 def unit_test_add_noise_to_track(track):
2 changes: 1 addition & 1 deletion limap/optimize/line_refinement/line_refinement.py
@@ -114,7 +114,7 @@ def line_refinement(
     # output
     newtracks = []
     counter = 0
-    for idx, track in enumerate(tracks):
+    for track in tracks:
         if track.count_images() < n_visible_views:
             newtracks.append(track)
         else:
4 changes: 2 additions & 2 deletions limap/point2d/superglue/superglue.py
@@ -112,8 +112,8 @@ def forward(
     ) -> torch.Tensor:
         batch_dim = query.size(0)
         query, key, value = (
-            l(x).view(batch_dim, self.dim, self.num_heads, -1)
-            for l, x in zip(self.proj, (query, key, value))
+            layer(x).view(batch_dim, self.dim, self.num_heads, -1)
+            for layer, x in zip(self.proj, (query, key, value))
         )
         x, _ = attention(query, key, value)
         return self.merge(
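
For context, the renamed generator above projects query/key/value through per-tensor 1x1 convolutions and reshapes each to [batch, dim, heads, tokens]. A self-contained sketch of that shape flow with made-up dimensions (the real module wires this up in its constructor):

    import torch
    import torch.nn as nn

    num_heads, d_model = 4, 256
    dim = d_model // num_heads
    proj = nn.ModuleList([nn.Conv1d(d_model, d_model, kernel_size=1) for _ in range(3)])

    query = key = value = torch.randn(2, d_model, 100)  # [batch, channels, tokens]
    query, key, value = (
        layer(x).view(2, dim, num_heads, -1)
        for layer, x in zip(proj, (query, key, value))
    )
    print(query.shape)  # torch.Size([2, 64, 4, 100])
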
9 changes: 4 additions & 5 deletions limap/point2d/superpoint/main.py
@@ -1,20 +1,19 @@
+import collections.abc as collections
 import pprint
 from pathlib import Path
 from typing import Dict, List, Optional, Union

 import h5py
 import numpy as np
 import torch
-from tqdm import tqdm
-
-string_classes = str
-import collections.abc as collections
-
 from hloc import extract_features
 from hloc.utils.io import list_h5_names
+from tqdm import tqdm

 from .superpoint import SuperPoint

+string_classes = str
+

 # Copy from legacy hloc code
 def map_tensor(input_, func):
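
The reshuffle above resolves E402: once string_classes = str executes, any import below it is a module-level import that is not at the top of the file. A minimal standalone illustration (module names are placeholders, not the repository's):

    # before (triggers E402):
    #     import torch
    #     string_classes = str          # executable statement between imports
    #     import collections.abc as collections
    #
    # after: group every import first, then module-level assignments.
    import collections.abc as collections
    import pprint

    string_classes = str
    pprint.pprint({"string_classes": string_classes, "Mapping": collections.Mapping})
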
4 changes: 2 additions & 2 deletions limap/pointsfm/bundler_reader.py
@@ -88,7 +88,7 @@ def ReadModelBundler(bundler_path, list_path, model_path):
     imagecols = _base.ImageCollection(cameras, camimages)

     # read points
-    for point_id in tqdm(range(n_points)):
+    for _ in tqdm(range(n_points)):
         line = lines[counter].strip("\n").split(" ")
         x, y, z = float(line[0]), float(line[1]), float(line[2])
         counter += 1
@@ -97,7 +97,7 @@ def ReadModelBundler(bundler_path, list_path, model_path):
         n_views = int(line[0])
         subcounter = 1
         track = []
-        for view_id in range(n_views):
+        for _ in range(n_views):
             track.append(int(line[subcounter]))
             subcounter += 4
         model.addPoint(x, y, z, track)
4 changes: 2 additions & 2 deletions limap/pointsfm/colmap_sfm.py
@@ -172,7 +172,7 @@ def run_colmap_sfm(

     ### copy images to tmp folder
     keypoints_in_order = []
-    for idx, img_id in enumerate(imagecols.get_img_ids()):
+    for img_id in imagecols.get_img_ids():
         img = imagecols.read_image(img_id)
         fname_to_save = os.path.join(image_path, f"image{img_id:08d}.png")
         cv2.imwrite(fname_to_save, img)
@@ -247,7 +247,7 @@ def run_colmap_sfm_with_known_poses(
     ### copy images to tmp folder
     keypoints_in_order = []
     imagecols_tmp = copy.deepcopy(imagecols)
-    for idx, img_id in enumerate(imagecols.get_img_ids()):
+    for img_id in imagecols.get_img_ids():
         img = imagecols.read_image(img_id)
         fname_to_save = os.path.join(image_path, f"image{img_id:08d}.png")
         cv2.imwrite(fname_to_save, img)
9 changes: 4 additions & 5 deletions limap/pointsfm/model_converter.py
@@ -21,10 +21,9 @@ def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file):
         # write images
         f.write(f"{len(colmap_images)}\n")
         map_image_id = dict()
-        counter = 0
-        for img_id, colmap_image in colmap_images.items():
-            map_image_id[img_id] = counter
-            counter += 1
+        for cnt, item in enumerate(colmap_images.items()):
+            img_id, colmap_image = item
+            map_image_id[img_id] = cnt
             img_name = colmap_image.name
             cam_id = colmap_image.camera_id
             cam = colmap_cameras[cam_id]
@@ -58,7 +57,7 @@ def convert_colmap_to_visualsfm(colmap_model_path, output_nvm_file):

         # write points
         f.write(f"{len(colmap_points)}\n")
-        for pid, point in colmap_points.items():
+        for _, point in colmap_points.items():
             xyz = point.xyz
             f.write(f"{xyz[0]} {xyz[1]} {xyz[2]}")
             f.write(" 128 128 128")  # dummy color
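
The first hunk above is the SIM113 fix: the hand-maintained counter over colmap_images.items() is replaced with enumerate(). A runnable sketch of that pattern with made-up image records:

    colmap_images = {7: "frame_000.png", 3: "frame_001.png"}  # hypothetical id -> name

    map_image_id = {}
    for cnt, item in enumerate(colmap_images.items()):
        img_id, name = item
        map_image_id[img_id] = cnt  # previously: counter = 0 ... counter += 1
    print(map_image_id)  # {7: 0, 3: 1}
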
4 changes: 2 additions & 2 deletions limap/pointsfm/visualsfm_reader.py
@@ -69,15 +69,15 @@ def ReadModelVisualSfM(vsfm_path, nvm_file="reconstruction.nvm"):
     counter += 1
     n_points = int(txt_lines[counter].strip())
     counter += 1
-    for point_id in tqdm(range(n_points)):
+    for _ in tqdm(range(n_points)):
         line = txt_lines[counter].strip().split()
         counter += 1
         point = np.array([float(line[k]) for k in range(3)])
         # color = np.array([int(line[k]) for k in np.arange(3, 6).tolist()])
         n_views = int(line[6])
         track = []
         subcounter = 7
-        for view_in in range(n_views):
+        for _ in range(n_views):
             track.append(int(line[subcounter]))
             subcounter += 4
         model.addPoint(point[0], point[1], point[2], track)
6 changes: 3 additions & 3 deletions limap/runners/functions.py
@@ -113,7 +113,7 @@ def process(imagecols, img_id):
             cam_dict[cam_id] = cam_undistorted
             imagecols_undistorted.change_camera(cam_id, cam_undistorted)
         imagecols_undistorted.change_image_name(img_id, imname_out)
-    for idx, img_id in enumerate(loaded_ids):
+    for img_id in loaded_ids:
         imname_out = os.path.join(output_dir, f"image{img_id:08d}.png")
         cam_id = loaded_imagecols.camimage(img_id).cam_id
         cam_undistorted = loaded_imagecols.cam(cam_id)
@@ -172,7 +172,7 @@ def compute_sfminfos(cfg, imagecols, fname="metainfos.txt"):
         limapio.check_path(cfg["dir_load"])
         fname_load = os.path.join(cfg["dir_load"], fname)
         neighbors, ranges = limapio.read_txt_metainfos(fname_load)
-        for img_id, neighbor in neighbors.items():
+        for img_id, _ in neighbors.items():
            neighbors[img_id] = neighbors[img_id][: cfg["n_neighbors"]]
         return colmap_output_path, neighbors, ranges

@@ -207,7 +207,7 @@ def compute_2d_segs(cfg, imagecols, compute_descinfo=True):
     import limap.line2d

     if not imagecols.IsUndistorted():
-        warnings.warn("The input images are distorted!")
+        warnings.warn("The input images are distorted!", stacklevel=2)
     basedir = os.path.join(
         "line_detections", cfg["line2d"]["detector"]["method"]
     )
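
The stacklevel=2 added above is the B028 fix: without it, the warning points at the warn() call inside the library instead of at the caller. A small sketch with a hypothetical helper:

    import warnings

    def check_undistorted(is_undistorted: bool):
        if not is_undistorted:
            # stacklevel=2 attributes the warning to whoever called this helper.
            warnings.warn("The input images are distorted!", stacklevel=2)

    check_undistorted(False)
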
6 changes: 3 additions & 3 deletions limap/runners/line_fitnmerge.py
@@ -31,7 +31,7 @@ def process(all_2d_segs, imagecols, depths, fitting_config, img_id):
     segs, camview = all_2d_segs[img_id], imagecols.camview(img_id)
     depth = depths[img_id].read_depth(img_hw=[camview.h(), camview.w()])
     seg3d_list_idx = []
-    for seg_id, s in enumerate(segs):
+    for s in segs:
         seg3d = _fit.estimate_seg3d_from_depth(
             s,
             depth,
@@ -81,7 +81,7 @@ def process(all_2d_segs, imagecols, p3d_reader, fitting_config, img_id):
     segs, camview = all_2d_segs[img_id], imagecols.camview(img_id)
     p3ds = p3d_reader[img_id].read_p3ds()
     seg3d_list_idx = []
-    for seg_id, s in enumerate(segs):
+    for s in segs:
         seg3d = _fit.estimate_seg3d_from_points3d(
             s,
             p3ds,
@@ -150,7 +150,7 @@ def line_fitnmerge(cfg, imagecols, depths, neighbors=None, ranges=None):
         _, neighbors, ranges = _runners.compute_sfminfos(cfg, imagecols)
     else:
         neighbors = imagecols.update_neighbors(neighbors)
-    for img_id, neighbor in neighbors.items():
+    for img_id, _ in neighbors.items():
         neighbors[img_id] = neighbors[img_id][: cfg["n_neighbors"]]

     ##########################################################
2 changes: 1 addition & 1 deletion limap/runners/line_triangulation.py
@@ -63,7 +63,7 @@ def line_triangulation(cfg, imagecols, neighbors=None, ranges=None):
         os.path.join(cfg["dir_save"], "metainfos.txt"), neighbors, ranges
     )
     neighbors = imagecols.update_neighbors(neighbors)
-    for img_id, neighbor in neighbors.items():
+    for img_id, _ in neighbors.items():
         neighbors[img_id] = neighbors[img_id][: cfg["n_neighbors"]]
     limapio.save_txt_metainfos(
         os.path.join(cfg["dir_save"], "metainfos.txt"), neighbors, ranges
4 changes: 2 additions & 2 deletions limap/triangulation/triangulation.py
@@ -1,8 +1,8 @@
 from _limap import _triangulation as _tri


-def get_normal_direction(l, view):
-    return _tri.get_normal_direction(l, view)
+def get_normal_direction(line2d, view):
+    return _tri.get_normal_direction(line2d, view)


 def get_direction_from_VP(vp, view):