diff --git a/localization/sparse_mapping/scripts/colmap/colmap.py b/localization/sparse_mapping/scripts/colmap/colmap.py
new file mode 100644
index 0000000000..5044767a55
--- /dev/null
+++ b/localization/sparse_mapping/scripts/colmap/colmap.py
@@ -0,0 +1,238 @@
+# This module contains helper functions to read colmap databases and models
+# as well as call colmap functions
+
+import argparse
+import collections
+import os
+import re
+import sqlite3
+import struct
+import subprocess
+import sys
+import tempfile
+
+import numpy as np
+
+_MAX_IMAGE_ID = 2**31 - 1
+
+# Access colmap sqlite database with cameras, images and matches
+class COLMAPDatabase(sqlite3.Connection):
+    @staticmethod
+    def connect(database_path):
+        return sqlite3.connect(database_path, factory=COLMAPDatabase)
+
+    def __init__(self, *args, **kwargs):
+        super(COLMAPDatabase, self).__init__(*args, **kwargs)
+
+    def __image_ids_to_pair_id(self, image_id1, image_id2):  # fixed: was missing self, crashed on every call
+        if image_id1 > image_id2:
+            image_id1, image_id2 = image_id2, image_id1
+        return image_id1 * _MAX_IMAGE_ID + image_id2
+
+    def cameras(self):
+        cameras = []
+        rows = self.execute("SELECT * FROM cameras")
+        for r in rows:
+            cameras.append(r)
+        return cameras
+
+    def images(self):
+        images = []
+        rows = self.execute("SELECT image_id, name FROM images")
+        for r in rows:
+            images.append(r)
+        return images
+
+    def image_id(self, image_name):
+        rows = self.execute(  # parameterized query: safe against quoting / injection issues
+            "SELECT image_id FROM images WHERE images.name = ?", (image_name,)
+        )
+        result = rows.fetchone()
+        if result is None:
+            return None
+        return result[0]
+
+    def num_matches(self, image1, image2):
+        if image1 == image2:
+            return 0
+        rows = self.execute(
+            "SELECT rows FROM matches WHERE pair_id = %d"
+            % (self.__image_ids_to_pair_id(image1, image2))
+        )
+        return next(rows, (0,))[0]  # fixed: pairs with no matches row now return 0 instead of raising StopIteration
+
+
+_FILE_PATH = os.path.dirname(os.path.realpath(__file__))
+
+# creates a colmap project ini file based on a template, filling in key arguments (use in a "with:" block)
+class ColmapProjectConfig:
+    def __init__(
+        self,
+        database_path,
+        image_path,
+        output_path,
+        image_list=None,
+        ini_file="mapper.ini",
+        input_path=None,
+    ):
+        self.database_path = database_path
+        self.image_path = image_path
+        self.output_path = output_path
+        self.image_list = image_list
+        self.ini_file = ini_file
+        self.input_path = input_path
+
+    def file_name(self):
+        return self.config.name
+
+    def __enter__(self):
+        self.config = tempfile.NamedTemporaryFile(delete=False)
+        if self.image_list:
+            self.image_config = tempfile.NamedTemporaryFile(delete=False)
+            with open(self.image_config.name, "w") as f:
+                for image in self.image_list:
+                    f.write(image + "\n")
+
+        with open(os.path.join(_FILE_PATH, self.ini_file), "r") as f:
+            content = f.read()
+        content = content.replace("DATABASE_PATH", self.database_path)
+        content = content.replace("IMAGE_PATH", self.image_path)
+        content = content.replace("OUTPUT_PATH", self.output_path)
+        if self.input_path is not None:
+            content = content.replace("INPUT_PATH", self.input_path)
+        image_list_set = (
+            "image_list_path = %s" % (self.image_config.name)
+            if self.image_list
+            else ""
+        )
+        content = content.replace("IMAGE_LIST_SET", image_list_set)
+        with open(self.config.name, "w") as cfg:
+            cfg.write(content)
+
+        return self
+
+    def __exit__(self, exception_type, exception_value, exception_traceback):
+        os.remove(self.config.name)
+        if self.image_list:
+            os.remove(self.image_config.name)
+
+
+CameraModel = collections.namedtuple(
+    "CameraModel", ["model_id", "model_name", "num_params"]
+)
+Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"])
+BaseImage = collections.namedtuple(
+    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"]
+)
+Point3D = collections.namedtuple(
+    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"]
+)
+
+
+class Image(BaseImage):
+    def qvec2rotmat(self):
+        return qvec2rotmat(self.qvec)  # NOTE(review): qvec2rotmat is not defined in this module (trimmed from read_write_model.py) — calling this raises NameError
+
+
+def _read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
+    """Read and unpack the next bytes from a binary file.
+    :param fid:
+    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
+    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
+    :param endian_character: Any of {@, =, <, >, !}
+    :return: Tuple of read and unpacked values.
+    """
+    data = fid.read(num_bytes)
+    return struct.unpack(endian_character + format_char_sequence, data)
+
+
+def _skip_next_bytes(fid, num_bytes):
+    fid.seek(num_bytes, 1)
+
+
+# snippets taken from colmap scripts/python/read_write_model.py
+# this class reads a colmap model and exposes fields of interest (mainly just the images)
+class Model:
+    def __init__(self, fname):
+        self.filename = fname
+        self.__read_images_binary(os.path.join(fname, "images.bin"))
+        self.__analyze_model()
+
+    def __str__(self):
+        return "%d images, %g mean track length, %g mean reprojection error" % (
+            self.num_images,
+            self.mean_track_length,
+            self.mean_reprojection_error,
+        )
+
+    def __read_images_binary(self, path_to_model_file):
+        """
+        see: src/colmap/scene/reconstruction.cc
+        void Reconstruction::ReadImagesBinary(const std::string& path)
+        void Reconstruction::WriteImagesBinary(const std::string& path)
+        """
+        images = {}
+        with open(path_to_model_file, "rb") as fid:
+            num_reg_images = _read_next_bytes(fid, 8, "Q")[0]
+            for _ in range(num_reg_images):
+                binary_image_properties = _read_next_bytes(
+                    fid, num_bytes=64, format_char_sequence="idddddddi"
+                )
+                image_id = binary_image_properties[0]
+                qvec = np.array(binary_image_properties[1:5])
+                tvec = np.array(binary_image_properties[5:8])
+                camera_id = binary_image_properties[8]
+                binary_image_name = b""
+                current_char = _read_next_bytes(fid, 1, "c")[0]
+                while current_char != b"\x00":  # look for the ASCII 0 entry
+                    binary_image_name += current_char
+                    current_char = _read_next_bytes(fid, 1, "c")[0]
+                image_name = binary_image_name.decode("utf-8")
+                num_points2D = _read_next_bytes(
+                    fid, num_bytes=8, format_char_sequence="Q"
+                )[0]
+                _skip_next_bytes(
+                    fid, num_bytes=24 * num_points2D
+                )  # faster to skip, we don't care
+                xys = None
+                point3D_ids = None
+                # x_y_id_s = read_next_bytes(
+                #     fid,
+                #     num_bytes=24 * num_points2D,
+                #     format_char_sequence="ddq" * num_points2D,
+                # )
+                # xys = np.column_stack(
+                #     [
+                #         tuple(map(float, x_y_id_s[0::3])),
+                #         tuple(map(float, x_y_id_s[1::3])),
+                #     ]
+                # )
+                # point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
+                images[image_id] = Image(
+                    id=image_id,
+                    qvec=qvec,
+                    tvec=tvec,
+                    camera_id=camera_id,
+                    name=image_name,
+                    xys=xys,
+                    point3D_ids=point3D_ids,
+                )
+        self.images = images
+
+    def __analyze_model(self):
+        cmd = subprocess.Popen(
+            "colmap model_analyzer --path %s" % (self.filename),
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        output = cmd.communicate()[1]  # model_analyzer reports its statistics on stderr
+        if cmd.returncode != 0:
+            raise Exception("Model %s could not be analyzed." % (self.filename))
+        output = output.decode(errors="replace")  # fixed: regexes ran on str(bytes) repr; raw strings avoid invalid escapes
+        m = re.search(r"Registered images: (\d+)", output)
+        self.num_images = int(m.groups()[0])
+        m = re.search(r"Mean track length: ([-+]?(?:\d*\.*\d+))", output)
+        self.mean_track_length = float(m.groups()[0])
+        m = re.search(r"Mean reprojection error: (.+)px", output)
+        self.mean_reprojection_error = float(m.groups()[0])
diff --git a/localization/sparse_mapping/scripts/colmap/incremental.py b/localization/sparse_mapping/scripts/colmap/incremental.py
new file mode 100755
index 0000000000..2d935b41f8
--- /dev/null
+++ b/localization/sparse_mapping/scripts/colmap/incremental.py
@@ -0,0 +1,314 @@
+import argparse
+import heapq
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+import numpy as np
+
+import colmap
+
+# pass a colmap model with features detected and matched, with images nested in subdirectories.
+# builds submodels for each image subdirectory, then attempts to intelligently merge them.
+
+
+class IncrementalMapper:
+    def __init__(self, database_path, image_path, output_path):
+        self.database_path = database_path
+        self.image_path = image_path
+        self.output_path = output_path
+
+        self.db = colmap.COLMAPDatabase.connect(database_path)
+        self.images = self.list_images()
+
+    # builds nested dictionary of image folder structure
+    def list_images(self):  # NOTE(review): assumes every image lives in at least one subdirectory (parts[-2] raises IndexError otherwise) — confirm
+        image_list = self.db.images()
+        images = dict()
+        for img in image_list:
+            parts = img[1].split(os.sep)
+            d = images
+            for p in parts[:-2]:
+                if p not in d:
+                    d[p] = dict()
+                d = d[p]
+            if parts[-2] not in d:
+                d[parts[-2]] = list()
+            d[parts[-2]].append(img)
+        return images
+
+    def images_to_paths(self, images):
+        paths = []
+        if type(images) is dict:  # fixed: recursion test was on the elements, so dict inputs appended key[1] characters
+            for i in images.values():
+                paths.extend(self.images_to_paths(i))
+        else:
+            for img in images:
+                paths.append(img[1])
+        return paths
+
+    def build_map(self, image_list, output_path):
+        if os.path.exists(output_path):
+            print("Model already exists in %s, skipping rebuild." % (output_path))
+            return
+        try:
+            os.makedirs(output_path, exist_ok=True)
+        except OSError:
+            print("Could not create directory in %s." % (output_path))
+            sys.exit(1)
+        with colmap.ColmapProjectConfig(
+            self.database_path,
+            self.image_path,
+            output_path,
+            image_list,
+            input_path=output_path,
+        ) as cfg:
+            os.system(
+                "colmap mapper --project_path %s > %s/colmap.out 2>&1"
+                % (cfg.file_name(), output_path)
+            )
+
+    def create_leaf_models(self, images, subdir="leaf"):
+        if type(images) is dict:
+            for (d, i) in images.items():
+                self.create_leaf_models(i, os.path.join(subdir, d))
+        else:
+            path = os.path.join(self.output_path, subdir)
+            self.build_map(self.images_to_paths(images), path)
+            for fname in os.listdir(path):
+                model = colmap.Model(os.path.join(path, fname))
+                print(" Model %s: %s" % (os.path.join(subdir, fname), model))
+
+    def get_best_overlap(self, base_model, models):
+        best = None
+        best_matches = 0
+        for m in models:
+            total = 0
+            for img1 in base_model.images.values():
+                for img2 in m.images.values():
+                    total += self.db.num_matches(img1.id, img2.id)
+            if total > best_matches:
+                best_matches = total
+                best = m
+        return (best, best_matches)
+
+    # Merge two models by adding overlapping images to one with the most overlapping features, then
+    # merging with colmap's model_merger, adding points and bundle adjusting. Worked but results did not seem great
+    def merge_models(self, model1, model2):
+        print("Merging: %s and %s" % (model1.filename, model2.filename))
+        image_choices = []
+        for img2 in model2.images.values():
+            matches = 0
+            for img1 in model1.images.values():
+                matches += self.db.num_matches(img1.id, img2.id)
+            image_choices.append((img2.name, matches))
+        best_matches = heapq.nlargest(10, image_choices, key=lambda x: x[1])
+        images = list(map(lambda x: x.name, model1.images.values()))
+        images.extend(list(map(lambda x: x[0], best_matches)))
+        print("Adding top %d images." % (len(best_matches)))
+        with colmap.ColmapProjectConfig(
+            self.database_path,
+            self.image_path,
+            model1.filename,
+            images,
+            ini_file="merge.ini",
+            input_path=model1.filename,
+        ) as cfg:
+            os.system(
+                "colmap mapper --project_path %s >> %s/colmap_add.out 2>&1"
+                % (cfg.file_name(), model1.filename)
+            )
+        print("Results: %s" % (colmap.Model(model1.filename)))
+        os.mkdir(model1.filename + "_merged")
+        os.system(
+            "colmap model_merger --input_path1 %s --input_path2 %s --output_path %s"
+            % (model2.filename, model1.filename, model1.filename + "_merged")
+        )
+        print("Merged maps: %s" % (colmap.Model(model1.filename + "_merged")))
+        os.mkdir(model1.filename + "_points")
+        with colmap.ColmapProjectConfig(
+            self.database_path,
+            self.image_path,
+            model1.filename + "_points",
+            images,
+            ini_file="merge.ini",
+            input_path=model1.filename + "_merged",
+        ) as cfg:
+            os.system(
+                "colmap mapper --project_path %s >> %s/colmap_points.out 2>&1"
+                % (cfg.file_name(), model1.filename)
+            )
+        # os.system('colmap mapper --database_path %s --image_path %s --input_path %s --output_path %s >> %s/colmap_points.out 2>&1' % (self.database_path, self.image_path, model1.filename + '_merged', model1.filename + '_points', model1.filename))
+        print("Points: %s" % (colmap.Model(model1.filename + "_points")))
+        os.mkdir(model1.filename + "_ba")
+        os.system(
+            "colmap bundle_adjuster --input_path %s --output_path %s >> %s/colmap_ba.out 2>&1"
+            % (model1.filename + "_points", model1.filename + "_ba", model1.filename)
+        )
+        print("BA: %s" % (colmap.Model(model1.filename + "_ba")))
+        # fixed: removed stray debugging sys.exit(1) that aborted the whole program before returning
+        return colmap.Model(model1.filename)
+
+    def merge_models_recursive(self, images, subdir=""):
+        # get models at leaf nodes
+        if type(images) is not dict:
+            models = []
+            path = os.path.join(self.output_path, "leaf", subdir)
+            for fname in os.listdir(path):
+                model = colmap.Model(os.path.join(path, fname))
+                if model.num_images > 5 and model.mean_reprojection_error < 1.5:
+                    models.append(model)
+            return models
+
+        # sort models in folder by number of images, start with biggest model
+        models = []
+        for (d, i) in images.items():
+            models.extend(self.merge_models_recursive(i, os.path.join(subdir, d)))
+        models.sort(key=lambda x: x.num_images, reverse=True)
+
+        if len(models) <= 1:
+            return models
+        print("Merging models in %s..." % (subdir))
+        model = models.pop(0)
+        print(
+            "Starting with %s: %s"
+            % (model.filename[len(self.output_path) + 5 :], model)
+        )
+        merged_path = os.path.join(self.output_path, "merged", subdir)
+        if os.path.exists(merged_path):
+            # this was failing to delete the top level folder for me with errno 26. didn't understand why.
+            shutil.rmtree(merged_path, ignore_errors=True)
+            if os.path.exists(merged_path):
+                os.rmdir(merged_path)
+        shutil.copytree(model.filename, merged_path)
+        model = colmap.Model(merged_path)
+
+        while len(models) > 0:
+            (to_merge, shared_features) = self.get_best_overlap(model, models)
+            models.remove(to_merge)
+            print(
+                "Merging %s with %d shared features: %s"
+                % (
+                    to_merge.filename[len(self.output_path) + 5 :],
+                    shared_features,
+                    to_merge,
+                )
+            )
+            model = self.merge_models(model, to_merge)
+            print("Result: %s" % (model))
+        return [model]
+
+    def list_subdirs(self, images, subdir=""):
+        if type(images) is dict:
+            result = []
+            for (d, i) in images.items():
+                result.extend(self.list_subdirs(i, os.path.join(subdir, d)))
+            return result
+        else:
+            return [(subdir, self.images_to_paths(images))]
+
+    def get_best_overlap_images(self, cur_images, subdirs):
+        best = None
+        best_matches = 0
+        for i in range(len(subdirs)):
+            total = 0
+            for img1 in subdirs[i][1]:
+                img1_id = self.db.image_id(img1)
+                for img2 in cur_images:
+                    img2_id = self.db.image_id(img2)
+                    total += self.db.num_matches(img1_id, img2_id)
+            if total > best_matches:
+                best_matches = total
+                best = i
+        return (best, best_matches)  # fixed: returned the final loop index i instead of the computed best index
+
+    # add images to model a subdirectory at a time, save models separately. Idea is to see which images mapping fails and remove them.
+    # seems to be much slower than mapping with all the images at once.
+    def incremental_add(self):
+        subdirs = self.list_subdirs(self.images)
+        (best_idx, best_matches) = self.get_best_overlap_images([], subdirs)
+        # start with directory with most images
+        max_id = max(enumerate(subdirs), key=lambda x: len(x[1][1]))[0]
+        (data_source, images) = subdirs.pop(max_id)
+        map_num = 0
+        map_path = os.path.join(self.output_path, str(map_num))
+        print(
+            "Building initial map %s from %s with %d images..."
+            % (str(map_num), data_source, len(images))
+        )
+        self.build_map(images, map_path)
+        best_count = 0
+        best = None
+        for fname in os.listdir(map_path):
+            if fname.endswith(".out"):
+                continue
+            model = colmap.Model(os.path.join(map_path, fname))
+            print(" %s: %s" % (fname, model))
+            count = len(model.images.keys())
+            if count > best_count:
+                best_count = count
+                best = os.path.join(map_path, fname)
+        map_path = best
+
+        while len(subdirs) > 0:
+            map_num += 1
+            (best_idx, best_matches) = self.get_best_overlap_images(images, subdirs)
+            (data_source, new_images) = subdirs.pop(best_idx)
+            images.extend(new_images)
+            next_map_path = os.path.join(self.output_path, str(map_num))
+            print(
+                "Building map %s, adding %s with %d images and %d overlapping features."
+                % (str(map_num), data_source, len(new_images), best_matches)
+            )
+            os.mkdir(next_map_path)
+            with colmap.ColmapProjectConfig(
+                self.database_path,
+                self.image_path,
+                next_map_path,
+                images,
+                ini_file="merge.ini",
+                input_path=map_path,  # NOTE(review): map_path is never advanced to next_map_path — confirm whether each map should chain from the previous one
+            ) as cfg:
+                os.system(
+                    "colmap mapper --project_path %s > %s/colmap.out 2>&1"
+                    % (cfg.file_name(), next_map_path)
+                )
+            print(" %s" % (colmap.Model(next_map_path)))
+
+    def incremental_map(self):
+        self.create_leaf_models(self.images)
+        self.merge_models_recursive(self.images)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Incrementally build a map with colmap from a nested directory of images."
+    )
+    parser.add_argument("--database_path", required=True, help="Colmap database file.")
+    parser.add_argument(
+        "--output_path", required=True, help="Path to write output model."
+    )
+    parser.add_argument("--image_path", required=True, help="Colmap image directory.")
+    args = parser.parse_args()
+    if not os.path.exists(args.database_path):
+        print("Database not found.")
+        return 1
+    if os.path.exists(args.output_path):
+        print("Output model already exists.")
+        return 1
+    try:
+        os.mkdir(args.output_path)
+    except OSError:
+        print("Could not create output directory.")
+        return 1
+
+    mapper = IncrementalMapper(args.database_path, args.image_path, args.output_path)
+    mapper.incremental_add()
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/localization/sparse_mapping/scripts/colmap/mapper.ini b/localization/sparse_mapping/scripts/colmap/mapper.ini
new file mode 100644
index 0000000000..922e6d4dc7
--- /dev/null
+++ b/localization/sparse_mapping/scripts/colmap/mapper.ini
@@ -0,0 +1,62 @@
+database_path=DATABASE_PATH
+image_path=IMAGE_PATH
+output_path=OUTPUT_PATH
+IMAGE_LIST_SET
+[Mapper]
+ignore_watermarks=false
+multiple_models=true
+extract_colors=true
+ba_refine_focal_length=true
+ba_refine_principal_point=false
+ba_refine_extra_params=true
+fix_existing_images=false
+tri_ignore_two_view_tracks=true
+min_num_matches=15
+max_num_models=50
+max_model_overlap=20
+min_model_size=10
+init_image_id1=-1
+init_image_id2=-1
+init_num_trials=200
+num_threads=-1
+ba_min_num_residuals_for_multi_threading=50000
+ba_local_num_images=6
+ba_local_max_num_iterations=25
+ba_global_images_freq=500
+ba_global_points_freq=250000
+ba_global_max_num_iterations=50
+ba_global_max_refinements=5
+ba_local_max_refinements=2
+snapshot_images_freq=0
+init_min_num_inliers=100
+init_max_reg_trials=2
+abs_pose_min_num_inliers=30
+max_reg_trials=3
+tri_max_transitivity=1
+tri_complete_max_transitivity=5
+tri_re_max_trials=1
+min_focal_length_ratio=0.10000000000000001
+max_focal_length_ratio=10 +max_extra_param=1 +ba_local_function_tolerance=0 +ba_global_images_ratio=1.1000000000000001 +ba_global_points_ratio=1.1000000000000001 +ba_global_function_tolerance=0 +ba_global_max_refinement_change=0.00050000000000000001 +ba_local_max_refinement_change=0.001 +init_max_error=4 +init_max_forward_motion=0.94999999999999996 +init_min_tri_angle=16 +abs_pose_max_error=12 +abs_pose_min_inlier_ratio=0.25 +filter_max_reproj_error=4 +filter_min_tri_angle=1.5 +local_ba_min_tri_angle=6 +tri_create_max_angle_error=2 +tri_continue_max_angle_error=2 +tri_merge_max_reproj_error=4 +tri_complete_max_reproj_error=4 +tri_re_max_angle_error=5 +tri_re_min_ratio=0.20000000000000001 +tri_min_angle=1.5 +snapshot_path= diff --git a/localization/sparse_mapping/scripts/colmap/merge.ini b/localization/sparse_mapping/scripts/colmap/merge.ini new file mode 100644 index 0000000000..5fa5361e2b --- /dev/null +++ b/localization/sparse_mapping/scripts/colmap/merge.ini @@ -0,0 +1,63 @@ +database_path=DATABASE_PATH +image_path=IMAGE_PATH +output_path=OUTPUT_PATH +input_path=INPUT_PATH +IMAGE_LIST_SET +[Mapper] +ignore_watermarks=false +multiple_models=false +extract_colors=true +ba_refine_focal_length=true +ba_refine_principal_point=false +ba_refine_extra_params=true +fix_existing_images=false +tri_ignore_two_view_tracks=true +min_num_matches=15 +max_num_models=50 +max_model_overlap=20 +min_model_size=10 +init_image_id1=-1 +init_image_id2=-1 +init_num_trials=200 +num_threads=-1 +ba_min_num_residuals_for_multi_threading=50000 +ba_local_num_images=6 +ba_local_max_num_iterations=25 +ba_global_images_freq=500 +ba_global_points_freq=250000 +ba_global_max_num_iterations=50 +ba_global_max_refinements=5 +ba_local_max_refinements=2 +snapshot_images_freq=0 +init_min_num_inliers=100 +init_max_reg_trials=2 +abs_pose_min_num_inliers=30 +max_reg_trials=3 +tri_max_transitivity=1 +tri_complete_max_transitivity=5 +tri_re_max_trials=1 +min_focal_length_ratio=0.10000000000000001 
+max_focal_length_ratio=10 +max_extra_param=1 +ba_local_function_tolerance=0 +ba_global_images_ratio=1.1000000000000001 +ba_global_points_ratio=1.1000000000000001 +ba_global_function_tolerance=0 +ba_global_max_refinement_change=0.00050000000000000001 +ba_local_max_refinement_change=0.001 +init_max_error=4 +init_max_forward_motion=0.94999999999999996 +init_min_tri_angle=16 +abs_pose_max_error=12 +abs_pose_min_inlier_ratio=0.25 +filter_max_reproj_error=4 +filter_min_tri_angle=1.5 +local_ba_min_tri_angle=6 +tri_create_max_angle_error=2 +tri_continue_max_angle_error=2 +tri_merge_max_reproj_error=4 +tri_complete_max_reproj_error=4 +tri_re_max_angle_error=5 +tri_re_min_ratio=0.20000000000000001 +tri_min_angle=1.5 +snapshot_path= diff --git a/localization/sparse_mapping/scripts/colmap/readme.md b/localization/sparse_mapping/scripts/colmap/readme.md new file mode 100644 index 0000000000..805c5f8aeb --- /dev/null +++ b/localization/sparse_mapping/scripts/colmap/readme.md @@ -0,0 +1,89 @@ +# COLMAP + +This directory contains tools for building Astrobee maps using colmap. Downloading colmap: https://colmap.github.io/ +and add the colmap executable to your path to use. This was tested with colmap 3.9.1. + +incremental.py contains two methods to build maps incrementally: one by merging submaps, and one by adding images over time. +colmap.py allows viewing the colmap database and models in python. +remove_images.py removes a set of images from a colmap map. + +Colmap seems extremely promising but we did not have time to fully test it. Generally the standard colmap map creation works well, +but depending on the image sources may sometimes fail and create maps that are not right. This +is seen with ISS walls that do not align. Likely this could be overcome through manual fiddling with the image set as done with our current mapping +procedure. + +## Create Colmap map + +Follow this procedure to build a map in colmap. + +1. 
Extract images from bags into a single folder (optionally with subfolders) following the existing procedure.
+
+2. colmap gui
+3. File --> New Project
+   Select image folder and create .db file
+   Save
+
+4. Processing --> Feature Extraction
+   Camera model: RADIAL_FISHEYE
+   Check "Shared for all images"
+   Extract
+
+5. Processing --> Feature Matching
+   Go to "VocabTree" tab
+   vocab_tree path --> Select file
+   vocab_tree_flickr100K_words32K.bin (from colmap website)
+   Run (this step takes a while)
+
+   Exhaustive matching will give better results but is slower. Do exhaustive if you are not in a hurry.
+
+6. Reconstruction --> Start Reconstruction
+   Check that it looks good
+
+7. Reconstruction --> Bundle Adjustment
+   Check "refine_principal_point" to get full camera calibration
+   Run
+   If it doesn't converge, try increasing max iterations and / or running it again
+
+8. File --> Save Project As (Lets you reload the data in colmap)
+9. File --> Export Model (model_output_folder, Saves in a folder you pick, this lets you reload the model in colmap later, need to load project first)
+
+## Camera Calibration
+
+Colmap generates a camera calibration that is believed to be better than our existing calibration. After building a map, you
+can extract the calibration for use in the Astrobee software.
+
+1. (in Colmap gui) File --> Export Model as Text
+2. Open cameras.txt in exported text model folder
+3. Should have line in format:
+   1 RADIAL_FISHEYE 1280 960 f a b k1 k2
+
+4. Copy bumble.config, rename it to colmap.config, and update it with the colmap intrinsics and distortion values as:
+
+   robot_camera_calibrations = {
+     nav_cam = {
+       distortion_coeff = {k1, k2},
+       intrinsic_matrix = {
+         f, 0.0, a,
+         0.0, f, b,
+         0.0, 0.0, 1.0
+       },
+
+## Convert to Astrobee Map
+
+Follow these instructions to convert a map generated with colmap into the format used by Astrobee.
+
+1. colmap model_converter --output_type NVM --skip_distortion true --input_path model_output_folder/ --output_path colmap.nvm
+
+2.
export ASTROBEE_ROBOT=colmap + export ASTROBEE_WORLD=iss +3. rosrun sparse_mapping import_map --input_map colmap.nvm --output_map colmap.map + +4. Register map: + rosrun sparse_mapping build_map -registration file.pto file.txt -num_ba_passes 0 -skip_filtering -output_map colmap.map +5. Rebuild with new features: + rosrun sparse_mapping build_map -rebuild -rebuild_replace_camera -histogram_equalization -output_map colmap.map -rebuild_detector ORGBRISK + Reproject error should be ~0.2 (less is better) and have many features +6. Now check map looks good: + rosrun sparse_mapping nvm_visualize --skip_3d_images colmap.map +7. Build vocab db: + rosrun sparse_mapping build_map -vocab_db -output_map colmap.map diff --git a/localization/sparse_mapping/scripts/colmap/remove_images.py b/localization/sparse_mapping/scripts/colmap/remove_images.py new file mode 100755 index 0000000000..6033131d7e --- /dev/null +++ b/localization/sparse_mapping/scripts/colmap/remove_images.py @@ -0,0 +1,55 @@ +# Deletes a folder of images from an existing colmap model +import argparse +import os +import sys +import tempfile + +import colmap + + +def main(): + parser = argparse.ArgumentParser( + description="Deletes a directory of images from a given colmap model." 
+    )
+    parser.add_argument("--input_path", required=True, help="Model to delete from")
+    parser.add_argument("--output_path", required=True, help="Model to output to")
+    parser.add_argument(
+        "--image_path", required=True, help="Base path for images in model"
+    )
+    parser.add_argument(
+        "--remove_dir",
+        required=True,
+        help="Directory within image_path with images to remove",
+    )
+    args = parser.parse_args()
+    if not os.path.exists(args.input_path):
+        print("Input not found.")
+        return 1
+    if not os.path.exists(args.output_path):
+        try:
+            os.mkdir(args.output_path)
+        except OSError:
+            print("Could not create output directory.")
+            return 1
+
+    tmp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
+    try:
+        for root, dirs, files in os.walk(args.remove_dir):
+            for f in files:
+                tmp.write(
+                    os.path.relpath(os.path.join(root, f), args.image_path) + "\n"
+                )
+        tmp.close()
+        os.system(
+            "colmap image_deleter --input_path %s --output_path %s --image_names_path %s"
+            % (args.input_path, args.output_path, tmp.name)
+        )
+    finally:
+        tmp.close()
+        os.unlink(tmp.name)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())