Merge main
pierotofy committed Jun 2, 2022
2 parents 564ff2c + 379962a commit 7154bcc
Showing 199 changed files with 3,395 additions and 2,525 deletions.
2 changes: 1 addition & 1 deletion annotation_gui_gcp/js/WebView.js
@@ -8,7 +8,7 @@ function initialize_event_source(handlers) {
if (this.readyState == EventSource.CONNECTING) {
console.log(`Reconnecting (readyState=${this.readyState})...`);
} else {
console.log("Error has occured.", err);
console.log("Error has occurred.", err);
}
};

6 changes: 4 additions & 2 deletions annotation_gui_gcp/lib/geometry.py
@@ -1,15 +1,17 @@
from opensfm import dataset
from numpy import ndarray
from typing import Dict, Tuple


def get_all_track_observations(gcp_database, track_id):
def get_all_track_observations(gcp_database, track_id: str) -> Dict[str, ndarray]:
print(f"Getting all observations of track {track_id}")
data = dataset.DataSet(gcp_database.path)
tracks_manager = data.load_tracks_manager()
track_obs = tracks_manager.get_track_observations(track_id)
return {shot_id: obs.point for shot_id, obs in track_obs.items()}


def get_tracks_visible_in_image(gcp_database, image_key, min_len=5):
def get_tracks_visible_in_image(gcp_database, image_key, min_len: int=5) -> Dict[str, Tuple[ndarray, int]]:
print(f"Getting track observations visible in {image_key}")
data = dataset.DataSet(gcp_database.path)
tracks_manager = data.load_tracks_manager()
37 changes: 17 additions & 20 deletions annotation_gui_gcp/lib/views/cad_view.py
@@ -1,17 +1,17 @@
import json
import logging
from pathlib import Path
from typing import Dict, Any
from typing import Any, Dict, Tuple

import rasterio
from annotation_gui_gcp.lib.views.web_view import WebView, distinct_colors
from flask import send_file
from PIL import ImageColor

logger = logging.getLogger(__name__)
logger: logging.Logger = logging.getLogger(__name__)


def _load_georeference_metadata(path_cad_model):
def _load_georeference_metadata(path_cad_model) -> Dict[str, Any]:
path_metadata = path_cad_model.with_suffix(".json")

if not path_metadata.is_file():
@@ -30,7 +30,7 @@ def __init__(
route_prefix,
path_cad_file,
is_geo_reference=False,
):
)-> None:
super().__init__(main_ui, web_app, route_prefix)

self.main_ui = main_ui
@@ -47,7 +47,7 @@ def __init__(
view_func=self.get_model,
)

def get_model(self):
def get_model(self) -> Any:
return send_file(self.cad_path, mimetype="application/octet-stream")

def process_client_message(self, data: Dict[str, Any]) -> None:
@@ -59,7 +59,7 @@ def process_client_message(self, data: Dict[str, Any]) -> None:
else:
raise ValueError(f"Unknown event {event}")

def add_remove_update_point_observation(self, point_coordinates=None):
def add_remove_update_point_observation(self, point_coordinates=None)->None:
gcp_manager = self.main_ui.gcp_manager
active_gcp = self.main_ui.curr_point
if active_gcp is None:
@@ -72,12 +72,9 @@ def add_remove_update_point_observation(self, point_coordinates=None):
)

# Add the new observation
if point_coordinates is not None:
lla = (
self.xyz_to_latlon(*point_coordinates)
if self.is_geo_reference
else None
)
if point_coordinates is not None and self.is_geo_reference is not None:
lla = self.xyz_to_latlon(*point_coordinates)

geo = {
"latitude": lla[0],
"longitude": lla[1],
@@ -97,31 +94,31 @@ def add_remove_update_point_observation(
)
self.main_ui.populate_gcp_list()

def display_points(self):
def display_points(self) -> None:
pass

def refocus(self, lat, lon):
def refocus(self, lat, lon)->None:
x, y, z = self.latlon_to_xyz(lat, lon)
self.send_sse_message(
{"x": x, "y": y, "z": z},
event_type="move_camera",
)

def highlight_gcp_reprojection(self, *args, **kwargs):
def highlight_gcp_reprojection(self, *args, **kwargs)->None:
pass

def populate_image_list(self, *args, **kwargs):
def populate_image_list(self, *args, **kwargs)->None:
pass

def latlon_to_xyz(self, lat, lon):
def latlon_to_xyz(self, lat, lon) -> Tuple[float, float, float]:
xs, ys, zs = rasterio.warp.transform("EPSG:4326", self.crs, [lon], [lat], [0])
x = xs[0] * self.scale - self.offset[0]
y = ys[0] * self.scale - self.offset[1]
z = zs[0] * self.scale - self.offset[2]
y, z = z, -y
return x, y, z

def xyz_to_latlon(self, x, y, z):
def xyz_to_latlon(self, x, y, z) -> Tuple[float, float, float]:
y, z = -z, y

# Add offset (cm) and transform to m
@@ -131,13 +128,13 @@ def xyz_to_latlon(self, x, y, z):
lons, lats, alts = rasterio.warp.transform(self.crs, "EPSG:4326", [x], [y], [z])
return lats[0], lons[0], alts[0]

def load_georeference_metadata(self, path_cad_model):
def load_georeference_metadata(self, path_cad_model)->None:
metadata = _load_georeference_metadata(path_cad_model)
self.scale = metadata["scale"]
self.crs = metadata["crs"]
self.offset = metadata["offset"]

def sync_to_client(self):
def sync_to_client(self)->None:
"""
Sends all the data required to initialize or sync the CAD view
"""
2 changes: 1 addition & 1 deletion annotation_gui_gcp/lib/views/image_view.py
@@ -104,7 +104,7 @@ def process_client_message(self, data: Dict[str, Any]) -> None:
"add_or_update_point_observation",
"remove_point_observation",
):
raise ValueError(f"Unknown commmand {command}")
raise ValueError(f"Unknown command {command}")

if data["point_id"] != self.main_ui.curr_point:
print(data["point_id"], self.main_ui.curr_point)
24 changes: 15 additions & 9 deletions annotation_gui_gcp/main.py
@@ -2,7 +2,9 @@
import json
import typing as t
from collections import OrderedDict, defaultdict
from os import PathLike
from pathlib import Path
from typing import Union

import numpy as np
from annotation_gui_gcp.lib import GUI
@@ -12,7 +14,7 @@
from opensfm import dataset, io


def get_parser():
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("dataset", help="dataset")
parser.add_argument(
@@ -52,7 +54,7 @@ def get_parser():
return parser


def file_sanity_check(root, seq_dict, fname):
def file_sanity_check(root, seq_dict, fname) -> t.Set[str]:
# Images available under ./images for a sanity check
available_images = {p.name for p in (root / "images").iterdir()}
keys_in_seq_dict = {im_key for seq_keys in seq_dict.values() for im_key in seq_keys}
@@ -88,7 +90,9 @@ def load_rig_assignments(root: Path) -> t.Dict[str, t.List[str]]:


def load_sequence_database_from_file(
root, fname="sequence_database.json", skip_missing=False
root: Path,
fname: Union["PathLike[str]", str] = "sequence_database.json",
skip_missing: bool = False,
):
"""
Simply loads a sequence file and returns it.
@@ -119,7 +123,7 @@ def load_sequence_database_from_file(
return seq_dict


def load_shots_from_reconstructions(path, min_ims):
def load_shots_from_reconstructions(path, min_ims) -> t.List[t.List[str]]:
data = dataset.DataSet(path)
reconstructions = data.load_reconstruction()

@@ -150,7 +154,9 @@ def load_shots_from_reconstructions(path, min_ims):
return output


def group_by_reconstruction(args, groups_from_sequence_database):
def group_by_reconstruction(
args, groups_from_sequence_database
) -> t.Dict[str, t.List[str]]:
all_recs_shots = load_shots_from_reconstructions(
args.dataset, min_ims=args.min_images_in_reconstruction
)
@@ -173,7 +179,7 @@ def group_by_reconstruction(args, groups_from_sequence_database):
return groups


def group_images(args):
def group_images(args) -> t.Dict[str, t.List[str]]:
"""
Groups the images to be shown in different windows/views
@@ -203,11 +209,11 @@ def group_images(args):
return groups_from_sequence_database


def find_suitable_cad_paths(path_cad_files, path_dataset, n_paths=6):
def find_suitable_cad_paths(path_cad_files: Path, path_dataset, n_paths: int = 6):
if path_cad_files is None:
return []

def latlon_from_meta(path_cad):
def latlon_from_meta(path_cad) -> t.Tuple[float, float]:
path_meta = path_cad.with_suffix(".json")
with open(path_meta) as f:
meta = json.load(f)
@@ -231,7 +237,7 @@ def latlon_from_meta(path_cad):
return [cad_files[i] for i in ixs_sort]


def init_ui():
def init_ui() -> t.Tuple[Flask, argparse.Namespace]:
app = Flask(__name__)
parser = get_parser()
args = parser.parse_args()
2 changes: 1 addition & 1 deletion bin/plot_features
@@ -48,7 +48,7 @@ if __name__ == "__main__":
if not features_data:
continue
points = features_data.points
print("ploting {0} points".format(len(points)))
print("plotting {0} points".format(len(points)))
plt.figure()
plt.title('Image: ' + image + ', features: ' + str(len(points)))
fig = plot_features(data.load_image(image), points)
2 changes: 1 addition & 1 deletion bin/plot_inliers
@@ -241,7 +241,7 @@ def triangulate_tracks(tracks, reconstruction, graph, min_ray_angle):
:return: An array of booleans determining if each track was successfully triangulated or not.
"""
succeeded = []
triangulator = reconstruct.TrackTriangulator(graph, reconstruction)
triangulator = reconstruct.TrackTriangulator(reconstruction, reconstruct.TrackHandlerTrackManager(graph, reconstruction))

for track in tracks:
# Triangulate with 1 as reprojection threshold to avoid excluding tracks because of error.
12 changes: 7 additions & 5 deletions bin/plot_matches.py
@@ -10,9 +10,11 @@
from opensfm import dataset
from opensfm import features
from opensfm import io
from numpy import ndarray
from typing import List


def plot_matches(im1, im2, p1, p2):
def plot_matches(im1, im2, p1: ndarray, p2: ndarray) -> None:
h1, w1, c = im1.shape
h2, w2, c = im2.shape
image = np.zeros((max(h1, h2), w1 + w2, 3), dtype=im1.dtype)
@@ -29,7 +31,7 @@ def plot_matches(im1, im2, p1, p2):
pl.plot(p2[:, 0] + w1, p2[:, 1], "ob")


def plot_graph(data):
def plot_graph(data) -> None:
cmap = cm.get_cmap("viridis")
connectivity = {}
for im1 in images:
@@ -70,7 +72,7 @@ def plot_graph(data):
pl.savefig(os.path.join(data.data_path, "matchgraph.png"))


def plot_matches_for_images(data, image, images):
def plot_matches_for_images(data, image, images) -> None:
if image:
pairs = [(image, o) for o in images if o != image]
elif images:
@@ -122,10 +124,10 @@ def plot_matches_for_images(data, image, images):
parser.add_argument(
"--save_figs", help="save figures instead of showing them", action="store_true"
)
args = parser.parse_args()
args: argparse.Namespace = parser.parse_args()

data = dataset.DataSet(args.dataset)
images = data.images()
images: List[str] = data.images()

if args.graph:
plot_graph(data)
4 changes: 2 additions & 2 deletions doc/source/annotation_tool.rst
@@ -138,7 +138,7 @@ Detailed documentation for this is not available as the feature is experimental,
- Start from a dataset containing more than one reconstruction in ``reconstruction.json``.

- Launch the tool:
- If the two reconstructions come from different sequences, lauch as usual.
- If the two reconstructions come from different sequences, launch as usual.
- If the two reconstructions come from the same sequence, launch using the ``--group-by-reconstruction`` argument.
This will split the images into two windows, one for each reconstruction.

@@ -150,7 +150,7 @@ Use the 'Rigid', 'Flex' or 'Full' buttons to run the alignment using the annotat
- The 'Rigid' option triangulates the control points in each reconstruction independently and finds a rigid transform to align them.
- The 'Flex' option additionally re-runs bundle adjustment, allowing for some deformation of both reconstructions to fit the annotations.
- The 'Full' option attempts to obtain positional covariances for each camera pose.
If succesful, the frame list on the image views is populated with the positional covariance norm. Lower is better.
If successful, the frame list on the image views is populated with the positional covariance norm. Lower is better.

After running analysis, the reprojection errors are overlaid on the image views as shown in :ref:`running-alignment`.
The aligned reconstructions are saved with new filenames in the root folder and can be viewed in 3D with the OpenSfM viewer.
2 changes: 1 addition & 1 deletion doc/source/dense.rst
@@ -123,7 +123,7 @@ Undistortion

The dense module assumes that images are taken with perspective projection and no radial distortion. For perspective images, undistorted versions can be generated by taking into account the computed distortion parameters, :math:`k1` and :math:`k2`.

Spherical images (360 panoramas) however can not be unwarped into a single persepective view. We need to generate multiple perspective views to cover the field of view of a panorama.
Spherical images (360 panoramas) however can not be unwarped into a single perspective view. We need to generate multiple perspective views to cover the field of view of a panorama.

This means that the undistortion process will create new views of the reconstruction. Thus the undistortion process is one where a reconstruction is taken as input and a new reconstruction is produced as output. The input may contain radially distorted images and panoramas and the output reconstruction will only have undistorted perspective images.

Expand Down
2 changes: 1 addition & 1 deletion doc/source/gcp.rst
@@ -6,7 +6,7 @@ When EXIF data contains GPS location, it is used by OpenSfM to georeference the

Ground control points (GCP) are landmarks visible on the images for which the geospatial position (latitude, longitude and altitude) is known. A single GCP can be observed in one or more images.

OpenSfM uses GCP in two steps of the reconstruction process: alignment and bundle adjustment. In the alignment step, points are used to globaly move the reconstruction so that the observed GCP align with their GPS position. Two or more observations for each GCP are required for it to be used during the aligment step.
OpenSfM uses GCP in two steps of the reconstruction process: alignment and bundle adjustment. In the alignment step, points are used to globaly move the reconstruction so that the observed GCP align with their GPS position. Two or more observations for each GCP are required for it to be used during the alignment step.

In the bundle adjustment step, GCP observations are used as a constraint to refine the reconstruction. In this step, all ground control points are used. No minimum number of observation is required.

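The alignment step described in this hunk amounts to fitting one global transform between triangulated GCP positions and their known geographic positions. A hedged sketch of such a fit using the classic Umeyama similarity estimate (illustrative only; the function name and interface are assumptions, not OpenSfM's alignment code):

```python
import numpy as np

def fit_similarity(gcps_reconstruction, gcps_geo):
    # Least-squares similarity transform (scale s, rotation R, translation t)
    # such that gcps_geo ~= s * R @ gcps_reconstruction + t.
    # Both inputs are (N, 3) arrays; at least 3 non-collinear points are needed.
    mu_src = gcps_reconstruction.mean(axis=0)
    mu_dst = gcps_geo.mean(axis=0)
    src = gcps_reconstruction - mu_src
    dst = gcps_geo - mu_dst
    cov = dst.T @ src / len(src)
    U, S, Vt = np.linalg.svd(cov)
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(U @ Vt))])  # guard against reflections
    R = U @ D @ Vt
    s = np.trace(np.diag(S) @ D) / (src ** 2).sum(axis=1).mean()
    t = mu_dst - s * R @ mu_src
    return s, R, t
```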
4 changes: 2 additions & 2 deletions doc/source/geometry.rst
@@ -144,8 +144,8 @@ Identifier `brown`
y_n = \frac{y}{z} \\
r^2 = x_n^2 + y_n^2 \\
d_r = 1 + k_1 r^2 + k_2 r^4 + k_3 r^6\\
d^t_x = 2p_1\ x_n\ y_n + p_2\ (r^2 + 2x)\\
d^t_y = 2p_2\ x_n\ y_n + p_1\ (r^2 + 2y)\\
d^t_x = 2p_1\ x_n\ y_n + p_2\ (r^2 + 2x^2)\\
d^t_y = 2p_2\ x_n\ y_n + p_1\ (r^2 + 2y^2)\\
u = f_x\ (d_r\ x_n + d^t_x) + c_x \\
v = f_y\ (d_r\ y_n + d^t_y) + c_y
\end{array}
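The corrected tangential terms above match the standard Brown-Conrady form. As a worked illustration of the full projection written in this hunk (a sketch with a hypothetical helper name, not OpenSfM's projection code):

```python
import numpy as np

def project_brown(point, fx, fy, cx, cy, k1, k2, k3, p1, p2):
    # Project a 3D point in camera coordinates with the `brown` model,
    # following the equations above (xn, yn are the normalized coordinates).
    x, y, z = point
    xn, yn = x / z, y / z
    r2 = xn * xn + yn * yn
    dr = 1.0 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3
    dtx = 2.0 * p1 * xn * yn + p2 * (r2 + 2.0 * xn ** 2)
    dty = 2.0 * p2 * xn * yn + p1 * (r2 + 2.0 * yn ** 2)
    u = fx * (dr * xn + dtx) + cx
    v = fy * (dr * yn + dty) + cy
    return np.array([u, v])
```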
1 change: 1 addition & 0 deletions doc/source/index.rst
@@ -18,6 +18,7 @@ OpenSfM
rig
annotation_tool
api
sensor_database

Indices and tables
==================
10 changes: 5 additions & 5 deletions doc/source/quality_report.rst
@@ -24,11 +24,11 @@ Processing Summary
|processing|

- Reconstructed Images : reconstructed images over total number of images
- Reconstructed Images : reconstructed points over total number of points in the `tracks.csv` file (`create_tracks`)
- Reconstructed Points : reconstructed points over total number of points in the `tracks.csv` file (`create_tracks`)
- Reconstructed Components : number of continously reconstructed sets of images
- Detected Features : median number (accross images) of detected features
- Reconstructed Features : median number (accross images) of reconstructed features
- Geographic Reference : indicated wether GPS and/or GCP have been used for geo-alignment
- Detected Features : median number (across images) of detected features
- Reconstructed Features : median number (across images) of reconstructed features
- Geographic Reference : indicated whether GPS and/or GCP have been used for geo-alignment
- GPS / GCP errors : GPS and/or GCP RMS errors

|topview|
@@ -45,7 +45,7 @@ The heatmap represent the density of detected features : the gradient goes from

|feat2|

The table below lists minimum/maximum/mean and median detected and reconstructed features accross images.
The table below lists minimum/maximum/mean and median detected and reconstructed features across images.

Reconstruction Details
~~~~~~~~~~~~~~~~~~~~~~
2 changes: 1 addition & 1 deletion doc/source/reporting.rst
@@ -4,7 +4,7 @@
Reporting
=========

OpenSfM commands write reports on the work done. Reports are stored in the ``reports`` folder in json format so that they can be loaded programatically. Here is the list of reports produced and the data included.
OpenSfM commands write reports on the work done. Reports are stored in the ``reports`` folder in json format so that they can be loaded programmatically. Here is the list of reports produced and the data included.

Feature detection
-----------------
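Since the reports mentioned in this hunk are plain JSON files under the dataset's ``reports`` folder, loading one programmatically takes only a few lines; a sketch (the report file name used here, features.json, is an assumption, not a documented name):

```python
import json
from pathlib import Path

def load_report(dataset_path, name="features.json"):
    # Read one OpenSfM report from <dataset>/reports as a plain dict.
    with open(Path(dataset_path) / "reports" / name) as f:
        return json.load(f)
```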