From 983d6cb26c32b2b04f3de8061b13b7b30b7c1a6d Mon Sep 17 00:00:00 2001 From: MRo47 Date: Fri, 26 Apr 2024 17:58:18 +0000 Subject: [PATCH 1/2] fix dictionary names --- .../src/camera_calibration/calibrator.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/camera_calibration/src/camera_calibration/calibrator.py b/camera_calibration/src/camera_calibration/calibrator.py index 6f7744612..f0fe63cf5 100644 --- a/camera_calibration/src/camera_calibration/calibrator.py +++ b/camera_calibration/src/camera_calibration/calibrator.py @@ -80,14 +80,14 @@ def __init__(self, pattern="chessboard", n_cols = 0, n_rows = 0, dim = 0.0, mark "5x5_100" : cv2.aruco.DICT_5X5_100, "5x5_250" : cv2.aruco.DICT_5X5_250, "5x5_1000" : cv2.aruco.DICT_5X5_1000, - "6x6_50" : cv2.aruco.DICT_6x6_50, - "6x6_100" : cv2.aruco.DICT_6x6_100, - "6x6_250" : cv2.aruco.DICT_6x6_250, - "6x6_1000" : cv2.aruco.DICT_6x6_1000, - "7x7_50" : cv2.aruco.DICT_7x7_50, - "7x7_100" : cv2.aruco.DICT_7x7_100, - "7x7_250" : cv2.aruco.DICT_7x7_250, - "7x7_1000" : cv2.aruco.DICT_7x7_1000}[aruco_dict]) + "6x6_50" : cv2.aruco.DICT_6X6_50, + "6x6_100" : cv2.aruco.DICT_6X6_100, + "6x6_250" : cv2.aruco.DICT_6X6_250, + "6x6_1000" : cv2.aruco.DICT_6X6_1000, + "7x7_50" : cv2.aruco.DICT_7X7_50, + "7x7_100" : cv2.aruco.DICT_7X7_100, + "7x7_250" : cv2.aruco.DICT_7X7_250, + "7x7_1000" : cv2.aruco.DICT_7X7_1000}[aruco_dict]) self.charuco_board = cv2.aruco.CharucoBoard_create(self.n_cols, self.n_rows, self.dim, self.marker_size, self.aruco_dict) From fd096ecdd1c711ca402a9a2a68dad30f89722ae6 Mon Sep 17 00:00:00 2001 From: MRo47 Date: Fri, 26 Apr 2024 20:50:49 +0000 Subject: [PATCH 2/2] separated calibration types --- camera_calibration/doc/index.rst | 4 +- .../scripts/tarfile_calibration.py | 110 +- .../src/camera_calibration/calibrator.py | 963 +++--------------- .../camera_calibration/camera_calibrator.py | 124 ++- .../src/camera_calibration/camera_checker.py | 47 +- .../src/camera_calibration/mono_calibrator.py | 368 +++++++ .../nodes/cameracalibrator.py | 23 +- .../camera_calibration/nodes/cameracheck.py | 7 +- .../camera_calibration/stereo_calibrator.py | 439 ++++++++ camera_calibration/test/test_directed.py | 5 +- .../test/test_multiple_boards.py | 4 +- 11 files changed, 1144 insertions(+), 950 deletions(-) create mode 100644 camera_calibration/src/camera_calibration/mono_calibrator.py create mode 100644 camera_calibration/src/camera_calibration/stereo_calibrator.py diff --git a/camera_calibration/doc/index.rst b/camera_calibration/doc/index.rst index 741e4e5b7..b068a2ef7 100644 --- a/camera_calibration/doc/index.rst +++ b/camera_calibration/doc/index.rst @@ -11,8 +11,8 @@ need to extend or make a new calibration tool. For details on the camera model and camera calibration process, see http://docs.opencv.org/master/d9/d0c/group__calib3d.html -.. autoclass:: camera_calibration.calibrator.MonoCalibrator +.. autoclass:: camera_calibration.mono_calibrator.MonoCalibrator :members: -.. autoclass:: camera_calibration.calibrator.StereoCalibrator +.. 
autoclass:: camera_calibration.stereo_calibrator.StereoCalibrator :members: diff --git a/camera_calibration/scripts/tarfile_calibration.py b/camera_calibration/scripts/tarfile_calibration.py index 059f2d303..8317f96f3 100755 --- a/camera_calibration/scripts/tarfile_calibration.py +++ b/camera_calibration/scripts/tarfile_calibration.py @@ -39,21 +39,24 @@ import cv_bridge import tarfile -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, CalibrationException, ChessboardInfo +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo import rclpy import sensor_msgs.srv + def display(win_name, img): cv2.namedWindow(win_name, cv2.WINDOW_NORMAL) - cv2.imshow( win_name, numpy.asarray( img[:,:] )) + cv2.imshow(win_name, numpy.asarray(img[:, :])) k = cv2.waitKey(0) cv2.destroyWindow(win_name) if k in [27, ord('q')]: rclpy.shutdown() -def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags = 0, visualize = False, alpha=1.0): +def cal_from_tarfile(boards, tarname, mono=False, upload=False, calib_flags=0, visualize=False, alpha=1.0): if mono: calibrator = MonoCalibrator(boards, calib_flags) else: @@ -63,26 +66,31 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags print(calibrator.ost()) - if upload: + if upload: info = calibrator.as_message() if mono: - set_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "camera", sensor_msgs.srv.SetCameraInfo) + set_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "camera", sensor_msgs.srv.SetCameraInfo) response = set_camera_info_service.call(info) if not response.success: - raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + raise RuntimeError( + "connected to set_camera_info service, but failed setting camera_info") else: - set_left_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "left_camera", sensor_msgs.srv.SetCameraInfo) - set_right_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "right_camera", sensor_msgs.srv.SetCameraInfo) + set_left_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "left_camera", sensor_msgs.srv.SetCameraInfo) + set_right_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "right_camera", sensor_msgs.srv.SetCameraInfo) response1 = set_left_camera_info_service(info[0]) response2 = set_right_camera_info_service(info[1]) if not (response1.result().success and response2.result().success): - raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + raise RuntimeError( + "connected to set_camera_info service, but failed setting camera_info") if visualize: - #Show rectified images + # Show rectified images calibrator.set_alpha(alpha) archive = tarfile.open(tarname, 'r') @@ -90,62 +98,68 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags for f in archive.getnames(): if f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')): filedata = archive.extractfile(f).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) bridge = cv_bridge.CvBridge() try: - msg=bridge.cv2_to_imgmsg(im, "bgr8") + msg = bridge.cv2_to_imgmsg(im, "bgr8") except 
cv_bridge.CvBridgeError as e: print(e) - #handle msg returns the recitifed image with corner detection once camera is calibrated. - drawable=calibrator.handle_msg(msg) - vis=numpy.asarray( drawable.scrib[:,:]) - #Display. Name of window:f + # handle msg returns the recitifed image with corner detection once camera is calibrated. + drawable = calibrator.handle_msg(msg) + vis = numpy.asarray(drawable.scrib[:, :]) + # Display. Name of window:f display(f, vis) else: - limages = [ f for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] + limages = [f for f in archive.getnames() if (f.startswith( + 'left') and (f.endswith('pgm') or f.endswith('png')))] limages.sort() - rimages = [ f for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] + rimages = [f for f in archive.getnames() if (f.startswith( + 'right') and (f.endswith('pgm') or f.endswith('png')))] rimages.sort() if not len(limages) == len(rimages): - raise RuntimeError("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) - + raise RuntimeError("Left, right images don't match. %d left images, %d right" % ( + len(limages), len(rimages))) + for i in range(len(limages)): - l=limages[i] - r=rimages[i] + l = limages[i] + r = rimages[i] if l.startswith('left') and (l.endswith('.pgm') or l.endswith('png')) and r.startswith('right') and (r.endswith('.pgm') or r.endswith('png')): # LEFT IMAGE filedata = archive.extractfile(l).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im_left=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) - + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im_left = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) + bridge = cv_bridge.CvBridge() try: - msg_left=bridge.cv2_to_imgmsg(im_left, "bgr8") + msg_left = bridge.cv2_to_imgmsg(im_left, "bgr8") except cv_bridge.CvBridgeError as e: print(e) - #RIGHT IMAGE + # RIGHT IMAGE filedata = archive.extractfile(r).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im_right=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im_right = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) try: - msg_right=bridge.cv2_to_imgmsg(im_right, "bgr8") + msg_right = bridge.cv2_to_imgmsg(im_right, "bgr8") except cv_bridge.CvBridgeError as e: print(e) - drawable=calibrator.handle_msg([ msg_left,msg_right] ) + drawable = calibrator.handle_msg([msg_left, msg_right]) - h, w = numpy.asarray(drawable.lscrib[:,:]).shape[:2] + h, w = numpy.asarray(drawable.lscrib[:, :]).shape[:2] vis = numpy.zeros((h, w*2, 3), numpy.uint8) - vis[:h, :w ,:] = numpy.asarray(drawable.lscrib[:,:]) - vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:,:]) - - display(l+" "+r,vis) + vis[:h, :w, :] = numpy.asarray(drawable.lscrib[:, :]) + vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:, :]) + + display(l+" "+r, vis) if __name__ == '__main__': @@ -160,23 +174,23 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags parser.add_option("--upload", default=False, action="store_true", dest="upload", help="Upload results to camera(s).") parser.add_option("--fix-principal-point", action="store_true", default=False, - help="fix the principal point at the image center") + help="fix the principal point at the image center") parser.add_option("--fix-aspect-ratio", action="store_true", default=False, - help="enforce focal lengths (fx, fy) are equal") + 
help="enforce focal lengths (fx, fy) are equal") parser.add_option("--zero-tangent-dist", action="store_true", default=False, - help="set tangential distortion coefficients (p1, p2) to zero") + help="set tangential distortion coefficients (p1, p2) to zero") parser.add_option("-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS", - help="number of radial distortion coefficients to use (up to 6, default %default)") + help="number of radial distortion coefficients to use (up to 6, default %default)") parser.add_option("--visualize", action="store_true", default=False, - help="visualize rectified images after calibration") + help="visualize rectified images after calibration") parser.add_option("-a", "--alpha", type="float", default=1.0, metavar="ALPHA", - help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)") + help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)") options, args = parser.parse_args() - + if len(options.size) != len(options.square): parser.error("Number of size and square inputs must be the same!") - + if not options.square: options.square.append("0.108") options.size.append("8x6") @@ -197,7 +211,8 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags if not args: parser.error("Must give tarfile name") if not os.path.exists(args[0]): - parser.error("Tarfile %s does not exist. Please select valid tarfile" % args[0]) + parser.error( + "Tarfile %s does not exist. Please select valid tarfile" % args[0]) tarname = args[0] @@ -225,4 +240,5 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags if (num_ks < 1): calib_flags |= cv2.CALIB_FIX_K1 - cal_from_tarfile(boards, tarname, options.mono, options.upload, calib_flags, options.visualize, options.alpha) + cal_from_tarfile(boards, tarname, options.mono, options.upload, + calib_flags, options.visualize, options.alpha) diff --git a/camera_calibration/src/camera_calibration/calibrator.py b/camera_calibration/src/camera_calibration/calibrator.py index f0fe63cf5..9a51e974c 100644 --- a/camera_calibration/src/camera_calibration/calibrator.py +++ b/camera_calibration/src/camera_calibration/calibrator.py @@ -32,80 +32,85 @@ # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -from io import BytesIO import cv2 import cv_bridge -import image_geometry import math import numpy.linalg -import pickle -import random import sensor_msgs.msg -import sys import tarfile -import time -from distutils.version import LooseVersion from enum import Enum # Supported camera models + + class CAMERA_MODEL(Enum): PINHOLE = 0 FISHEYE = 1 # Supported calibration patterns + + class Patterns: Chessboard, Circles, ACircles, ChArUco = list(range(4)) + class CalibrationException(Exception): pass # TODO: Make pattern per-board? 
+ + class ChessboardInfo(): - def __init__(self, pattern="chessboard", n_cols = 0, n_rows = 0, dim = 0.0, marker_size = 0.0, aruco_dict = None): + def __init__(self, pattern="chessboard", n_cols=0, n_rows=0, dim=0.0, marker_size=0.0, aruco_dict=None): self.pattern = pattern self.n_cols = n_cols self.n_rows = n_rows self.dim = dim self.marker_size = marker_size self.aruco_dict = None - self.charuco_board = None; - if pattern=="charuco": + self.charuco_board = None + if pattern == "charuco": self.aruco_dict = cv2.aruco.getPredefinedDictionary({ - "aruco_orig" : cv2.aruco.DICT_ARUCO_ORIGINAL, - "4x4_50" : cv2.aruco.DICT_4X4_50, - "4x4_100" : cv2.aruco.DICT_4X4_100, - "4x4_250" : cv2.aruco.DICT_4X4_250, - "4x4_1000" : cv2.aruco.DICT_4X4_1000, - "5x5_50" : cv2.aruco.DICT_5X5_50, - "5x5_100" : cv2.aruco.DICT_5X5_100, - "5x5_250" : cv2.aruco.DICT_5X5_250, - "5x5_1000" : cv2.aruco.DICT_5X5_1000, - "6x6_50" : cv2.aruco.DICT_6X6_50, - "6x6_100" : cv2.aruco.DICT_6X6_100, - "6x6_250" : cv2.aruco.DICT_6X6_250, - "6x6_1000" : cv2.aruco.DICT_6X6_1000, - "7x7_50" : cv2.aruco.DICT_7X7_50, - "7x7_100" : cv2.aruco.DICT_7X7_100, - "7x7_250" : cv2.aruco.DICT_7X7_250, - "7x7_1000" : cv2.aruco.DICT_7X7_1000}[aruco_dict]) + "aruco_orig": cv2.aruco.DICT_ARUCO_ORIGINAL, + "4x4_50": cv2.aruco.DICT_4X4_50, + "4x4_100": cv2.aruco.DICT_4X4_100, + "4x4_250": cv2.aruco.DICT_4X4_250, + "4x4_1000": cv2.aruco.DICT_4X4_1000, + "5x5_50": cv2.aruco.DICT_5X5_50, + "5x5_100": cv2.aruco.DICT_5X5_100, + "5x5_250": cv2.aruco.DICT_5X5_250, + "5x5_1000": cv2.aruco.DICT_5X5_1000, + "6x6_50": cv2.aruco.DICT_6X6_50, + "6x6_100": cv2.aruco.DICT_6X6_100, + "6x6_250": cv2.aruco.DICT_6X6_250, + "6x6_1000": cv2.aruco.DICT_6X6_1000, + "7x7_50": cv2.aruco.DICT_7X7_50, + "7x7_100": cv2.aruco.DICT_7X7_100, + "7x7_250": cv2.aruco.DICT_7X7_250, + "7x7_1000": cv2.aruco.DICT_7X7_1000}[aruco_dict]) self.charuco_board = cv2.aruco.CharucoBoard_create(self.n_cols, self.n_rows, self.dim, self.marker_size, - self.aruco_dict) + self.aruco_dict) # Make all private!!!!! + + def lmin(seq1, seq2): """ Pairwise minimum of two sequences """ return [min(a, b) for (a, b) in zip(seq1, seq2)] + def lmax(seq1, seq2): """ Pairwise maximum of two sequences """ return [max(a, b) for (a, b) in zip(seq1, seq2)] + def _pdist(p1, p2): """ Distance bwt two points. p1 = (x, y), p2 = (x, y) """ return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2)) + def _get_outside_corners(corners, board): """ Return the four corners of the board as a whole, as (up_left, up_right, down_right, down_left). @@ -115,19 +120,20 @@ def _get_outside_corners(corners, board): if board.pattern != "charuco" and corners.shape[1] * corners.shape[0] != xdim * ydim: raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % (corners.shape[1] * corners.shape[0], - xdim, ydim)) + xdim, ydim)) if board.pattern == "charuco" and corners.shape[1] * corners.shape[0] != (xdim-1) * (ydim-1): raise Exception(("Invalid number of corners! %d corners. 
X: %d, Y: %d\n for ChArUco boards, " + - "_get_largest_rectangle_corners handles partial views of the target") % (corners.shape[1] * - corners.shape[0], xdim-1, ydim-1)) + "_get_largest_rectangle_corners handles partial views of the target") % (corners.shape[1] * + corners.shape[0], xdim-1, ydim-1)) - up_left = corners[0,0] - up_right = corners[xdim - 1,0] - down_right = corners[-1,0] - down_left = corners[-xdim,0] + up_left = corners[0, 0] + up_right = corners[xdim - 1, 0] + down_right = corners[-1, 0] + down_left = corners[-xdim, 0] return (up_left, up_right, down_right, down_left) + def _get_largest_rectangle_corners(corners, ids, board): """ Return the largest rectangle with all four corners visible in a partial view of a ChArUco board, as (up_left, @@ -147,7 +153,8 @@ def _get_largest_rectangle_corners(corners, ids, board): # xdim and ydim are number of squares, but we're working with inner corners xdim = board.n_cols - 1 ydim = board.n_rows - 1 - board_vis = [[[i*xdim + j] in ids for j in range(xdim)] for i in range(ydim)] + board_vis = [ + [[i*xdim + j] in ids for j in range(xdim)] for i in range(ydim)] best_area = 0 best_rect = [-1, -1, -1, -1] @@ -162,10 +169,12 @@ def _get_largest_rectangle_corners(corners, ids, board): best_rect = [x1, x2, y1, y2] (x1, x2, y1, y2) = best_rect corner_ids = (y2*xdim+x1, y2*xdim+x2, y1*xdim+x2, y1*xdim + x1) - corners = tuple(corners[numpy.where(ids == corner_id)[0]][0][0] for corner_id in corner_ids) + corners = tuple(corners[numpy.where(ids == corner_id)[0]][0][0] + for corner_id in corner_ids) return corners + def _calculate_skew(corners): """ Get skew for given checkerboard detection. @@ -182,11 +191,13 @@ def angle(a, b, c): """ ab = a - b cb = c - b - return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb))) + return math.acos(numpy.dot(ab, cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb))) - skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right))) + skew = min(1.0, 2. * abs((math.pi / 2.) - + angle(up_left, up_right, down_right))) return skew + def _calculate_area(corners): """ Get 2d image area of the detected checkerboard. @@ -201,7 +212,8 @@ def _calculate_area(corners): q = a + b return abs(p[0]*q[1] - p[1]*q[0]) / 2. -def _get_corners(img, board, refine = True, checkerboard_flags=0): + +def _get_corners(img, board, refine=True, checkerboard_flags=0): """ Get corners for a particular chessboard for an image """ @@ -211,7 +223,7 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) else: mono = img - (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH | + (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags=cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags) if not ok: return (ok, corners) @@ -224,19 +236,22 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): ok = False # Ensure that all corner-arrays are going from top to bottom. 
- if board.n_rows!=board.n_cols: + if board.n_rows != board.n_cols: if corners[0, 0, 1] > corners[-1, 0, 1]: corners = numpy.copy(numpy.flipud(corners)) else: - direction_corners=(corners[-1]-corners[0])>=numpy.array([[0.0,0.0]]) + direction_corners = (corners[-1]-corners[0] + ) >= numpy.array([[0.0, 0.0]]) if not numpy.all(direction_corners): if not numpy.any(direction_corners): corners = numpy.copy(numpy.flipud(corners)) elif direction_corners[0][0]: - corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2)).reshape(board.n_cols*board.n_rows,1,2) + corners = numpy.rot90(corners.reshape(board.n_rows, board.n_cols, 2)).reshape( + board.n_cols*board.n_rows, 1, 2) else: - corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2),3).reshape(board.n_cols*board.n_rows,1,2) + corners = numpy.rot90(corners.reshape(board.n_rows, board.n_cols, 2), 3).reshape( + board.n_cols*board.n_rows, 1, 2) if refine and ok: # Use a radius of half the minimum distance between corners. This should be large enough to snap to the @@ -245,17 +260,20 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): for row in range(board.n_rows): for col in range(board.n_cols - 1): index = row*board.n_rows + col - min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0])) + min_distance = min(min_distance, _pdist( + corners[index, 0], corners[index + 1, 0])) for row in range(board.n_rows - 1): for col in range(board.n_cols): index = row*board.n_rows + col - min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0])) + min_distance = min(min_distance, _pdist( + corners[index, 0], corners[index + board.n_cols, 0])) radius = int(math.ceil(min_distance * 0.5)) - cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1), - ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1), + (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)) return (ok, corners) + def _get_charuco_corners(img, board, refine): """ Get chessboard corners from image of ChArUco board @@ -268,12 +286,15 @@ def _get_charuco_corners(img, board, refine): else: mono = img - marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(img, board.aruco_dict) + marker_corners, marker_ids, _ = cv2.aruco.detectMarkers( + img, board.aruco_dict) if len(marker_corners) == 0: return (False, None, None) - _, square_corners, ids = cv2.aruco.interpolateCornersCharuco(marker_corners, marker_ids, img, board.charuco_board) + _, square_corners, ids = cv2.aruco.interpolateCornersCharuco( + marker_corners, marker_ids, img, board.charuco_board) return ((square_corners is not None) and (len(square_corners) > 5), square_corners, ids) + def _get_circles(img, board, pattern): """ Get circle centers for a symmetric or asymmetric grid @@ -289,16 +310,19 @@ def _get_circles(img, board, pattern): if pattern == Patterns.ACircles: flag = cv2.CALIB_CB_ASYMMETRIC_GRID mono_arr = numpy.array(mono) - (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag) + (ok, corners) = cv2.findCirclesGrid( + mono_arr, (board.n_cols, board.n_rows), flags=flag) # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try # again with dimensions swapped - not so efficient. # TODO Better to add as second board? Corner ordering will change. 
if not ok and pattern == Patterns.Circles: - (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag) + (ok, corners) = cv2.findCirclesGrid( + mono_arr, (board.n_rows, board.n_cols), flags=flag) return (ok, corners) + def _get_dist_model(dist_params, cam_model): # Select dist model if CAMERA_MODEL.PINHOLE == cam_model: @@ -313,21 +337,26 @@ def _get_dist_model(dist_params, cam_model): return dist_model # TODO self.size needs to come from CameraInfo, full resolution + + class Calibrator(): """ Base class for calibration system """ - def __init__(self, boards, flags=0, fisheye_flags = 0, pattern=Patterns.Chessboard, name='', - checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed = -1.0): + + def __init__(self, boards, flags=0, fisheye_flags=0, pattern=Patterns.Chessboard, name='', + checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed=-1.0): # Ordering the dimensions for the different detectors is actually a minefield... if pattern == Patterns.Chessboard: # Make sure n_cols > n_rows to agree with OpenCV CB detector output - self._boards = [ChessboardInfo("chessboard", max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards] + self._boards = [ChessboardInfo("chessboard", max( + i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards] if pattern == Patterns.ChArUco: self._boards = boards elif pattern == Patterns.ACircles: # 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols. - self._boards = [ChessboardInfo("acircles", min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards] + self._boards = [ChessboardInfo("acircles", min(i.n_cols, i.n_rows), max( + i.n_cols, i.n_rows), i.dim) for i in boards] elif pattern == Patterns.Circles: # We end up having to check both ways anyway self._boards = boards @@ -382,10 +411,11 @@ def get_parameters(self, corners, ids, board, size): Return list of parameters [X, Y, size, skew] describing the checkerboard view. """ (width, height) = size - Xs = corners[:,:,0] - Ys = corners[:,:,1] + Xs = corners[:, :, 0] + Ys = corners[:, :, 1] if board.pattern == 'charuco': - outside_corners = _get_largest_rectangle_corners(corners, ids, board) + outside_corners = _get_largest_rectangle_corners( + corners, ids, board) else: outside_corners = _get_outside_corners(corners, board) area = _calculate_area(outside_corners) @@ -393,7 +423,7 @@ def get_parameters(self, corners, ids, board, size): border = math.sqrt(area) # For X and Y, we "shrink" the image all around by approx. half the board size. # Otherwise large boards are penalized because you can't get much X/Y variation. 
- p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border))) + p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border))) p_y = min(1.0, max(0.0, (numpy.mean(Ys) - border / 2) / (height - border))) p_size = math.sqrt(area / (width * height)) params = [p_x, p_y, p_size, skew] @@ -412,19 +442,23 @@ def is_slow_moving(self, corners, ids, last_frame_corners, last_frame_ids): return False if ids is None: num_corners = len(corners) - corner_deltas = (corners - last_frame_corners).reshape(num_corners, 2) + corner_deltas = ( + corners - last_frame_corners).reshape(num_corners, 2) else: corner_deltas = [] last_frame_ids = list(last_frame_ids.transpose()[0]) for i, c_id in enumerate(ids): try: last_i = last_frame_ids.index(c_id) - corner_deltas.append(corners[i] - last_frame_corners[last_i]) - except ValueError: pass + corner_deltas.append( + corners[i] - last_frame_corners[last_i]) + except ValueError: + pass corner_deltas = numpy.concatenate(corner_deltas) # Average distance travelled overall for all corners - average_motion = numpy.average(numpy.linalg.norm(corner_deltas, axis = 1)) + average_motion = numpy.average( + numpy.linalg.norm(corner_deltas, axis=1)) return average_motion <= self.max_chessboard_speed def is_good_sample(self, params, corners, ids, last_frame_corners, last_frame_ids): @@ -435,11 +469,11 @@ def is_good_sample(self, params, corners, ids, last_frame_corners, last_frame_id return True def param_distance(p1, p2): - return sum([abs(a-b) for (a,b) in zip(p1, p2)]) + return sum([abs(a-b) for (a, b) in zip(p1, p2)]) db_params = [sample[0] for sample in self.db] d = min([param_distance(params, p) for p in db_params]) - #print "d = %.3f" % d #DEBUG + # print "d = %.3f" % d #DEBUG # TODO What's a good threshold here? Should it be configurable? if d <= 0.2: return False @@ -468,14 +502,16 @@ def compute_goodenough(self): min_params = [min_params[0], min_params[1], 0., 0.] # For each parameter, judge how much progress has been made toward adequate variation - progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) in zip(min_params, max_params, self.param_ranges)] + progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) + in zip(min_params, max_params, self.param_ranges)] # If we have lots of samples, allow calibration even if not all parameters are green # TODO Awkward that we update self.goodenough instead of returning it - self.goodenough = (len(self.db) >= 40) or all([p == 1.0 for p in progress]) + self.goodenough = (len(self.db) >= 40) or all( + [p == 1.0 for p in progress]) return list(zip(self._param_names, min_params, max_params, progress)) - def mk_object_points(self, boards, use_board_size = False): + def mk_object_points(self, boards, use_board_size=False): opts = [] for i, b in enumerate(boards): num_pts = b.n_cols * b.n_rows @@ -483,7 +519,8 @@ def mk_object_points(self, boards, use_board_size = False): for j in range(num_pts): opts_loc[j, 0, 0] = (j // b.n_cols) if self.pattern == Patterns.ACircles: - opts_loc[j, 0, 1] = 2*(j % b.n_cols) + (opts_loc[j, 0, 0] % 2) + opts_loc[j, 0, 1] = 2 * \ + (j % b.n_cols) + (opts_loc[j, 0, 0] % 2) else: opts_loc[j, 0, 1] = (j % b.n_cols) opts_loc[j, 0, 2] = 0 @@ -492,7 +529,7 @@ def mk_object_points(self, boards, use_board_size = False): opts.append(opts_loc) return opts - def get_corners(self, img, refine = True): + def get_corners(self, img, refine=True): """ Use cvFindChessboardCorners to find corners of chessboard in image. 
@@ -507,7 +544,8 @@ def get_corners(self, img, refine = True): for b in self._boards: if self.pattern == Patterns.Chessboard: - (ok, corners) = _get_corners(img, b, refine, self.checkerboard_flags) + (ok, corners) = _get_corners( + img, b, refine, self.checkerboard_flags) ids = None elif self.pattern == Patterns.ChArUco: (ok, corners, ids) = _get_charuco_corners(img, b, refine) @@ -532,7 +570,7 @@ def downsample_and_detect(self, img): # Scale the input image down to ~VGA size height = img.shape[0] width = img.shape[1] - scale = math.sqrt( (width*height) / (640.*480.) ) + scale = math.sqrt((width*height) / (640.*480.)) if scale > 1.0: scrib = cv2.resize(img, (int(width / scale), int(height / scale))) else: @@ -543,7 +581,8 @@ def downsample_and_detect(self, img): if self.pattern == Patterns.Chessboard: # Detect checkerboard - (ok, downsampled_corners, ids, board) = self.get_corners(scrib, refine = True) + (ok, downsampled_corners, ids, board) = self.get_corners( + scrib, refine=True) # Scale corners back to full size image corners = None @@ -559,8 +598,8 @@ def downsample_and_detect(self, img): mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) else: mono = img - cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1), - ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + cv2.cornerSubPix(mono, corners_unrefined, (radius, radius), (-1, -1), + (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)) corners = corners_unrefined else: corners = downsampled_corners @@ -572,8 +611,8 @@ def downsample_and_detect(self, img): if ok: if scale > 1.0: downsampled_corners = corners.copy() - downsampled_corners[:,:,0] /= x_scale - downsampled_corners[:,:,1] /= y_scale + downsampled_corners[:, :, 0] /= x_scale + downsampled_corners[:, :, 1] /= y_scale else: downsampled_corners = corners @@ -619,34 +658,36 @@ def lrost(name, d, k, r, p, size): "[%s]" % name, "", "camera matrix", - " ".join("%8f" % k[0,i] for i in range(3)), - " ".join("%8f" % k[1,i] for i in range(3)), - " ".join("%8f" % k[2,i] for i in range(3)), + " ".join("%8f" % k[0, i] for i in range(3)), + " ".join("%8f" % k[1, i] for i in range(3)), + " ".join("%8f" % k[2, i] for i in range(3)), "", "distortion", " ".join("%8f" % x for x in d.flat), "", "rectification", - " ".join("%8f" % r[0,i] for i in range(3)), - " ".join("%8f" % r[1,i] for i in range(3)), - " ".join("%8f" % r[2,i] for i in range(3)), + " ".join("%8f" % r[0, i] for i in range(3)), + " ".join("%8f" % r[1, i] for i in range(3)), + " ".join("%8f" % r[2, i] for i in range(3)), "", "projection", - " ".join("%8f" % p[0,i] for i in range(4)), - " ".join("%8f" % p[1,i] for i in range(4)), - " ".join("%8f" % p[2,i] for i in range(4)), + " ".join("%8f" % p[0, i] for i in range(4)), + " ".join("%8f" % p[1, i] for i in range(4)), + " ".join("%8f" % p[2, i] for i in range(4)), "" ]) - assert len(calmessage) < 525, "Calibration info must be less than 525 bytes" + assert len( + calmessage) < 525, "Calibration info must be less than 525 bytes" return calmessage @staticmethod def lryaml(name, d, k, r, p, size, cam_model): def format_mat(x, precision): return ("[%s]" % ( - numpy.array2string(x, precision=precision, suppress_small=True, separator=", ") + numpy.array2string(x, precision=precision, + suppress_small=True, separator=", ") .replace("[", "").replace("]", "").replace("\n", "\n ") - )) + )) dist_model = _get_dist_model(d, cam_model) @@ -681,10 +722,11 @@ def format_mat(x, precision): def do_save(self): filename = '/tmp/calibrationdata.tar.gz' tf = 
tarfile.open(filename, 'w:gz') - self.do_tarfile_save(tf) # Must be overridden in subclasses + self.do_tarfile_save(tf) # Must be overridden in subclasses tf.close() print(("Wrote calibration data to", filename)) + def image_from_archive(archive, name): """ Load image PGM file from tar archive. @@ -692,24 +734,29 @@ def image_from_archive(archive, name): Used for tarfile loading and unit test. """ member = archive.getmember(name) - imagefiledata = numpy.frombuffer(archive.extractfile(member).read(), numpy.uint8) + imagefiledata = numpy.frombuffer( + archive.extractfile(member).read(), numpy.uint8) imagefiledata.resize((1, imagefiledata.size)) return cv2.imdecode(imagefiledata, cv2.IMREAD_COLOR) + class ImageDrawable(): """ Passed to CalibrationNode after image handled. Allows plotting of images with detected corner points """ + def __init__(self): self.params = None + class MonoDrawable(ImageDrawable): def __init__(self): ImageDrawable.__init__(self) self.scrib = None self.linear_error = -1.0 + class StereoDrawable(ImageDrawable): def __init__(self): ImageDrawable.__init__(self) @@ -717,733 +764,3 @@ def __init__(self): self.rscrib = None self.epierror = -1 self.dim = -1 - - -class MonoCalibrator(Calibrator): - """ - Calibration class for monocular cameras:: - - images = [cv2.imread("mono%d.png") for i in range(8)] - mc = MonoCalibrator() - mc.cal(images) - print mc.as_message() - """ - - is_mono = True # TODO Could get rid of is_mono - - def __init__(self, *args, **kwargs): - if 'name' not in kwargs: - kwargs['name'] = 'narrow_stereo/left' - super(MonoCalibrator, self).__init__(*args, **kwargs) - - def cal(self, images): - """ - Calibrate camera from given images - """ - goodcorners = self.collect_corners(images) - self.cal_fromcorners(goodcorners) - self.calibrated = True - - def collect_corners(self, images): - """ - :param images: source images containing chessboards - :type images: list of :class:`cvMat` - - Find chessboards in all images. - - Return [ (corners, ids, ChessboardInfo) ] - """ - self.size = (images[0].shape[1], images[0].shape[0]) - corners = [self.get_corners(i) for i in images] - - goodcorners = [(co, ids, b) for (ok, co, ids, b) in corners if ok] - if not goodcorners: - raise CalibrationException("No corners found in images!") - return goodcorners - - def cal_fromcorners(self, good): - """ - :param good: Good corner positions and boards - :type good: [(corners, ChessboardInfo)] - """ - - (ipts, ids, boards) = zip(*good) - opts = self.mk_object_points(boards) - # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio - intrinsics_in = numpy.eye(3, dtype=numpy.float64) - - if self.pattern == Patterns.ChArUco: - if self.camera_model == CAMERA_MODEL.FISHEYE: - raise NotImplemented("Can't perform fisheye calibration with ChArUco board") - - reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco( - ipts, ids, boards[0].charuco_board, self.size, intrinsics_in, None) - - elif self.camera_model == CAMERA_MODEL.PINHOLE: - print("mono pinhole calibration...") - reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera( - opts, ipts, - self.size, - intrinsics_in, - None, - flags = self.calib_flags) - # OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set. - # The extra ones include e.g. thin prism coefficients, which we are not interested in. 
- if self.calib_flags & cv2.CALIB_RATIONAL_MODEL: - self.distortion = dist_coeffs.flat[:8].reshape(-1, 1) # rational polynomial - else: - self.distortion = dist_coeffs.flat[:5].reshape(-1, 1) # plumb bob - elif self.camera_model == CAMERA_MODEL.FISHEYE: - print("mono fisheye calibration...") - # WARNING: cv2.fisheye.calibrate wants float64 points - ipts64 = numpy.asarray(ipts, dtype=numpy.float64) - ipts = ipts64 - opts64 = numpy.asarray(opts, dtype=numpy.float64) - opts = opts64 - reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate( - opts, ipts, self.size, - intrinsics_in, None, flags = self.fisheye_calib_flags) - - # R is identity matrix for monocular calibration - self.R = numpy.eye(3, dtype=numpy.float64) - self.P = numpy.zeros((3, 4), dtype=numpy.float64) - - self.set_alpha(0.0) - - def set_alpha(self, a): - """ - Set the alpha value for the calibrated camera solution. The alpha - value is a zoom, and ranges from 0 (zoomed in, all pixels in - calibrated image are valid) to 1 (zoomed out, all pixels in - original image are in calibrated image). - """ - - if self.camera_model == CAMERA_MODEL.PINHOLE: - # NOTE: Prior to Electric, this code was broken such that we never actually saved the new - # camera matrix. In effect, this enforced P = [K|0] for monocular cameras. - # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix) - ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a) - for j in range(3): - for i in range(3): - self.P[j,i] = ncm[j, i] - self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - # NOTE: cv2.fisheye.estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead: - self.P[:3,:3] = self.intrinsics[:3,:3] - self.P[0,0] /= (1. + a) - self.P[1,1] /= (1. 
+ a) - self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1) - - - def remap(self, src): - """ - :param src: source image - :type src: :class:`cvMat` - - Apply the post-calibration undistortion to the source image - """ - return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR) - - def undistort_points(self, src): - """ - :param src: N source pixel points (u,v) as an Nx2 matrix - :type src: :class:`cvMat` - - Apply the post-calibration undistortion to the source points - """ - if self.camera_model == CAMERA_MODEL.PINHOLE: - return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - return cv2.fisheye.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P) - - def as_message(self): - """ Return the camera calibration as a CameraInfo message """ - return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) - - def from_message(self, msg, alpha = 0.0): - """ Initialize the camera calibration from a CameraInfo message """ - - self.size = (msg.width, msg.height) - self.intrinsics = numpy.array(msg.k, dtype=numpy.float64, copy=True).reshape((3, 3)) - self.distortion = numpy.array(msg.d, dtype=numpy.float64, copy=True).reshape((len(msg.d), 1)) - self.R = numpy.array(msg.r, dtype=numpy.float64, copy=True).reshape((3, 3)) - self.P = numpy.array(msg.p, dtype=numpy.float64, copy=True).reshape((3, 4)) - - self.set_alpha(0.0) - - def report(self): - self.lrreport(self.distortion, self.intrinsics, self.R, self.P) - - def ost(self): - return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size) - - def yaml(self): - return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) - - def linear_error_from_image(self, image): - """ - Detect the checkerboard and compute the linear error. - Mainly for use in tests. - """ - _, corners, _, ids, board, _ = self.downsample_and_detect(image) - if corners is None: - return None - - undistorted = self.undistort_points(corners) - return self.linear_error(undistorted, ids, board) - - @staticmethod - def linear_error(corners, ids, b): - - """ - Returns the linear error for a set of corners detected in the unrectified image. 
- """ - - if corners is None: - return None - - corners = numpy.squeeze(corners) - - def pt2line(x0, y0, x1, y1, x2, y2): - """ point is (x0, y0), line is (x1, y1, x2, y2) """ - return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) - - n_cols = b.n_cols - n_rows = b.n_rows - if b.pattern == 'charuco': - n_cols -= 1 - n_rows -= 1 - n_pts = n_cols * n_rows - - if ids is None: - ids = numpy.arange(n_pts).reshape((n_pts, 1)) - - ids_to_idx = dict((ids[i, 0], i) for i in range(len(ids))) - - errors = [] - for row in range(n_rows): - row_min = row * n_cols - row_max = (row+1) * n_cols - pts_in_row = [x for x in ids if row_min <= x < row_max] - - # not enough points to calculate error - if len(pts_in_row) <= 2: continue - - left_pt = min(pts_in_row)[0] - right_pt = max(pts_in_row)[0] - x_left = corners[ids_to_idx[left_pt], 0] - y_left = corners[ids_to_idx[left_pt], 1] - x_right = corners[ids_to_idx[right_pt], 0] - y_right = corners[ids_to_idx[right_pt], 1] - - for pt in pts_in_row: - if pt[0] in (left_pt, right_pt): continue - x = corners[ids_to_idx[pt[0]], 0] - y = corners[ids_to_idx[pt[0]], 1] - errors.append(pt2line(x, y, x_left, y_left, x_right, y_right)) - - if errors: - return math.sqrt(sum([e**2 for e in errors]) / len(errors)) - else: - return None - - - def handle_msg(self, msg): - """ - Detects the calibration target and, if found and provides enough new information, - adds it to the sample database. - - Returns a MonoDrawable message with the display image and progress info. - """ - gray = self.mkgray(msg) - linear_error = -1 - - # Get display-image-to-be (scrib) and detection of the calibration target - scrib_mono, corners, downsampled_corners, ids, board, (x_scale, y_scale) = self.downsample_and_detect(gray) - - if self.calibrated: - # Show rectified image - # TODO Pull out downsampling code into function - gray_remap = self.remap(gray) - gray_rect = gray_remap - if x_scale != 1.0 or y_scale != 1.0: - gray_rect = cv2.resize(gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0])) - - scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR) - - if corners is not None: - # Report linear error - undistorted = self.undistort_points(corners) - linear_error = self.linear_error(undistorted, ids, board) - - # Draw rectified corners - scrib_src = undistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), scrib_src, True) - - else: - scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR) - if corners is not None: - # Draw (potentially downsampled) corners onto display image - if board.pattern == "charuco": - cv2.aruco.drawDetectedCornersCharuco(scrib, downsampled_corners, ids) - else: - cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), downsampled_corners, True) - - # Add sample to database only if it's sufficiently different from any previous sample. 
- params = self.get_parameters(corners, ids, board, (gray.shape[1], gray.shape[0])) - if self.is_good_sample(params, corners, ids, self.last_frame_corners, self.last_frame_ids): - self.db.append((params, gray)) - if self.pattern == Patterns.ChArUco: - self.good_corners.append((corners, ids, board)) - else: - self.good_corners.append((corners, None, board)) - print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) - - self.last_frame_corners = corners - self.last_frame_ids = ids - rv = MonoDrawable() - rv.scrib = scrib - rv.params = self.compute_goodenough() - rv.linear_error = linear_error - return rv - - def do_calibration(self, dump = False): - if not self.good_corners: - print("**** Collecting corners for all images! ****") #DEBUG - images = [i for (p, i) in self.db] - self.good_corners = self.collect_corners(images) - self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally - # Dump should only occur if user wants it - if dump: - pickle.dump((self.is_mono, self.size, self.good_corners), - open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) - self.cal_fromcorners(self.good_corners) - self.calibrated = True - # DEBUG - print((self.report())) - print((self.ost())) - - def do_tarfile_save(self, tf): - """ Write images and calibration solution to a tarfile object """ - - def taradd(name, buf): - if isinstance(buf, str): - s = BytesIO(buf.encode('utf-8')) - else: - s = BytesIO(buf) - ti = tarfile.TarInfo(name) - ti.size = len(s.getvalue()) - ti.uname = 'calibrator' - ti.mtime = int(time.time()) - tf.addfile(tarinfo=ti, fileobj=s) - - ims = [("left-%04d.png" % i, im) for i,(_, im) in enumerate(self.db)] - for (name, im) in ims: - taradd(name, cv2.imencode(".png", im)[1].tostring()) - taradd('ost.yaml', self.yaml()) - taradd('ost.txt', self.ost()) - - def do_tarfile_calibration(self, filename): - archive = tarfile.open(filename, 'r') - - limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('.pgm') or f.endswith('png'))) ] - - self.cal(limages) - -# TODO Replicate MonoCalibrator improvements in stereo -class StereoCalibrator(Calibrator): - """ - Calibration class for stereo cameras:: - - limages = [cv2.imread("left%d.png") for i in range(8)] - rimages = [cv2.imread("right%d.png") for i in range(8)] - sc = StereoCalibrator() - sc.cal(limages, rimages) - print sc.as_message() - """ - - is_mono = False - - def __init__(self, *args, **kwargs): - if 'name' not in kwargs: - kwargs['name'] = 'narrow_stereo' - super(StereoCalibrator, self).__init__(*args, **kwargs) - self.l = MonoCalibrator(*args, **kwargs) - self.r = MonoCalibrator(*args, **kwargs) - # Collecting from two cameras in a horizontal stereo rig, can't get - # full X range in the left camera. - self.param_ranges[0] = 0.4 - - #override - def set_cammodel(self, modeltype): - super(StereoCalibrator, self).set_cammodel(modeltype) - self.l.set_cammodel(modeltype) - self.r.set_cammodel(modeltype) - - def cal(self, limages, rimages): - """ - :param limages: source left images containing chessboards - :type limages: list of :class:`cvMat` - :param rimages: source right images containing chessboards - :type rimages: list of :class:`cvMat` - - Find chessboards in images, and runs the OpenCV calibration solver. 
- """ - goodcorners = self.collect_corners(limages, rimages) - self.size = (limages[0].shape[1], limages[0].shape[0]) - self.l.size = self.size - self.r.size = self.size - self.cal_fromcorners(goodcorners) - self.calibrated = True - - def collect_corners(self, limages, rimages): - """ - For a sequence of left and right images, find pairs of images where both - left and right have a chessboard, and return their corners as a list of pairs. - """ - # Pick out (corners, ids, board) tuples - lcorners = [] - rcorners = [] - for img in limages: - (_, corners, _, ids, board, _) = self.downsample_and_detect(img) - lcorners.append((corners, ids, board)) - for img in rimages: - (_, corners, _, ids, board, _) = self.downsample_and_detect(img) - rcorners.append((corners, ids, board)) - - good = [(lco, rco, lid, rid, b) for ((lco, lid, b), (rco, rid, br)) in zip( lcorners, rcorners) - if (lco is not None and rco is not None)] - - if len(good) == 0: - raise CalibrationException("No corners found in images!") - return good - - def cal_fromcorners(self, good): - # Perform monocular calibrations - lcorners = [(lco, lid, b) for (lco, rco, lid, rid, b) in good] - rcorners = [(rco, rid, b) for (lco, rco, lid, rid, b) in good] - self.l.cal_fromcorners(lcorners) - self.r.cal_fromcorners(rcorners) - - (lipts, ripts, _, _, boards) = zip(*good) - - opts = self.mk_object_points(boards, True) - - flags = cv2.CALIB_FIX_INTRINSIC - - self.T = numpy.zeros((3, 1), dtype=numpy.float64) - self.R = numpy.eye(3, dtype=numpy.float64) - - if self.pattern == Patterns.ChArUco: - # TODO: implement stereo ChArUco calibration - raise NotImplemented("Stereo calibration not implemented for ChArUco boards") - - if self.camera_model == CAMERA_MODEL.PINHOLE: - print("stereo pinhole calibration...") - if LooseVersion(cv2.__version__).version[0] == 2: - cv2.stereoCalibrate(opts, lipts, ripts, self.size, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), - flags = flags) - else: - cv2.stereoCalibrate(opts, lipts, ripts, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), - flags = flags) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - print("stereo fisheye calibration...") - if LooseVersion(cv2.__version__).version[0] == 2: - print("ERROR: You need OpenCV >3 to use fisheye camera model") - sys.exit() - else: - # WARNING: cv2.fisheye.stereoCalibrate wants float64 points - lipts64 = numpy.asarray(lipts, dtype=numpy.float64) - lipts = lipts64 - ripts64 = numpy.asarray(ripts, dtype=numpy.float64) - ripts = ripts64 - opts64 = numpy.asarray(opts, dtype=numpy.float64) - opts = opts64 - - cv2.fisheye.stereoCalibrate(opts, lipts, ripts, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), # 30, 1e-6 - flags = flags) - - self.set_alpha(0.0) - - def set_alpha(self, a): - """ - Set the alpha value for the calibrated camera solution. The - alpha value is a zoom, and ranges from 0 (zoomed in, all pixels - in calibrated image are valid) to 1 (zoomed out, all pixels in - original image are in calibrated image). 
- """ - if self.camera_model == CAMERA_MODEL.PINHOLE: - cv2.stereoRectify(self.l.intrinsics, - self.l.distortion, - self.r.intrinsics, - self.r.distortion, - self.size, - self.R, - self.T, - self.l.R, self.r.R, self.l.P, self.r.P, - alpha = a) - - cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1, - self.l.mapx, self.l.mapy) - cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1, - self.r.mapx, self.r.mapy) - - elif self.camera_model == CAMERA_MODEL.FISHEYE: - self.Q = numpy.zeros((4,4), dtype=numpy.float64) - - flags = cv2.CALIB_ZERO_DISPARITY # Operation flags that may be zero or CALIB_ZERO_DISPARITY . - # If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. - # And if the flag is not set, the function may still shift the images in the horizontal or vertical direction - # (depending on the orientation of epipolar lines) to maximize the useful image area. - - cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, self.T, - flags, - self.l.R, self.r.R, - self.l.P, self.r.P, - self.Q, - self.size, - a, - 1.0 ) - self.l.P[:3,:3] = numpy.dot(self.l.intrinsics,self.l.R) - self.r.P[:3,:3] = numpy.dot(self.r.intrinsics,self.r.R) - cv2.fisheye.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2.CV_32FC1, - self.l.mapx, self.l.mapy) - cv2.fisheye.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2.CV_32FC1, - self.r.mapx, self.r.mapy) - - def as_message(self): - """ - Return the camera calibration as a pair of CameraInfo messages, for left - and right cameras respectively. - """ - - return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model), - self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model)) - - def from_message(self, msgs, alpha = 0.0): - """ Initialize the camera calibration from a pair of CameraInfo messages. """ - self.size = (msgs[0].width, msgs[0].height) - - self.T = numpy.zeros((3, 1), dtype=numpy.float64) - self.R = numpy.eye(3, dtype=numpy.float64) - - self.l.from_message(msgs[0]) - self.r.from_message(msgs[1]) - # Need to compute self.T and self.R here, using the monocular parameters above - if False: - self.set_alpha(0.0) - - def report(self): - print("\nLeft:") - self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) - print("\nRight:") - self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P) - print("self.T =", numpy.ravel(self.T).tolist()) - print("self.R =", numpy.ravel(self.R).tolist()) - - def ost(self): - return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) + - self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size)) - - def yaml(self, suffix, info): - return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, self.size, self.camera_model) - - # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners - def epipolar_error_from_images(self, limage, rimage): - """ - Detect the checkerboard in both images and compute the epipolar error. - Mainly for use in tests. 
- """ - lcorners = self.downsample_and_detect(limage)[1] - rcorners = self.downsample_and_detect(rimage)[1] - if lcorners is None or rcorners is None: - return None - - lundistorted = self.l.undistort_points(lcorners) - rundistorted = self.r.undistort_points(rcorners) - - return self.epipolar_error(lundistorted, rundistorted) - - def epipolar_error(self, lcorners, rcorners): - """ - Compute the epipolar error from two sets of matching undistorted points - """ - d = lcorners[:,:,1] - rcorners[:,:,1] - return numpy.sqrt(numpy.square(d).sum() / d.size) - - def chessboard_size_from_images(self, limage, rimage): - _, lcorners, _, _, board, _ = self.downsample_and_detect(limage) - _, rcorners, _, _, board, _ = self.downsample_and_detect(rimage) - if lcorners is None or rcorners is None: - return None - - lundistorted = self.l.undistort_points(lcorners) - rundistorted = self.r.undistort_points(rcorners) - - return self.chessboard_size(lundistorted, rundistorted, board) - - def chessboard_size(self, lcorners, rcorners, board, msg = None): - """ - Compute the square edge length from two sets of matching undistorted points - given the current calibration. - :param msg: a tuple of (left_msg, right_msg) - """ - # Project the points to 3d - cam = image_geometry.StereoCameraModel() - if msg == None: - msg = self.as_message() - cam.fromCameraInfo(*msg) - disparities = lcorners[:,:,0] - rcorners[:,:,0] - pt3d = [cam.projectPixelTo3d((lcorners[i,0,0], lcorners[i,0,1]), disparities[i,0]) for i in range(lcorners.shape[0]) ] - def l2(p0, p1): - return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)])) - - # Compute the length from each horizontal and vertical line, and return the mean - cc = board.n_cols - cr = board.n_rows - lengths = ( - [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] + - [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)]) - return sum(lengths) / len(lengths) - - def handle_msg(self, msg): - # TODO Various asserts that images have same dimension, same board detected... 
- (lmsg, rmsg) = msg - lgray = self.mkgray(lmsg) - rgray = self.mkgray(rmsg) - epierror = -1 - - # Get display-images-to-be and detections of the calibration target - lscrib_mono, lcorners, ldownsampled_corners, lids, lboard, (x_scale, y_scale) = self.downsample_and_detect(lgray) - rscrib_mono, rcorners, rdownsampled_corners, rids, rboard, _ = self.downsample_and_detect(rgray) - - if self.calibrated: - # Show rectified images - lremap = self.l.remap(lgray) - rremap = self.r.remap(rgray) - lrect = lremap - rrect = rremap - if x_scale != 1.0 or y_scale != 1.0: - lrect = cv2.resize(lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0])) - rrect = cv2.resize(rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0])) - - lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR) - rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR) - - # Draw rectified corners - if lcorners is not None: - lundistorted = self.l.undistort_points(lcorners) - scrib_src = lundistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True) - - if rcorners is not None: - rundistorted = self.r.undistort_points(rcorners) - scrib_src = rundistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True) - - # Report epipolar error - if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): - epierror = self.epipolar_error(lundistorted, rundistorted) - - else: - lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR) - rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR) - # Draw any detected chessboards onto display (downsampled) images - if lcorners is not None: - cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), - ldownsampled_corners, True) - if rcorners is not None: - cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), - rdownsampled_corners, True) - - # Add sample to database only if it's sufficiently different from any previous sample - if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): - params = self.get_parameters(lcorners, lids, lboard, (lgray.shape[1], lgray.shape[0])) - if self.is_good_sample(params, lcorners, lids, self.last_frame_corners, self.last_frame_ids): - self.db.append( (params, lgray, rgray) ) - self.good_corners.append( (lcorners, rcorners, lids, rids, lboard) ) - print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) - - self.last_frame_corners = lcorners - self.last_frame_ids = lids - rv = StereoDrawable() - rv.lscrib = lscrib - rv.rscrib = rscrib - rv.params = self.compute_goodenough() - rv.epierror = epierror - return rv - - def do_calibration(self, dump = False): - # TODO MonoCalibrator collects corners if needed here - self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally - # Dump should only occur if user wants it - if dump: - pickle.dump((self.is_mono, self.size, self.good_corners), - open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) - self.l.size = self.size - self.r.size = self.size - self.cal_fromcorners(self.good_corners) - self.calibrated = True - # DEBUG - print((self.report())) - print((self.ost())) - - def do_tarfile_save(self, tf): - """ Write images and calibration solution to a tarfile object """ - ims = ([("left-%04d.png" % i, im) for i,(_, im, _) in enumerate(self.db)] + - [("right-%04d.png" 
% i, im) for i,(_, _, im) in enumerate(self.db)]) - - def taradd(name, buf): - if isinstance(buf, str): - s = BytesIO(buf.encode('utf-8')) - else: - s = BytesIO(buf) - ti = tarfile.TarInfo(name) - ti.size = len(s.getvalue()) - ti.uname = 'calibrator' - ti.mtime = int(time.time()) - tf.addfile(tarinfo=ti, fileobj=s) - - for (name, im) in ims: - taradd(name, cv2.imencode(".png", im)[1].tostring()) - taradd('left.yaml', self.yaml("/left", self.l)) - taradd('right.yaml', self.yaml("/right", self.r)) - taradd('ost.txt', self.ost()) - - def do_tarfile_calibration(self, filename): - archive = tarfile.open(filename, 'r') - limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] - rimages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] - - if not len(limages) == len(rimages): - raise CalibrationException("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) - - ##\todo Check that the filenames match and stuff - - self.cal(limages, rimages) diff --git a/camera_calibration/src/camera_calibration/camera_calibrator.py b/camera_calibration/src/camera_calibration/camera_calibrator.py index 6d7ebdd40..1cad70521 100755 --- a/camera_calibration/src/camera_calibration/camera_calibrator.py +++ b/camera_calibration/src/camera_calibration/camera_calibrator.py @@ -40,7 +40,9 @@ import sensor_msgs.srv import threading import time -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, Patterns +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import Patterns try: from queue import Queue except ImportError: @@ -54,6 +56,7 @@ class BufferQueue(Queue): """Slight modification of the standard Queue that discards the oldest item when adding an item and the queue is full. 
""" + def put(self, item, *args, **kwargs): # The base implementation, for reference: # https://github.com/python/cpython/blob/2.7/Lib/Queue.py#L107 @@ -92,14 +95,17 @@ def run(self): class CalibrationNode(Node): - def __init__(self, name, boards, service_check = True, synchronizer = message_filters.TimeSynchronizer, flags = 0, - fisheye_flags = 0, pattern=Patterns.Chessboard, camera_name='', checkerboard_flags = 0, - max_chessboard_speed = -1, queue_size = 1): + def __init__(self, name, boards, service_check=True, synchronizer=message_filters.TimeSynchronizer, flags=0, + fisheye_flags=0, pattern=Patterns.Chessboard, camera_name='', checkerboard_flags=0, + max_chessboard_speed=-1, queue_size=1): super().__init__(name) - self.set_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "camera/set_camera_info") - self.set_left_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "left_camera/set_camera_info") - self.set_right_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "right_camera/set_camera_info") + self.set_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "camera/set_camera_info") + self.set_left_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "left_camera/set_camera_info") + self.set_right_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "right_camera/set_camera_info") if service_check: available = False @@ -123,12 +129,15 @@ def __init__(self, name, boards, service_check = True, synchronizer = message_fi self._pattern = pattern self._camera_name = camera_name self._max_chessboard_speed = max_chessboard_speed - lsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'left', qos_profile=self.get_topic_qos("left")) - rsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'right', qos_profile=self.get_topic_qos("right")) + lsub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'left', qos_profile=self.get_topic_qos("left")) + rsub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'right', qos_profile=self.get_topic_qos("right")) ts = synchronizer([lsub, rsub], 4) ts.registerCallback(self.queue_stereo) - msub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'image', qos_profile=self.get_topic_qos("image")) + msub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'image', qos_profile=self.get_topic_qos("image")) msub.registerCallback(self.queue_monocular) self.q_mono = BufferQueue(queue_size) @@ -148,6 +157,7 @@ def __init__(self, name, boards, service_check = True, synchronizer = message_fi def redraw_stereo(self, *args): pass + def redraw_monocular(self, *args): pass @@ -162,11 +172,11 @@ def handle_monocular(self, msg): if self._camera_name: self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, name=self._camera_name, checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + max_chessboard_speed=self._max_chessboard_speed) else: self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, checkerboard_flags=self.checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + max_chessboard_speed=self._max_chessboard_speed) # This should just call the MonoCalibrator drawable = self.c.handle_msg(msg) @@ -178,17 +188,16 @@ def handle_stereo(self, msg): if self._camera_name: self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, 
self._pattern, name=self._camera_name, checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + max_chessboard_speed=self._max_chessboard_speed) else: self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + max_chessboard_speed=self._max_chessboard_speed) drawable = self.c.handle_msg(msg) self.displaywidth = drawable.lscrib.shape[1] + drawable.rscrib.shape[1] self.redraw_stereo(drawable) - def check_set_camera_info(self, response): if response.success: return True @@ -196,12 +205,14 @@ def check_set_camera_info(self, response): for i in range(10): print("!" * 80) print() - print("Attempt to set camera info failed: " + response.result() if response.result() is not None else "Not available") + print("Attempt to set camera info failed: " + response.result() + if response.result() is not None else "Not available") print() for i in range(10): print("!" * 80) print() - self.get_logger().error('Unable to set camera info for calibration. Failure message: %s' % response.result() if response.result() is not None else "Not available") + self.get_logger().error('Unable to set camera info for calibration. Failure message: %s' % + response.result() if response.result() is not None else "Not available") return False def do_upload(self): @@ -240,7 +251,8 @@ def get_topic_qos(self, topic_name: str) -> QoSProfile: qos_profile.depth = qos_profile_system_default.depth return qos_profile else: - self.get_logger().warn(f"No publishers available for topic {topic_name}. Using system default QoS for subscriber.") + self.get_logger().warn( + f"No publishers available for topic {topic_name}. 
Using system default QoS for subscriber.") return qos_profile_system_default @@ -276,12 +288,14 @@ def spin(self): def initWindow(self): cv2.namedWindow("display", cv2.WINDOW_NORMAL) cv2.setMouseCallback("display", self.on_mouse) - cv2.createTrackbar("Camera type: \n 0 : pinhole \n 1 : fisheye", "display", 0,1, self.on_model_change) + cv2.createTrackbar("Camera type: \n 0 : pinhole \n 1 : fisheye", + "display", 0, 1, self.on_model_change) cv2.createTrackbar("scale", "display", 0, 100, self.on_scale) @classmethod - def putText(cls, img, text, org, color = (0,0,0)): - cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, color, thickness = cls.FONT_THICKNESS) + def putText(cls, img, text, org, color=(0, 0, 0)): + cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, + color, thickness=cls.FONT_THICKNESS) @classmethod def getTextSize(cls, text): @@ -293,7 +307,8 @@ def on_mouse(self, event, x, y, flags, param): if 180 <= y < 280: print("**** Calibrating ****") # Perform calibration in another thread to prevent UI blocking - threading.Thread(target=self.c.do_calibration, name="Calibration").start() + threading.Thread(target=self.c.do_calibration, + name="Calibration").start() self.buttons(self._last_display) self.queue_display.put(self._last_display) if self.c.calibrated: @@ -303,12 +318,14 @@ def on_mouse(self, event, x, y, flags, param): # Only shut down if we set camera info correctly, #3993 if self.do_upload(): rclpy.shutdown() + def on_model_change(self, model_select_val): if self.c == None: print("Cannot change camera model until the first image has been received") return - self.c.set_cammodel( CAMERA_MODEL.PINHOLE if model_select_val < 0.5 else CAMERA_MODEL.FISHEYE) + self.c.set_cammodel( + CAMERA_MODEL.PINHOLE if model_select_val < 0.5 else CAMERA_MODEL.FISHEYE) def on_scale(self, scalevalue): if self.c.calibrated: @@ -321,15 +338,17 @@ def button(self, dst, label, enable): color = (155, 155, 80) else: color = (224, 224, 224) - cv2.circle(dst, (size[0] // 2, size[1] // 2), min(size) // 2, color, -1) + cv2.circle(dst, (size[0] // 2, size[1] // 2), + min(size) // 2, color, -1) (w, h) = self.getTextSize(label) - self.putText(dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255,255,255)) + self.putText( + dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255, 255, 255)) def buttons(self, display): x = self.displaywidth - self.button(display[180:280,x:x+100], "CALIBRATE", self.c.goodenough) - self.button(display[280:380,x:x+100], "SAVE", self.c.calibrated) - self.button(display[380:480,x:x+100], "COMMIT", self.c.calibrated) + self.button(display[180:280, x:x+100], "CALIBRATE", self.c.goodenough) + self.button(display[280:380, x:x+100], "SAVE", self.c.calibrated) + self.button(display[380:480, x:x+100], "COMMIT", self.c.calibrated) def y(self, i): """Set up right-size images""" @@ -346,23 +365,25 @@ def redraw_monocular(self, drawable): height = drawable.scrib.shape[0] width = drawable.scrib.shape[1] - display = numpy.zeros((max(480, height), width + 100, 3), dtype=numpy.uint8) - display[0:height, 0:width,:] = drawable.scrib - display[0:height, width:width+100,:].fill(255) + display = numpy.zeros( + (max(480, height), width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width, :] = drawable.scrib + display[0:height, width:width+100, :].fill(255) self.buttons(display) if not self.c.calibrated: if drawable.params: - for i, (label, lo, hi, progress) in enumerate(drawable.params): - (w,_) = self.getTextSize(label) - self.putText(display, label, (width + (100 - w) // 2, 
self.y(i))) - color = (0,255,0) + for i, (label, lo, hi, progress) in enumerate(drawable.params): + (w, _) = self.getTextSize(label) + self.putText(display, label, + (width + (100 - w) // 2, self.y(i))) + color = (0, 255, 0) if progress < 1.0: color = (0, int(progress*255.), 255) cv2.line(display, - (int(width + lo * 100), self.y(i) + 20), - (int(width + hi * 100), self.y(i) + 20), - color, 4) + (int(width + lo * 100), self.y(i) + 20), + (int(width + hi * 100), self.y(i) + 20), + color, 4) else: self.putText(display, "lin.", (width, self.y(0))) @@ -371,7 +392,7 @@ def redraw_monocular(self, drawable): msg = "?" else: msg = "%.2f" % linerror - #print "linear", linerror + # print "linear", linerror self.putText(display, msg, (width, self.y(1))) self._last_display = display @@ -381,25 +402,27 @@ def redraw_stereo(self, drawable): height = drawable.lscrib.shape[0] width = drawable.lscrib.shape[1] - display = numpy.zeros((max(480, height), 2 * width + 100, 3), dtype=numpy.uint8) - display[0:height, 0:width,:] = drawable.lscrib - display[0:height, width:2*width,:] = drawable.rscrib - display[0:height, 2*width:2*width+100,:].fill(255) + display = numpy.zeros( + (max(480, height), 2 * width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width, :] = drawable.lscrib + display[0:height, width:2*width, :] = drawable.rscrib + display[0:height, 2*width:2*width+100, :].fill(255) self.buttons(display) if not self.c.calibrated: if drawable.params: for i, (label, lo, hi, progress) in enumerate(drawable.params): - (w,_) = self.getTextSize(label) - self.putText(display, label, (2 * width + (100 - w) // 2, self.y(i))) - color = (0,255,0) + (w, _) = self.getTextSize(label) + self.putText(display, label, (2 * width + + (100 - w) // 2, self.y(i))) + color = (0, 255, 0) if progress < 1.0: color = (0, int(progress*255.), 255) cv2.line(display, - (int(2 * width + lo * 100), self.y(i) + 20), - (int(2 * width + hi * 100), self.y(i) + 20), - color, 4) + (int(2 * width + lo * 100), self.y(i) + 20), + (int(2 * width + hi * 100), self.y(i) + 20), + color, 4) else: self.putText(display, "epi.", (2 * width, self.y(0))) @@ -411,7 +434,8 @@ def redraw_stereo(self, drawable): # TODO dim is never set anywhere. Supposed to be observed chessboard size? 
if drawable.dim != -1: self.putText(display, "dim", (2 * width, self.y(2))) - self.putText(display, "%.3f" % drawable.dim, (2 * width, self.y(3))) + self.putText(display, "%.3f" % + drawable.dim, (2 * width, self.y(3))) self._last_display = display self.queue_display.put(display) diff --git a/camera_calibration/src/camera_calibration/camera_checker.py b/camera_calibration/src/camera_calibration/camera_checker.py index 19149b5ec..514dee7b5 100755 --- a/camera_calibration/src/camera_calibration/camera_checker.py +++ b/camera_calibration/src/camera_calibration/camera_checker.py @@ -43,7 +43,10 @@ import sensor_msgs.srv import threading -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo + from message_filters import ApproximateTimeSynchronizer try: @@ -55,14 +58,17 @@ def mean(seq): return sum(seq) / len(seq) + def lmin(seq1, seq2): """ Pairwise minimum of two sequences """ return [min(a, b) for (a, b) in zip(seq1, seq2)] + def lmax(seq1, seq2): """ Pairwise maximum of two sequences """ return [max(a, b) for (a, b) in zip(seq1, seq2)] + class ConsumerThread(threading.Thread): def __init__(self, queue, function): threading.Thread.__init__(self) @@ -76,6 +82,7 @@ def run(self): break self.function(m) + class CameraCheckerNode(Node): def __init__(self, name, chess_size, dim, approximate=0): @@ -100,9 +107,11 @@ def __init__(self, name, chess_size, dim, approximate=0): if approximate <= 0: sync = message_filters.TimeSynchronizer else: - sync = functools.partial(ApproximateTimeSynchronizer, slop=approximate) + sync = functools.partial( + ApproximateTimeSynchronizer, slop=approximate) - tsm = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_mono], 10) + tsm = sync([message_filters.Subscriber(self, type, topic) + for (topic, type) in tosync_mono], 10) tsm.registerCallback(self.queue_monocular) left_topic = "stereo/left/image_rect" @@ -117,7 +126,8 @@ def __init__(self, name, chess_size, dim, approximate=0): (right_camera_topic, sensor_msgs.msg.CameraInfo) ] - tss = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_stereo], 10) + tss = sync([message_filters.Subscriber(self, type, topic) + for (topic, type) in tosync_stereo], 10) tss.registerCallback(self.queue_stereo) self.br = cv_bridge.CvBridge() @@ -162,24 +172,29 @@ def handle_monocular(self, msg): # Add in reprojection check image_points = C - object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0] + object_points = self.mc.mk_object_points( + [self.board], use_board_size=True)[0] dist_coeffs = numpy.zeros((4, 1)) - camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2] ], - [ camera.P[4], camera.P[5], camera.P[6] ], - [ camera.P[8], camera.P[9], camera.P[10] ] ] ) - ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs) + camera_matrix = numpy.array([[camera.P[0], camera.P[1], camera.P[2]], + [camera.P[4], camera.P[5], camera.P[6]], + [camera.P[8], camera.P[9], camera.P[10]]]) + ok, rot, trans = cv2.solvePnP( + object_points, image_points, camera_matrix, dist_coeffs) # Convert rotation into a 3x3 Rotation Matrix rot3x3, _ = cv2.Rodrigues(rot) # Reproject model points into image - object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans) 
+ object_points_world = numpy.asmatrix( + rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans) reprojected_h = camera_matrix * object_points_world - reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :]) + reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :]) reprojection_errors = image_points.squeeze().T - reprojected - reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape)) + reprojection_rms = numpy.sqrt(numpy.sum(numpy.array( + reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape)) # Print the results - print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms)) + print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % ( + linearity_rms, reprojection_rms)) else: print('no chessboard') @@ -194,8 +209,10 @@ def handle_stereo(self, msg): if L is not None and R is not None: epipolar = self.sc.epipolar_error(L, R) - dimension = self.sc.chessboard_size(L, R, self.board, msg=(lcmsg, rcmsg)) + dimension = self.sc.chessboard_size( + L, R, self.board, msg=(lcmsg, rcmsg)) - print("epipolar error: %f pixels dimension: %f m" % (epipolar, dimension)) + print("epipolar error: %f pixels dimension: %f m" % + (epipolar, dimension)) else: print("no chessboard") diff --git a/camera_calibration/src/camera_calibration/mono_calibrator.py b/camera_calibration/src/camera_calibration/mono_calibrator.py new file mode 100644 index 000000000..3b7c2b210 --- /dev/null +++ b/camera_calibration/src/camera_calibration/mono_calibrator.py @@ -0,0 +1,368 @@ +import numpy +import cv2 +import math +import time +from io import BytesIO +import pickle +import random +import tarfile +from camera_calibration.calibrator import ( + Calibrator, + CalibrationException, + CAMERA_MODEL, + Patterns, + MonoDrawable, + image_from_archive +) + + +class MonoCalibrator(Calibrator): + """ + Calibration class for monocular cameras:: + + images = [cv2.imread("mono%d.png") for i in range(8)] + mc = MonoCalibrator() + mc.cal(images) + print mc.as_message() + """ + + is_mono = True # TODO Could get rid of is_mono + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo/left' + super(MonoCalibrator, self).__init__(*args, **kwargs) + + def cal(self, images): + """ + Calibrate camera from given images + """ + goodcorners = self.collect_corners(images) + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, images): + """ + :param images: source images containing chessboards + :type images: list of :class:`cvMat` + + Find chessboards in all images. 
+ + Return [ (corners, ids, ChessboardInfo) ] + """ + self.size = (images[0].shape[1], images[0].shape[0]) + corners = [self.get_corners(i) for i in images] + + goodcorners = [(co, ids, b) for (ok, co, ids, b) in corners if ok] + if not goodcorners: + raise CalibrationException("No corners found in images!") + return goodcorners + + def cal_fromcorners(self, good): + """ + :param good: Good corner positions and boards + :type good: [(corners, ChessboardInfo)] + """ + + (ipts, ids, boards) = zip(*good) + opts = self.mk_object_points(boards) + # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio + intrinsics_in = numpy.eye(3, dtype=numpy.float64) + + if self.pattern == Patterns.ChArUco: + if self.camera_model == CAMERA_MODEL.FISHEYE: + raise NotImplemented( + "Can't perform fisheye calibration with ChArUco board") + + reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco( + ipts, ids, boards[0].charuco_board, self.size, intrinsics_in, None) + + elif self.camera_model == CAMERA_MODEL.PINHOLE: + print("mono pinhole calibration...") + reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera( + opts, ipts, + self.size, + intrinsics_in, + None, + flags=self.calib_flags) + # OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set. + # The extra ones include e.g. thin prism coefficients, which we are not interested in. + if self.calib_flags & cv2.CALIB_RATIONAL_MODEL: + # rational polynomial + self.distortion = dist_coeffs.flat[:8].reshape(-1, 1) + else: + # plumb bob + self.distortion = dist_coeffs.flat[:5].reshape(-1, 1) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + print("mono fisheye calibration...") + # WARNING: cv2.fisheye.calibrate wants float64 points + ipts64 = numpy.asarray(ipts, dtype=numpy.float64) + ipts = ipts64 + opts64 = numpy.asarray(opts, dtype=numpy.float64) + opts = opts64 + reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate( + opts, ipts, self.size, + intrinsics_in, None, flags=self.fisheye_calib_flags) + + # R is identity matrix for monocular calibration + self.R = numpy.eye(3, dtype=numpy.float64) + self.P = numpy.zeros((3, 4), dtype=numpy.float64) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The alpha + value is a zoom, and ranges from 0 (zoomed in, all pixels in + calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). + """ + + if self.camera_model == CAMERA_MODEL.PINHOLE: + # NOTE: Prior to Electric, this code was broken such that we never actually saved the new + # camera matrix. In effect, this enforced P = [K|0] for monocular cameras. + # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix) + ncm, _ = cv2.getOptimalNewCameraMatrix( + self.intrinsics, self.distortion, self.size, a) + for j in range(3): + for i in range(3): + self.P[j, i] = ncm[j, i] + self.mapx, self.mapy = cv2.initUndistortRectifyMap( + self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + # NOTE: cv2.fisheye.estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead: + self.P[:3, :3] = self.intrinsics[:3, :3] + self.P[0, 0] /= (1. + a) + self.P[1, 1] /= (1. 
+ a) + self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap( + self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1) + + def remap(self, src): + """ + :param src: source image + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source image + """ + return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR) + + def undistort_points(self, src): + """ + :param src: N source pixel points (u,v) as an Nx2 matrix + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source points + """ + if self.camera_model == CAMERA_MODEL.PINHOLE: + return cv2.undistortPoints(src, self.intrinsics, self.distortion, R=self.R, P=self.P) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + return cv2.fisheye.undistortPoints(src, self.intrinsics, self.distortion, R=self.R, P=self.P) + + def as_message(self): + """ Return the camera calibration as a CameraInfo message """ + return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) + + def from_message(self, msg, alpha=0.0): + """ Initialize the camera calibration from a CameraInfo message """ + + self.size = (msg.width, msg.height) + self.intrinsics = numpy.array( + msg.k, dtype=numpy.float64, copy=True).reshape((3, 3)) + self.distortion = numpy.array( + msg.d, dtype=numpy.float64, copy=True).reshape((len(msg.d), 1)) + self.R = numpy.array(msg.r, dtype=numpy.float64, + copy=True).reshape((3, 3)) + self.P = numpy.array(msg.p, dtype=numpy.float64, + copy=True).reshape((3, 4)) + + self.set_alpha(0.0) + + def report(self): + self.lrreport(self.distortion, self.intrinsics, self.R, self.P) + + def ost(self): + return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size) + + def yaml(self): + return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) + + def linear_error_from_image(self, image): + """ + Detect the checkerboard and compute the linear error. + Mainly for use in tests. + """ + _, corners, _, ids, board, _ = self.downsample_and_detect(image) + if corners is None: + return None + + undistorted = self.undistort_points(corners) + return self.linear_error(undistorted, ids, board) + + @staticmethod + def linear_error(corners, ids, b): + """ + Returns the linear error for a set of corners detected in the unrectified image. 
+ """ + + if corners is None: + return None + + corners = numpy.squeeze(corners) + + def pt2line(x0, y0, x1, y1, x2, y2): + """ point is (x0, y0), line is (x1, y1, x2, y2) """ + return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) + + n_cols = b.n_cols + n_rows = b.n_rows + if b.pattern == 'charuco': + n_cols -= 1 + n_rows -= 1 + n_pts = n_cols * n_rows + + if ids is None: + ids = numpy.arange(n_pts).reshape((n_pts, 1)) + + ids_to_idx = dict((ids[i, 0], i) for i in range(len(ids))) + + errors = [] + for row in range(n_rows): + row_min = row * n_cols + row_max = (row+1) * n_cols + pts_in_row = [x for x in ids if row_min <= x < row_max] + + # not enough points to calculate error + if len(pts_in_row) <= 2: + continue + + left_pt = min(pts_in_row)[0] + right_pt = max(pts_in_row)[0] + x_left = corners[ids_to_idx[left_pt], 0] + y_left = corners[ids_to_idx[left_pt], 1] + x_right = corners[ids_to_idx[right_pt], 0] + y_right = corners[ids_to_idx[right_pt], 1] + + for pt in pts_in_row: + if pt[0] in (left_pt, right_pt): + continue + x = corners[ids_to_idx[pt[0]], 0] + y = corners[ids_to_idx[pt[0]], 1] + errors.append(pt2line(x, y, x_left, y_left, x_right, y_right)) + + if errors: + return math.sqrt(sum([e**2 for e in errors]) / len(errors)) + else: + return None + + def handle_msg(self, msg): + """ + Detects the calibration target and, if found and provides enough new information, + adds it to the sample database. + + Returns a MonoDrawable message with the display image and progress info. + """ + gray = self.mkgray(msg) + linear_error = -1 + + # Get display-image-to-be (scrib) and detection of the calibration target + scrib_mono, corners, downsampled_corners, ids, board, ( + x_scale, y_scale) = self.downsample_and_detect(gray) + + if self.calibrated: + # Show rectified image + # TODO Pull out downsampling code into function + gray_remap = self.remap(gray) + gray_rect = gray_remap + if x_scale != 1.0 or y_scale != 1.0: + gray_rect = cv2.resize( + gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0])) + + scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR) + + if corners is not None: + # Report linear error + undistorted = self.undistort_points(corners) + linear_error = self.linear_error(undistorted, ids, board) + + # Draw rectified corners + scrib_src = undistorted.copy() + scrib_src[:, :, 0] /= x_scale + scrib_src[:, :, 1] /= y_scale + cv2.drawChessboardCorners( + scrib, (board.n_cols, board.n_rows), scrib_src, True) + + else: + scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR) + if corners is not None: + # Draw (potentially downsampled) corners onto display image + if board.pattern == "charuco": + cv2.aruco.drawDetectedCornersCharuco( + scrib, downsampled_corners, ids) + else: + cv2.drawChessboardCorners( + scrib, (board.n_cols, board.n_rows), downsampled_corners, True) + + # Add sample to database only if it's sufficiently different from any previous sample. 
+ params = self.get_parameters( + corners, ids, board, (gray.shape[1], gray.shape[0])) + if self.is_good_sample(params, corners, ids, self.last_frame_corners, self.last_frame_ids): + self.db.append((params, gray)) + if self.pattern == Patterns.ChArUco: + self.good_corners.append((corners, ids, board)) + else: + self.good_corners.append((corners, None, board)) + print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % + tuple([len(self.db)] + params))) + + self.last_frame_corners = corners + self.last_frame_ids = ids + rv = MonoDrawable() + rv.scrib = scrib + rv.params = self.compute_goodenough() + rv.linear_error = linear_error + return rv + + def do_calibration(self, dump=False): + if not self.good_corners: + print("**** Collecting corners for all images! ****") # DEBUG + images = [i for (p, i) in self.db] + self.good_corners = self.collect_corners(images) + # TODO Needs to be set externally + self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) + # Dump should only occur if user wants it + if dump: + pickle.dump((self.is_mono, self.size, self.good_corners), + open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) + self.cal_fromcorners(self.good_corners) + self.calibrated = True + # DEBUG + print((self.report())) + print((self.ost())) + + def do_tarfile_save(self, tf): + """ Write images and calibration solution to a tarfile object """ + + def taradd(name, buf): + if isinstance(buf, str): + s = BytesIO(buf.encode('utf-8')) + else: + s = BytesIO(buf) + ti = tarfile.TarInfo(name) + ti.size = len(s.getvalue()) + ti.uname = 'calibrator' + ti.mtime = int(time.time()) + tf.addfile(tarinfo=ti, fileobj=s) + + ims = [("left-%04d.png" % i, im) for i, (_, im) in enumerate(self.db)] + for (name, im) in ims: + taradd(name, cv2.imencode(".png", im)[1].tostring()) + taradd('ost.yaml', self.yaml()) + taradd('ost.txt', self.ost()) + + def do_tarfile_calibration(self, filename): + archive = tarfile.open(filename, 'r') + + limages = [image_from_archive(archive, f) for f in archive.getnames() if ( + f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')))] + + self.cal(limages) diff --git a/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py b/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py index df08e2706..ab3bab438 100755 --- a/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py +++ b/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py @@ -40,17 +40,19 @@ from camera_calibration.calibrator import ChessboardInfo, Patterns from message_filters import ApproximateTimeSynchronizer + def optionsValidCharuco(options, parser): """ Validates the provided options when the pattern type is 'charuco' """ - if options.pattern != 'charuco': return False + if options.pattern != 'charuco': + return False n_boards = len(options.size) if (n_boards != len(options.square) or n_boards != len(options.charuco_marker_size) or n_boards != len(options.aruco_dict)): parser.error("When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict " + - "must be specified for each board") + "must be specified for each board") return False # TODO: check for fisheye and stereo (not implemented with ChArUco) @@ -62,8 +64,8 @@ def main(): parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]", description=None) parser.add_option("-c", "--camera_name", - type="string", default='narrow_stereo', - help="name of the camera to appear in the calibration 
file") + type="string", default='narrow_stereo', + help="name of the camera to appear in the calibration file") group = OptionGroup(parser, "Chessboard Options", "You must specify one or more chessboards as pairs of --size and --square options.") group.add_option("-p", "--pattern", @@ -147,16 +149,19 @@ def main(): if options.pattern == "charuco" and optionsValidCharuco(options, parser): for (sz, sq, ms, ad) in zip(options.size, options.square, options.charuco_marker_size, options.aruco_dict): size = tuple([int(c) for c in sz.split('x')]) - boards.append(ChessboardInfo('charuco', size[0], size[1], float(sq), float(ms), ad)) + boards.append(ChessboardInfo( + 'charuco', size[0], size[1], float(sq), float(ms), ad)) else: for (sz, sq) in zip(options.size, options.square): size = tuple([int(c) for c in sz.split('x')]) - boards.append(ChessboardInfo(options.pattern, size[0], size[1], float(sq))) + boards.append(ChessboardInfo( + options.pattern, size[0], size[1], float(sq))) if options.approximate == 0.0: sync = message_filters.TimeSynchronizer else: - sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate) + sync = functools.partial( + ApproximateTimeSynchronizer, slop=options.approximate) # Pinhole opencv calibration options parsing num_ks = options.k_coefficients @@ -211,7 +216,8 @@ def main(): elif options.pattern == 'charuco': pattern = Patterns.ChArUco elif options.pattern != 'chessboard': - print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern) + print('Unrecognized pattern %s, defaulting to chessboard' % + options.pattern) if options.disable_calib_cb_fast_check: checkerboard_flags = 0 @@ -226,6 +232,7 @@ def main(): node.spin() rclpy.shutdown() + if __name__ == "__main__": try: main() diff --git a/camera_calibration/src/camera_calibration/nodes/cameracheck.py b/camera_calibration/src/camera_calibration/nodes/cameracheck.py index b2b3906cf..43fd84aef 100755 --- a/camera_calibration/src/camera_calibration/nodes/cameracheck.py +++ b/camera_calibration/src/camera_calibration/nodes/cameracheck.py @@ -39,8 +39,10 @@ def main(): from optparse import OptionParser parser = OptionParser() - parser.add_option("-s", "--size", default="8x6", help="specify chessboard size as nxm [default: %default]") - parser.add_option("-q", "--square", default=".108", help="specify chessboard square size in meters [default: %default]") + parser.add_option("-s", "--size", default="8x6", + help="specify chessboard size as nxm [default: %default]") + parser.add_option("-q", "--square", default=".108", + help="specify chessboard square size in meters [default: %default]") parser.add_option("--approximate", type="float", default=0.0, help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") @@ -54,5 +56,6 @@ def main(): node = CameraCheckerNode("cameracheck", size, dim, approximate) rclpy.spin(node) + if __name__ == "__main__": main() diff --git a/camera_calibration/src/camera_calibration/stereo_calibrator.py b/camera_calibration/src/camera_calibration/stereo_calibrator.py new file mode 100644 index 000000000..b0ed890f3 --- /dev/null +++ b/camera_calibration/src/camera_calibration/stereo_calibrator.py @@ -0,0 +1,439 @@ +from io import BytesIO +import sys +import numpy +import time +import math +import random +import pickle +import cv2 +import tarfile +import image_geometry +from distutils.version import LooseVersion +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.calibrator import ( + Calibrator, + 
CalibrationException, + CAMERA_MODEL, + Patterns, + StereoDrawable, + image_from_archive +) + +# TODO Replicate MonoCalibrator improvements in stereo + + +class StereoCalibrator(Calibrator): + """ + Calibration class for stereo cameras:: + + limages = [cv2.imread("left%d.png") for i in range(8)] + rimages = [cv2.imread("right%d.png") for i in range(8)] + sc = StereoCalibrator() + sc.cal(limages, rimages) + print sc.as_message() + """ + + is_mono = False + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo' + super(StereoCalibrator, self).__init__(*args, **kwargs) + self.l = MonoCalibrator(*args, **kwargs) + self.r = MonoCalibrator(*args, **kwargs) + # Collecting from two cameras in a horizontal stereo rig, can't get + # full X range in the left camera. + self.param_ranges[0] = 0.4 + + # override + def set_cammodel(self, modeltype): + super(StereoCalibrator, self).set_cammodel(modeltype) + self.l.set_cammodel(modeltype) + self.r.set_cammodel(modeltype) + + def cal(self, limages, rimages): + """ + :param limages: source left images containing chessboards + :type limages: list of :class:`cvMat` + :param rimages: source right images containing chessboards + :type rimages: list of :class:`cvMat` + + Find chessboards in images, and runs the OpenCV calibration solver. + """ + goodcorners = self.collect_corners(limages, rimages) + self.size = (limages[0].shape[1], limages[0].shape[0]) + self.l.size = self.size + self.r.size = self.size + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, limages, rimages): + """ + For a sequence of left and right images, find pairs of images where both + left and right have a chessboard, and return their corners as a list of pairs. + """ + # Pick out (corners, ids, board) tuples + lcorners = [] + rcorners = [] + for img in limages: + (_, corners, _, ids, board, _) = self.downsample_and_detect(img) + lcorners.append((corners, ids, board)) + for img in rimages: + (_, corners, _, ids, board, _) = self.downsample_and_detect(img) + rcorners.append((corners, ids, board)) + + good = [(lco, rco, lid, rid, b) for ((lco, lid, b), (rco, rid, br)) in zip(lcorners, rcorners) + if (lco is not None and rco is not None)] + + if len(good) == 0: + raise CalibrationException("No corners found in images!") + return good + + def cal_fromcorners(self, good): + # Perform monocular calibrations + lcorners = [(lco, lid, b) for (lco, rco, lid, rid, b) in good] + rcorners = [(rco, rid, b) for (lco, rco, lid, rid, b) in good] + self.l.cal_fromcorners(lcorners) + self.r.cal_fromcorners(rcorners) + + (lipts, ripts, _, _, boards) = zip(*good) + + opts = self.mk_object_points(boards, True) + + flags = cv2.CALIB_FIX_INTRINSIC + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + + if self.pattern == Patterns.ChArUco: + # TODO: implement stereo ChArUco calibration + raise NotImplemented( + "Stereo calibration not implemented for ChArUco boards") + + if self.camera_model == CAMERA_MODEL.PINHOLE: + print("stereo pinhole calibration...") + if LooseVersion(cv2.__version__).version[0] == 2: + cv2.stereoCalibrate(opts, lipts, ripts, self.size, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.R, # R + self.T, # T + criteria=(cv2.TERM_CRITERIA_EPS + \ + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + else: + cv2.stereoCalibrate(opts, lipts, ripts, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + 
self.size, + self.R, # R + self.T, # T + criteria=(cv2.TERM_CRITERIA_EPS + \ + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + print("stereo fisheye calibration...") + if LooseVersion(cv2.__version__).version[0] == 2: + print("ERROR: You need OpenCV >3 to use fisheye camera model") + sys.exit() + else: + # WARNING: cv2.fisheye.stereoCalibrate wants float64 points + lipts64 = numpy.asarray(lipts, dtype=numpy.float64) + lipts = lipts64 + ripts64 = numpy.asarray(ripts, dtype=numpy.float64) + ripts = ripts64 + opts64 = numpy.asarray(opts, dtype=numpy.float64) + opts = opts64 + + cv2.fisheye.stereoCalibrate(opts, lipts, ripts, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, # R + self.T, # T + # 30, 1e-6 + criteria=( + cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The + alpha value is a zoom, and ranges from 0 (zoomed in, all pixels + in calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). + """ + if self.camera_model == CAMERA_MODEL.PINHOLE: + cv2.stereoRectify(self.l.intrinsics, + self.l.distortion, + self.r.intrinsics, + self.r.distortion, + self.size, + self.R, + self.T, + self.l.R, self.r.R, self.l.P, self.r.P, + alpha=a) + + cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1, + self.l.mapx, self.l.mapy) + cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1, + self.r.mapx, self.r.mapy) + + elif self.camera_model == CAMERA_MODEL.FISHEYE: + self.Q = numpy.zeros((4, 4), dtype=numpy.float64) + + # Operation flags that may be zero or CALIB_ZERO_DISPARITY . + flags = cv2.CALIB_ZERO_DISPARITY + # If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. + # And if the flag is not set, the function may still shift the images in the horizontal or vertical direction + # (depending on the orientation of epipolar lines) to maximize the useful image area. + + cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, self.T, + flags, + self.l.R, self.r.R, + self.l.P, self.r.P, + self.Q, + self.size, + a, + 1.0) + self.l.P[:3, :3] = numpy.dot(self.l.intrinsics, self.l.R) + self.r.P[:3, :3] = numpy.dot(self.r.intrinsics, self.r.R) + cv2.fisheye.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2.CV_32FC1, + self.l.mapx, self.l.mapy) + cv2.fisheye.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2.CV_32FC1, + self.r.mapx, self.r.mapy) + + def as_message(self): + """ + Return the camera calibration as a pair of CameraInfo messages, for left + and right cameras respectively. + """ + + return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model), + self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model)) + + def from_message(self, msgs, alpha=0.0): + """ Initialize the camera calibration from a pair of CameraInfo messages. 
""" + self.size = (msgs[0].width, msgs[0].height) + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + + self.l.from_message(msgs[0]) + self.r.from_message(msgs[1]) + # Need to compute self.T and self.R here, using the monocular parameters above + if False: + self.set_alpha(0.0) + + def report(self): + print("\nLeft:") + self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) + print("\nRight:") + self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P) + print("self.T =", numpy.ravel(self.T).tolist()) + print("self.R =", numpy.ravel(self.R).tolist()) + + def ost(self): + return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) + + self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size)) + + def yaml(self, suffix, info): + return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, self.size, self.camera_model) + + # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners + def epipolar_error_from_images(self, limage, rimage): + """ + Detect the checkerboard in both images and compute the epipolar error. + Mainly for use in tests. + """ + lcorners = self.downsample_and_detect(limage)[1] + rcorners = self.downsample_and_detect(rimage)[1] + if lcorners is None or rcorners is None: + return None + + lundistorted = self.l.undistort_points(lcorners) + rundistorted = self.r.undistort_points(rcorners) + + return self.epipolar_error(lundistorted, rundistorted) + + def epipolar_error(self, lcorners, rcorners): + """ + Compute the epipolar error from two sets of matching undistorted points + """ + d = lcorners[:, :, 1] - rcorners[:, :, 1] + return numpy.sqrt(numpy.square(d).sum() / d.size) + + def chessboard_size_from_images(self, limage, rimage): + _, lcorners, _, _, board, _ = self.downsample_and_detect(limage) + _, rcorners, _, _, board, _ = self.downsample_and_detect(rimage) + if lcorners is None or rcorners is None: + return None + + lundistorted = self.l.undistort_points(lcorners) + rundistorted = self.r.undistort_points(rcorners) + + return self.chessboard_size(lundistorted, rundistorted, board) + + def chessboard_size(self, lcorners, rcorners, board, msg=None): + """ + Compute the square edge length from two sets of matching undistorted points + given the current calibration. + :param msg: a tuple of (left_msg, right_msg) + """ + # Project the points to 3d + cam = image_geometry.StereoCameraModel() + if msg == None: + msg = self.as_message() + cam.fromCameraInfo(*msg) + disparities = lcorners[:, :, 0] - rcorners[:, :, 0] + pt3d = [cam.projectPixelTo3d( + (lcorners[i, 0, 0], lcorners[i, 0, 1]), disparities[i, 0]) for i in range(lcorners.shape[0])] + + def l2(p0, p1): + return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)])) + + # Compute the length from each horizontal and vertical line, and return the mean + cc = board.n_cols + cr = board.n_rows + lengths = ( + [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] + + [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)]) + return sum(lengths) / len(lengths) + + def handle_msg(self, msg): + # TODO Various asserts that images have same dimension, same board detected... 
+ (lmsg, rmsg) = msg + lgray = self.mkgray(lmsg) + rgray = self.mkgray(rmsg) + epierror = -1 + + # Get display-images-to-be and detections of the calibration target + lscrib_mono, lcorners, ldownsampled_corners, lids, lboard, ( + x_scale, y_scale) = self.downsample_and_detect(lgray) + rscrib_mono, rcorners, rdownsampled_corners, rids, rboard, _ = self.downsample_and_detect( + rgray) + + if self.calibrated: + # Show rectified images + lremap = self.l.remap(lgray) + rremap = self.r.remap(rgray) + lrect = lremap + rrect = rremap + if x_scale != 1.0 or y_scale != 1.0: + lrect = cv2.resize( + lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0])) + rrect = cv2.resize( + rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0])) + + lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR) + rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR) + + # Draw rectified corners + if lcorners is not None: + lundistorted = self.l.undistort_points(lcorners) + scrib_src = lundistorted.copy() + scrib_src[:, :, 0] /= x_scale + scrib_src[:, :, 1] /= y_scale + cv2.drawChessboardCorners( + lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True) + + if rcorners is not None: + rundistorted = self.r.undistort_points(rcorners) + scrib_src = rundistorted.copy() + scrib_src[:, :, 0] /= x_scale + scrib_src[:, :, 1] /= y_scale + cv2.drawChessboardCorners( + rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True) + + # Report epipolar error + if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): + epierror = self.epipolar_error(lundistorted, rundistorted) + + else: + lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR) + rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR) + # Draw any detected chessboards onto display (downsampled) images + if lcorners is not None: + cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), + ldownsampled_corners, True) + if rcorners is not None: + cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), + rdownsampled_corners, True) + + # Add sample to database only if it's sufficiently different from any previous sample + if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): + params = self.get_parameters( + lcorners, lids, lboard, (lgray.shape[1], lgray.shape[0])) + if self.is_good_sample(params, lcorners, lids, self.last_frame_corners, self.last_frame_ids): + self.db.append((params, lgray, rgray)) + self.good_corners.append( + (lcorners, rcorners, lids, rids, lboard)) + print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % + tuple([len(self.db)] + params))) + + self.last_frame_corners = lcorners + self.last_frame_ids = lids + rv = StereoDrawable() + rv.lscrib = lscrib + rv.rscrib = rscrib + rv.params = self.compute_goodenough() + rv.epierror = epierror + return rv + + def do_calibration(self, dump=False): + # TODO MonoCalibrator collects corners if needed here + # TODO Needs to be set externally + self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) + # Dump should only occur if user wants it + if dump: + pickle.dump((self.is_mono, self.size, self.good_corners), + open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) + self.l.size = self.size + self.r.size = self.size + self.cal_fromcorners(self.good_corners) + self.calibrated = True + # DEBUG + print((self.report())) + print((self.ost())) + + def do_tarfile_save(self, tf): + """ Write images and calibration solution to a tarfile object """ + ims = ([("left-%04d.png" % i, im) for i, (_, im, _) in 
enumerate(self.db)] + + [("right-%04d.png" % i, im) for i, (_, _, im) in enumerate(self.db)]) + + def taradd(name, buf): + if isinstance(buf, str): + s = BytesIO(buf.encode('utf-8')) + else: + s = BytesIO(buf) + ti = tarfile.TarInfo(name) + ti.size = len(s.getvalue()) + ti.uname = 'calibrator' + ti.mtime = int(time.time()) + tf.addfile(tarinfo=ti, fileobj=s) + + for (name, im) in ims: + taradd(name, cv2.imencode(".png", im)[1].tostring()) + taradd('left.yaml', self.yaml("/left", self.l)) + taradd('right.yaml', self.yaml("/right", self.r)) + taradd('ost.txt', self.ost()) + + def do_tarfile_calibration(self, filename): + archive = tarfile.open(filename, 'r') + limages = [image_from_archive(archive, f) for f in archive.getnames() if ( + f.startswith('left') and (f.endswith('pgm') or f.endswith('png')))] + rimages = [image_from_archive(archive, f) for f in archive.getnames() if ( + f.startswith('right') and (f.endswith('pgm') or f.endswith('png')))] + + if not len(limages) == len(rimages): + raise CalibrationException( + "Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) + + # \todo Check that the filenames match and stuff + + self.cal(limages, rimages) diff --git a/camera_calibration/test/test_directed.py b/camera_calibration/test/test_directed.py index 80ec1a8b9..b6e66bccf 100644 --- a/camera_calibration/test/test_directed.py +++ b/camera_calibration/test/test_directed.py @@ -42,8 +42,9 @@ import tarfile import unittest -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, \ - Patterns, CalibrationException, ChessboardInfo, image_from_archive +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import Patterns, CalibrationException, ChessboardInfo, image_from_archive board = ChessboardInfo() board.n_cols = 8 diff --git a/camera_calibration/test/test_multiple_boards.py b/camera_calibration/test/test_multiple_boards.py index 40e1faa50..312f732c9 100644 --- a/camera_calibration/test/test_multiple_boards.py +++ b/camera_calibration/test/test_multiple_boards.py @@ -39,7 +39,9 @@ import tarfile import os -from camera_calibration.calibrator import StereoCalibrator, ChessboardInfo, image_from_archive +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo, image_from_archive + # Large board used for PR2 calibration board = ChessboardInfo()
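
For reference, a minimal usage sketch of the two calibrator classes after this split. It assumes only the import paths, constructor arguments, and cal()/ost() calls visible in the patch above (and mirrors the examples in the class docstrings); the board dimensions, square size, file names, and flag value are illustrative, not taken from the package defaults of any particular release::

    from camera_calibration.mono_calibrator import MonoCalibrator
    from camera_calibration.stereo_calibrator import StereoCalibrator
    from camera_calibration.calibrator import ChessboardInfo
    import cv2

    # Illustrative target: an 8x6 chessboard with 0.108 m squares
    board = ChessboardInfo('chessboard', 8, 6, 0.108)

    # Monocular calibration from a list of images (calib_flags = 0)
    images = [cv2.imread("mono%d.png" % i) for i in range(8)]
    mc = MonoCalibrator([board], 0)
    mc.cal(images)
    print(mc.ost())

    # Stereo calibration from matching left/right image lists
    limages = [cv2.imread("left%d.png" % i) for i in range(8)]
    rimages = [cv2.imread("right%d.png" % i) for i in range(8)]
    sc = StereoCalibrator([board], 0)
    sc.cal(limages, rimages)
    print(sc.ost())

Existing callers only need their imports updated, as the changes to tarfile_calibration.py, camera_calibrator.py, camera_checker.py, and the tests in this patch show; the class APIs themselves are unchanged by the move.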