Compare revisions: nicolaspope/ftl
Showing with 2747 additions and 407 deletions
### Audio Mixer Unit #################################################################
add_executable(mixer_unit
$<TARGET_OBJECTS:CatchTest>
mixer_unit.cpp
)
target_include_directories(mixer_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
target_link_libraries(mixer_unit
Threads::Threads ${OS_LIBS} ftlcommon)
add_test(MixerUnitTest mixer_unit)
#include "catch.hpp"
#include <ftl/audio/mixer.hpp>
using ftl::audio::StereoMixerF;
TEST_CASE("Audio Mixer Stereo Float", "") {
SECTION("Add two in sync tracks") {
auto mixer = std::make_unique<StereoMixerF<100>>();
mixer->add("Track1");
mixer->add("Track2");
// Three 960 sample stereo frames
std::vector<float> in1(960*2*3);
std::vector<float> in2(960*2*3);
for (int i=0; i<960*2*3; ++i) in1[i] = float(i)+1.0f;
for (int i=0; i<960*2*3; ++i) in2[i] = float(i)+2.0f;
mixer->write(0, in1);
mixer->write(1, in2);
mixer->mix();
REQUIRE( mixer->writePosition() == 3 );
REQUIRE( mixer->readPosition() == 0 );
// Read one of the three valid frames
std::vector<float> out;
mixer->read(out, 1);
bool correct = true;
// Check all values are correct
for (int i=0; i<960*2*1; ++i) {
float e = float(i)+1.0f + float(i)+2.0f;
correct &= int(e) == int(out[i]);
}
REQUIRE( correct );
}
SECTION("Add two out of sync tracks") {
auto mixer = std::make_unique<StereoMixerF<100>>();
mixer->add("Track1");
mixer->add("Track2");
// Three 960 sample stereo frames
std::vector<float> in1(960*2*3);
std::vector<float> in2(960*2*2);
for (int i=0; i<960*2*3; ++i) in1[i] = float(i)+1.0f;
for (int i=0; i<960*2*2; ++i) in2[i] = float(i)+2.0f;
mixer->write(0, in1);
mixer->write(1, in2);
mixer->mix();
REQUIRE( mixer->writePosition() == 2 );
REQUIRE( mixer->readPosition() == 0 );
// Read both valid frames
std::vector<float> out;
mixer->read(out, 2);
bool correct = true;
// Check all values are correct
for (int i=0; i<960*2*2; ++i) {
float e = float(i)+1.0f + float(i)+2.0f;
correct &= int(e) == int(out[i]);
}
REQUIRE( correct );
// Now add final frame
std::vector<float> in3(960*2*1);
for (int i=0; i<960*2*1; ++i) in3[i] = float(i)+1.0f;
mixer->write(1, in3);
mixer->mix();
REQUIRE( mixer->writePosition() == 3 );
REQUIRE( mixer->readPosition() == 2 );
mixer->read(out, 1);
// Check all values are correct
for (int i=0; i<960*2*1; ++i) {
float e = float(i)+1.0f + float(i+960*2*2)+1.0f;
correct &= int(e) == int(out[i]);
}
REQUIRE( correct );
}
}
TEST_CASE("Audio Mixer Stereo Float Dynamic Tracks", "") {
SECTION("Add one track after write") {
auto mixer = std::make_unique<StereoMixerF<100>>();
mixer->add("Track1");
// Three 960 sample stereo frames
std::vector<float> in1(960*2*3);
for (int i=0; i<960*2*3; ++i) in1[i] = float(i)+1.0f;
mixer->write(0, in1);
mixer->mix();
REQUIRE( mixer->writePosition() == 3 );
REQUIRE( mixer->readPosition() == 0 );
std::vector<float> in2(960*2*3);
for (int i=0; i<960*2*3; ++i) in2[i] = float(i)+2.0f;
mixer->add("Track2");
mixer->write(0, in1);
mixer->write(1, in2);
mixer->mix();
REQUIRE( mixer->writePosition() == 6 );
REQUIRE( mixer->readPosition() == 0 );
REQUIRE( mixer->frames() == 6 );
// Read all six valid frames
std::vector<float> out;
mixer->read(out, mixer->frames());
bool correct = true;
// Check all values are correct
for (int i=0; i<960*2*3; ++i) {
float e = float(i)+1.0f;
correct &= int(e) == int(out[i]);
}
for (int i=960*2*3; i<960*2*6; ++i) {
float e = float(i-960*2*3)+1.0f + float(i-960*2*3)+2.0f;
correct &= int(e) == int(out[i]);
}
REQUIRE( correct );
}
}
set(CALIBSRC
src/parameters.cpp
src/extrinsic.cpp
src/structures.cpp
src/visibility.cpp
src/object.cpp
src/stereorectify.cpp
)
if (WITH_CERES)
list(APPEND CALIBSRC src/optimize.cpp)
set_source_files_properties(src/optimize.cpp PROPERTIES COMPILE_FLAGS -O3)
endif()
add_library(ftlcalibration ${CALIBSRC})
@@ -17,7 +23,8 @@ target_include_directories(ftlcalibration
${OpenCV_INCLUDE_DIRS}
)
target_link_libraries(ftlcalibration ftlcommon Threads::Threads ${OpenCV_LIBS} Eigen3::Eigen ceres)
# ftlcodecs required for ftl::data::Channel
target_link_libraries(ftlcalibration ftlcommon ftlcodecs Threads::Threads ${OpenCV_LIBS} Eigen3::Eigen ceres)
if (BUILD_TESTS)
ADD_SUBDIRECTORY(test)
......
#include "calibration/parameters.hpp"
#include "calibration/optimize.hpp"
#include "calibration/extrinsic.hpp"
#include "calibration/structures.hpp"
#include "calibration/object.hpp"
#pragma once
#include <ftl/utility/msgpack.hpp>
#include <ftl/calibration/visibility.hpp>
#include <ftl/calibration/structures.hpp>
#include <ftl/calibration/optimize.hpp>
#include <opencv2/core.hpp>
#include <ftl/utility/msgpack.hpp>
#include <set>
namespace ftl {
namespace calibration {
/**
* Helper for storing sets of image points from multiple cameras. Each set of
* images does not have to be complete (not all cameras need to be included).
*
* Implementation limit: the maximum number of cameras is 64; valid camera
* indices are between 0 and 63, other values are undefined behaviour!
*
* Template parameter T is float or double.
*/
template<typename T>
class CalibrationPoints {
public:
struct Points {
// camera index
uint64_t cameras;
// object index
unsigned int object;
// points in image coordinates, camera index as key
std::map<unsigned int, std::vector<cv::Point_<T>>> points;
// triangulated points, camera pair as map key
std::map<std::pair<unsigned int, unsigned int>,
std::vector<cv::Point3_<T>>> triangulated;
bool has(unsigned int c) const { return (cameras & (uint64_t(1) << c)); }
MSGPACK_DEFINE(cameras, object, points, triangulated);
};
CalibrationPoints() : count_(0), visibility_(64), current_{0, ~(unsigned int)(0), {}, {}} {};
/** Set calibration target. Can be changed after calling next() and before
* adding any points. */
/* 2d (planar) target. Returns object ID */
unsigned int setObject(const std::vector<cv::Point_<T>> &target);
/* 3d target. Returns object ID */
unsigned int setObject(const std::vector<cv::Point3_<T>> &target);
/* Add points for current set. Points can only be set once for each set. */
void addPoints(unsigned int c, const std::vector<cv::Point_<T>>& points);
/** Continue to the next set of images. Target must be set. If no points were
* added, next() is a no-op. */
void next();
/** Set triangulated points. Note: flat input.
* @param c_base base camera (origin to point coordinates)
* @param c_match match camera
* @param points points
* @param idx index offset, if more image points are added, adjust idx
* accordingly (value of getPointsCount() before adding new
* points).
*/
void setTriangulatedPoints(unsigned int c_base, unsigned int c_match, const std::vector<cv::Point3_<T>>& points, int idx=0);
void resetTriangulatedPoints();
/** TODO: same as above but non-flat input
void setTriangulatedPoints(unsigned int c_base, unsigned int c_match, const std::vector<std::vector<cv::Point3_<T>>>& points, int idx=0);
*/
/** Clear current set of points (clears queue for next()) */
void clear();
/** Get count (how many sets) for camera(s). */
int getCount(unsigned int c);
int getCount(std::vector<unsigned int> cs);
/** total number of points */
int getPointsCount();
/** Get intersection of point sets for the given cameras. Returns a vector of
* image point vectors, one per camera, in the same order as the input
* parameter. */
std::vector<std::vector<cv::Point_<T>>> getPoints(const std::vector<unsigned int> &cameras, unsigned int object);
std::vector<cv::Point3_<T>> getObject(unsigned int);
const Visibility& visibility();
/** Get all points. See Points struct. */
const std::vector<Points>& all() { return points_; }
protected:
bool hasCamera(unsigned int c);
void setCamera(unsigned int c);
private:
int count_;
Visibility visibility_;
Points current_;
std::vector<Points> points_;
std::vector<std::vector<cv::Point3_<T>>> objects_;
public:
MSGPACK_DEFINE(count_, visibility_, current_, points_, objects_);
};
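// A minimal usage sketch for CalibrationPoints<double>, based only on the
// interface above; the detected point values below are hypothetical.
inline void calibrationPointsSketch() {
	CalibrationPoints<double> points;
	// Register the planar target once; it is reused for every image set.
	points.setObject(std::vector<cv::Point_<double>>{
		{0.0, 0.0}, {0.1, 0.0}, {0.1, 0.1}, {0.0, 0.1}});
	// One set of images: add detections per camera, then commit with next().
	points.addPoints(0, {{312.0, 240.5}, {400.1, 241.0}, {401.2, 330.7}, {311.8, 329.9}});
	points.addPoints(1, {{290.4, 250.2}, {378.9, 251.1}, {380.0, 340.2}, {290.1, 339.5}});
	points.next();
	// Image points from all sets where both cameras saw object 0.
	auto common = points.getPoints({0, 1}, 0);
	(void)common;
}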
/**
* Same as OpenCV's recoverPose(), but does not assume same intrinsic parameters
* for both cameras.
*
* @todo Write unit tests to check that intrinsic parameters work as expected.
*/
int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t,
double distanceThresh, cv::Mat &triangulatedPoints);
/** @brief Calibrate camera pair.
*
* Alternative to cv::StereoCalibrate.
*
* Essential matrix is estimated using all point correspondences, and pose is
* calculated with OpenCV's recoverPose() (modified to allow different
* intrinsic parameters for each camera).
*
* Non-linear optimization is used to determine scale from object points, and
* bundle adjustment is applied to points and extrinsic parameters. The
* calibration target shape is also included.
*
* @param K1 intrinsic matrix for first camera
* @param D1 distortion coefficients for first camera
* @param K2 intrinsic matrix for second camera
* @param D2 distortion coefficients for second camera
* @param points1 image points observed in first camera
* @param points2 image points observed in second camera
* @param object calibration target points (once)
* @param R (out) rotation matrix (camera 1 to 2)
* @param t (out) translation vector (camera 1 to 2)
* @param points_out triangulated points
* @param optimize optimize points
*
* @returns RMS reprojection error
*
* The following conditions must hold for the input parameters: (points1.size()
* == points2.size()) and (points1.size() % object_points.size() == 0).
*/
double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
const cv::Mat &K2, const cv::Mat &D2, const std::vector<cv::Point2d> &points1,
const std::vector<cv::Point2d> &points2, const std::vector<cv::Point3d> &object_points,
cv::Mat &R, cv::Mat &t, std::vector<cv::Point3d> &points_out, bool optimize=true);
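// A minimal sketch of a calibratePair() call, assuming intrinsics and matched
// target detections are already available; all names here are placeholders.
inline double calibratePairSketch(const cv::Mat &K1, const cv::Mat &D1,
		const cv::Mat &K2, const cv::Mat &D2,
		const std::vector<cv::Point2d> &points1,
		const std::vector<cv::Point2d> &points2,
		const std::vector<cv::Point3d> &object_points) {
	cv::Mat R, t;                           // camera 1 to camera 2
	std::vector<cv::Point3d> triangulated;  // scaled triangulated points
	// returns RMS reprojection error in pixels
	return calibratePair(K1, D1, K2, D2, points1, points2, object_points,
		R, t, triangulated, true);
}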
class ExtrinsicCalibration {
public:
/** Add a single camera (if it has valid calibration). Returns index of the camera. */
unsigned int addCamera(const CalibrationData::Calibration &);
/** Add stereo camera */
unsigned int addStereoCamera(const CalibrationData::Calibration &, const CalibrationData::Calibration &);
const CalibrationData::Intrinsic& intrinsic(unsigned int c);
const CalibrationData::Extrinsic& extrinsic(unsigned int c);
const CalibrationData::Calibration& calibration(unsigned int c);
const CalibrationData::Calibration& calibrationOptimized(unsigned int c);
/** Add points/targets; Only one calibration target supported!
*
* TODO: Support multiple calibration targets: calibrate pair without
* optimization or support multiple calibration objects there. Input at the
* moment is flat vector, need to group by calibration target size (similar
* to cv::stereoCalibrate/cv::calibrateCamera).
*/
CalibrationPoints<double>& points() { return points_; }
/* set bundle adjustment options */
void setOptions(ftl::calibration::BundleAdjustment::Options options) { options_ = options; }
ftl::calibration::BundleAdjustment::Options& options() { return options_; }
/** Number of cameras added */
unsigned int camerasCount() { return calib_.size(); }
/** use existing extrinsic calibration for camera */
void setUseExtrinsic(unsigned int c, bool v) { is_calibrated_.at(c) = v; }
/** whether existing extrinsic parameters are used for the given camera */
bool useExtrinsic(unsigned int c) { return is_calibrated_.at(c); };
/** status message */
std::string status();
/** run calibration, returns reprojection error */
double run();
double reprojectionError(unsigned int c); // reprojection error rmse
double reprojectionError(); // total reprojection error rmse
/** debug methods */
bool fromFile(const std::string& fname);
bool toFile(const std::string& fname); // should return new instance...
MSGPACK_DEFINE(points_, mask_, pairs_, calib_, is_calibrated_);
protected:
/** Calculate initial pose and triangulate points for two cameras **/
void calculatePairPose(unsigned int c1, unsigned int c2);
/** Only triangulate points using existing calibration */
void triangulate(unsigned int c1, unsigned int c2);
/** (1) Initial pairwise calibration and triangulation. */
void calculatePairPoses();
/** (2) Calculate initial poses from pairs for non-calibrated cameras. */
void calculateInitialPoses();
/** (3) Bundle adjustment on initial poses and triangulations. */
double optimize();
/** Select optimal camera for chains. Optimal camera has most visibility and
* is already calibrated (if any initial calibrations included).
*/
int selectOptimalCamera();
private:
void updateStatus_(std::string);
std::vector<CalibrationData::Calibration> calib_;
std::vector<CalibrationData::Calibration> calib_optimized_;
ftl::calibration::BundleAdjustment::Options options_;
CalibrationPoints<double> points_;
std::set<std::pair<unsigned int, unsigned int>> mask_;
std::map<std::pair<unsigned int, unsigned int>, std::tuple<cv::Mat, cv::Mat, double>> pairs_;
// true if camera already has valid calibration; initial pose estimation is
// skipped (and points are directly triangulated)
std::vector<bool> is_calibrated_;
// prune points which have higher reprojection error than given threshold
// and re-run bundle adjustment. (might be able to remove some problematic
// observations from extreme angles etc.)
std::vector<double> prune_observations_ = {2.5, 2.0, 2.0, 1.0};
int min_obs_ = 64; // minimum number of observations required for pair calibration
// TODO: add map {c1,c2} for existing calibration which is used if available.
//
std::shared_ptr<std::string> status_;
std::vector<double> rmse_;
double rmse_total_;
// Threshold for point to be skipped (m); absdiff between minimum and maximum
// values of each coordinate axis in all triangulated points is calculated
// and l2 norm is compared against threshold value. Optimization uses median
// coordinate values; threshold can be fairly big.
static constexpr float threshold_bad_ = 0.67;
// threshold for warning message (% of points discarded)
static constexpr float threhsold_warning_ = 0.1;
};
} // namespace calibration
}
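// A usage sketch for ExtrinsicCalibration; the Calibration inputs here are
// default-constructed placeholders that would normally come from intrinsic
// calibration, and the observation loop is elided.
inline double extrinsicCalibrationSketch() {
	using ftl::calibration::CalibrationData;
	CalibrationData::Calibration left, right;  // placeholders
	ftl::calibration::ExtrinsicCalibration ecalib;
	ecalib.addStereoCamera(left, right);
	// ... add observations for each image set through ecalib.points() ...
	// run(): (1) pair poses, (2) chained initial poses, (3) bundle adjustment
	return ecalib.run();
}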
#pragma once
#include <opencv2/core/mat.hpp>
#include <opencv2/aruco.hpp>
/** Calibration objects */
namespace ftl
{
namespace calibration
{
class CalibrationObject {
public:
virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat()) = 0;
virtual std::vector<cv::Point3d> object() = 0;
};
class ChessboardObject : public CalibrationObject {
public:
ChessboardObject(int rows=18, int cols=25, double square_size=0.015);
virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat());
std::vector<cv::Point3d> object() override;
cv::Size chessboardSize();
double squareSize();
private:
void init();
cv::Size chessboard_size_;
double square_size_;
int flags_;
std::vector<cv::Point3d> object_points_;
};
class ArUCoObject : public CalibrationObject {
public:
ArUCoObject(cv::aruco::PREDEFINED_DICTIONARY_NAME dictionary = cv::aruco::DICT_6X6_100, float baseline = 0.25f, float tag_size = 0.15, int id1=0, int id2=1);
virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat());
std::vector<cv::Point3d> object() override;
private:
cv::Ptr<cv::aruco::Dictionary> dict_;
cv::Ptr<cv::aruco::DetectorParameters> params_;
float baseline_;
float tag_size_;
int id1_;
int id2_;
};
} // namespace calibration
} // namespace ftl
@@ -9,7 +9,6 @@
#include <ftl/config.h>
#include <ceres/ceres.h>
#include <opencv2/core/core.hpp>
// BundleAdjustment uses Point3d instances via double*
@@ -18,11 +17,88 @@ static_assert(std::is_standard_layout<cv::Point2d>());
static_assert(sizeof(cv::Point3d) == 3*sizeof(double));
static_assert(std::is_standard_layout<cv::Point3d>());
namespace ceres {
struct Problem;
}
namespace ftl {
namespace calibration {
/**
* Camera parameters (Ceres)
*/
struct Camera {
Camera() {}
Camera(const cv::Mat& K, const cv::Mat& D, const cv::Mat& R, const cv::Mat& tvec, cv::Size size);
Camera(const CalibrationData::Calibration& calib);
CalibrationData::Intrinsic intrinsic() const;
CalibrationData::Extrinsic extrinsic() const;
void setRotation(const cv::Mat& R);
void setTranslation(const cv::Mat& tvec);
void setExtrinsic(const cv::Mat& R, const cv::Mat& t) {
setRotation(R);
setTranslation(t);
}
void setIntrinsic(const cv::Mat& K, cv::Size sz);
void setDistortion(const cv::Mat &D);
void setIntrinsic(const cv::Mat& K, const cv::Mat& D, cv::Size sz) {
setIntrinsic(K, sz);
setDistortion(D);
}
cv::Mat intrinsicMatrix() const;
cv::Mat distortionCoefficients() const;
cv::Mat rvec() const;
cv::Mat tvec() const;
cv::Mat rmat() const;
cv::Mat extrinsicMatrix() const;
cv::Mat extrinsicMatrixInverse() const;
void toQuaternion();
void toAngleAxis();
cv::Size size;
const static int n_parameters = 18;
const static int n_distortion_parameters = 8;
double data[n_parameters] = {0.0};
enum Parameter {
ROTATION = 0,
Q1 = 0,
Q2 = 1,
Q3 = 2,
Q4 = 3,
TRANSLATION = 4,
TX = 4,
TY = 5,
TZ = 6,
F = 7,
CX = 8,
CY = 9,
DISTORTION = 10,
K1 = 10,
K2 = 11,
P1 = 12,
P2 = 13,
K3 = 14,
K4 = 15,
K5 = 16,
K6 = 17
};
};
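// Sketch: the Parameter enum indexes directly into Camera::data (18 doubles:
// quaternion rotation, translation, f, cx, cy, distortion k1..k6, p1, p2).
// The accessors below are hypothetical, for illustration only.
inline double cameraFocal(const Camera &cam) { return cam.data[Camera::F]; }
inline void setPrincipalPoint(Camera &cam, double cx, double cy) {
	cam.data[Camera::CX] = cx;
	cam.data[Camera::CY] = cy;
}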
#ifdef HAVE_CERES
/** Project point using camera model implemented for Ceres */
cv::Point2d projectPoint(const Camera& camera, const cv::Point3d &p);
/**
* @brief Optimize scale.
* @param object Reference object points
@@ -41,9 +117,6 @@ double optimizeScale(const std::vector<cv::Point3d>& object, std::vector<cv::Poi
* - rotation and translation rx, ry, rz, tx, ty, tz,
* - focal length and principal point: f, cx, cy
* - radial distortion (first three coefficients): k1, k2, k3
*
* @note: Distortion parameters are used in reprojection error, but they are
* not optimized.
*/
class BundleAdjustment {
public:
@@ -59,26 +132,34 @@ public:
Loss loss = Loss::SQUARED;
bool use_nonmonotonic_steps = false;
// use quaternion rotation
bool use_quaternion = true;
// fix_camera_extrinsic and fix_camera_intrinsic overlap with some of
// the generic options. The more generic setting is always used, the
// specific extrinsic/intrinsic options are applied on top of those.
// fix extrinsic parameters for cameras
std::vector<int> fix_camera_extrinsic = {};
std::set<int> fix_camera_extrinsic = {};
// fix intrinsic parameters for cameras
std::vector<int> fix_camera_intrinsic = {};
std::set<int> fix_camera_intrinsic = {};
bool fix_focal = false;
bool fix_principal_point = false;
/**
* @todo Radial distortion must be monotonic. This constraint is not
* included in the model, thus distortion parameters are always
* fixed.
* included in the model.
*/
// distortion coefficient optimization is not supported
/// fix all distortion coefficients to constant (initial values)
bool fix_distortion = true;
/// use distortion coefficients k4, k5, and k6; if false, set to zero
bool rational_model = true;
/// distortion set to zero
bool zero_distortion = false;
bool optimize_intrinsic = true;
bool optimize_motion = true;
@@ -90,7 +171,7 @@ public:
};
/**
* Add camera(s)
* Add camera(s). Stored as pointers. TODO: copy instead
*/
void addCamera(Camera &K);
void addCameras(std::vector<Camera> &K);
@@ -99,39 +180,53 @@ public:
* @brief Add points
*/
void addPoint(const std::vector<bool>& visibility, const std::vector<cv::Point2d>& observations, cv::Point3d& point);
/**
* @brief Vector for each camera TODO: verify this works
*/
void addPoints(const std::vector<std::vector<bool>>& visibility, const std::vector<std::vector<cv::Point2d>>& observations,
std::vector<cv::Point3d>& points);
/**
* @brief Add points, all assumed visible
* @brief Add points, all assumed visible. Values copied.
*/
void addPoint(const std::vector<cv::Point2d>& observations, cv::Point3d& point);
void addPoints(const std::vector<std::vector<cv::Point2d>>& observations, std::vector<cv::Point3d>& points);
/** TODO: estimate pose for each view which to optimize */
void addObject(const std::vector<cv::Point3d>& object_points);
/** @brief Perform bundle adjustment with custom options.
*/
void run(const BundleAdjustment::Options& options);
/** @brief Get optimized points
*
*/
std::vector<cv::Point3d> getPoints();
/** @brief Perform bundle adjustment using default options
*/
void run();
/** @brief Calculate MSE error (for one camera)
/** @brief Calculate RMSE error (for one camera)
*/
double reprojectionError(const int camera) const;
/** @brief Calculate MSE error for all cameras
/** @brief Calculate RMSE error for all cameras
*/
double reprojectionError() const;
/**/
int removeObservations(double threshold);
std::vector<cv::Point3d> points();
protected:
double* getCameraPtr(int i) { return cameras_[i]->data; }
double* getCameraPtr(int i) { return cameras_.at(i)->data; }
/** @brief Calculate MSE error
/** @brief Calculate squared error
*/
void _reprojectionErrorMSE(const int camera, double &error, double &npoints) const;
void _reprojectionErrorSE(const int camera, double &error, double &npoints) const;
/** @brief Set camera parametrization (fixed parameters/cameras)
*/
@@ -140,6 +235,8 @@ protected:
void _buildProblem(ceres::Problem& problem, const BundleAdjustment::Options& options);
void _buildBundleAdjustmentProblem(ceres::Problem& problem, const BundleAdjustment::Options& options);
// remove?
void _buildLengthProblem(ceres::Problem& problem, const BundleAdjustment::Options& options);
private:
@@ -150,8 +247,8 @@ private:
// pixel coordinates: x, y
std::vector<cv::Point2d> observations;
// world coordinates: x, y, z
double* point;
// point in world coordinates
cv::Point3d point;
};
// group of points with known structure; from idx_start to idx_end
......
@@ -2,66 +2,13 @@
#ifndef _FTL_CALIBRATION_PARAMETERS_HPP_
#define _FTL_CALIBRATION_PARAMETERS_HPP_
#include <ftl/calibration/structures.hpp>
#include <opencv2/core/core.hpp>
namespace ftl {
namespace calibration {
/**
* Camera parameters
*/
struct Camera {
Camera() {}
Camera(const cv::Mat& K, const cv::Mat& D, const cv::Mat& R, const cv::Mat& tvec);
void setRotation(const cv::Mat& R);
void setTranslation(const cv::Mat& tvec);
void setExtrinsic(const cv::Mat& R, const cv::Mat& t) {
setRotation(R);
setTranslation(t);
}
void setIntrinsic(const cv::Mat& K);
void setDistortion(const cv::Mat &D);
void setIntrinsic(const cv::Mat& K, const cv::Mat& D) {
setIntrinsic(K);
setDistortion(D);
}
cv::Mat intrinsicMatrix() const;
cv::Mat distortionCoefficients() const;
cv::Mat rvec() const;
cv::Mat tvec() const;
cv::Mat rmat() const;
cv::Mat extrinsicMatrix() const;
cv::Mat extrinsicMatrixInverse() const;
const static int n_parameters = 12;
const static int n_distortion_parameters = 3;
double data[n_parameters] = {0.0};
enum Parameter {
ROTATION = 0,
RX = 0,
RY = 1,
RZ = 2,
TRANSLATION = 3,
TX = 3,
TY = 4,
TZ = 5,
F = 6,
CX = 7,
CY = 8,
DISTORTION = 9,
K1 = 9,
K2 = 10,
K3 = 11
};
};
namespace validate {
/**
@@ -92,11 +39,9 @@ bool distortionCoefficients(const cv::Mat &D, cv::Size size);
}
namespace transform {
// TODO: Some of the methods can be directly replaced with OpenCV
// (opencv2/calib3d.hpp)
// TODO: Some of the methods can be directly replaced with OpenCV (opencv2/calib3d.hpp)
/**
* @brief Get rotation matrix and translation vector from transformation matrix.
@@ -126,19 +71,20 @@ inline void inverse(cv::Mat &R, cv::Mat &t) {
}
/**
* @brief Inverse transform inplace
* @brief Inverse transform
* @param T transformation matrix (4x4)
*/
inline void inverse(cv::Mat &T) {
[[nodiscard]] inline cv::Mat inverse(const cv::Mat &T) {
cv::Mat rmat;
cv::Mat tvec;
getRotationAndTranslation(T, rmat, tvec);
T = cv::Mat::eye(4, 4, CV_64FC1);
cv::Mat T_ = cv::Mat::eye(4, 4, CV_64FC1);
T(cv::Rect(3, 0, 1, 1)) = -rmat.col(0).dot(tvec);
T(cv::Rect(3, 1, 1, 1)) = -rmat.col(1).dot(tvec);
T(cv::Rect(3, 2, 1, 1)) = -rmat.col(2).dot(tvec);
T(cv::Rect(0, 0, 3, 3)) = rmat.t();
T_(cv::Rect(3, 0, 1, 1)) = -rmat.col(0).dot(tvec);
T_(cv::Rect(3, 1, 1, 1)) = -rmat.col(1).dot(tvec);
T_(cv::Rect(3, 2, 1, 1)) = -rmat.col(2).dot(tvec);
T_(cv::Rect(0, 0, 3, 3)) = rmat.t();
return T_;
}
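// Note on the code above: for a rigid transform T = [R|t; 0 0 0 1] the
// inverse has the closed form [R^T | -R^T t; 0 0 0 1], so no generic 4x4
// inversion is needed; each -rmat.col(i).dot(tvec) term is one component
// of -R^T t.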
inline cv::Point3d apply(const cv::Point3d& point, const cv::InputArray& R, const cv::InputArray& t) {
@@ -160,10 +106,10 @@ inline cv::Point3d apply(const cv::Point3d& point, const cv::InputArray& T) {
/**
* @brief Scale camera intrinsic matrix
* @param size_new New resolution
* @param size_old Original (camera matrix) resolution
* @param size_new New resolution
*/
cv::Mat scaleCameraMatrix(const cv::Mat &K, const cv::Size &size_new, const cv::Size &size_old);
[[nodiscard]] cv::Mat scaleCameraMatrix(const cv::Mat &K, const cv::Size &size_old, const cv::Size &size_new);
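// Sketch of the assumed scaling behaviour: fx and cx scale by
// width_new/width_old, fy and cy by height_new/height_old, e.g.
//   cv::Mat K_half = scaleCameraMatrix(K, cv::Size(1280, 720), cv::Size(640, 360));
// halves all four values.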
/**
* @brief Calculate MSE reprojection error
......
#pragma once
#include <opencv2/core.hpp>
#include <ftl/calibration/structures.hpp>
namespace ftl {
namespace calibration {
/** Stereo rectification parameters. Wrapper for cv::stereoRectify() */
struct StereoRectify {
/** Calculate rectification parameters. c1 and c2 must contain valid
* calibration (extrinsic parameters for the transformation from world to
* camera). */
StereoRectify(const CalibrationData::Calibration &c1, const CalibrationData::Calibration& c2, const cv::Size size={0, 0}, double alpha=0.0, int flags=0);
/** stereo pair baseline (same unit as extrinsic parameters) */
double baseline() const;
/** calculate maps (cv::remap()) for camera 1 */
void map1(cv::Mat &m1, cv::Mat &m2, int format=CV_16SC2);
/** calculate maps (cv::remap()) for camera 2 */
void map2(cv::Mat &m1, cv::Mat &m2, int format=CV_16SC2);
cv::Size size;
/** unrectified params */
cv::Mat K1;
cv::Mat K2;
cv::Mat distCoeffs1;
cv::Mat distCoeffs2;
/** 3x4 projection matrix for first camera */
cv::Mat P1;
/** 3x4 projection matrix for second camera */
cv::Mat P2;
/** rotation matrix for first camera (unrectified to rectified) */
cv::Mat R1;
/** rotation matrix for second camera (unrectified to rectified) */
cv::Mat R2;
/** disparity to depth matrix */
cv::Mat Q;
/** rotation from first camera to second camera (unrectified) */
cv::Mat R;
/** translation from first camera to second camera (unrectified) */
cv::Mat t;
/** largest ROI containing only valid pixels in rectified image for first camera */
cv::Rect roi1;
/** largest ROI containing only valid pixels in rectified image for second camera */
cv::Rect roi2;
};
}
}
\ No newline at end of file
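// A usage sketch for StereoRectify with cv::remap(), assuming two valid
// calibrations; this helper is illustrative and not part of the header above.
#include <opencv2/imgproc.hpp>

inline void rectifyFirstCamera(const ftl::calibration::CalibrationData::Calibration &c1,
		const ftl::calibration::CalibrationData::Calibration &c2,
		const cv::Mat &im1, cv::Mat &out1) {
	ftl::calibration::StereoRectify rectify(c1, c2, im1.size());
	cv::Mat m1, m2;
	rectify.map1(m1, m2);  // remap tables for the first camera
	cv::remap(im1, out1, m1, m2, cv::INTER_LINEAR);
}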
#ifndef _FTL_CALIBRATION_STRUCTURES_HPP_
#define _FTL_CALIBRATION_STRUCTURES_HPP_
#include <ftl/utility/msgpack.hpp>
#include <ftl/codecs/channels.hpp>
namespace ftl {
namespace calibration {
struct CalibrationData {
struct Intrinsic {
friend CalibrationData;
/** 12 distortion coefficients. OpenCV also provides tilted camera model
* coefficients, but they are not used here. */
struct DistortionCoefficients {
friend CalibrationData;
DistortionCoefficients();
/**
* Access distortion coefficients, stored in OpenCV order. Out of
* bounds access is undefined.
*
* 0,1 r1-r2 radial distortion
* 2,3 p1-p2 tangential distortion
* 4,5,6,7 r3-r6 radial distortion
* 8,9,10,11 s1-s4 thin prism distortion
*
*/
double& operator[](unsigned i);
double operator[](unsigned i) const;
/** whether the radial distortion values are for the rational model */
bool rationalModel() const;
/** whether the thin prism model is used (s1-s4 set) */
bool thinPrism() const;
/**
* Return distortion parameters in cv::Mat. Shares same memory.
*/
const cv::Mat Mat(int nparams = 12) const;
cv::Mat Mat(int nparams = 12);
private:
std::vector<double> data_;
public:
MSGPACK_DEFINE(data_);
};
Intrinsic();
Intrinsic(const cv::Mat &K, cv::Size sz);
Intrinsic(const cv::Mat &K, const cv::Mat &D, cv::Size sz);
/** New instance with scaled values for new resolution */
Intrinsic(const Intrinsic& other, cv::Size sz);
/* valid values (resolution is non-zero) */
bool valid() const;
/** horizontal field of view in degrees */
double fovx() const;
/** vertical field of view in degrees */
double fovy() const;
/** focal length in sensor size units */
double focal() const;
/** aspect ratio: fx/fy */
double aspectRatio() const;
/** Replace current values with new ones */
void set(const cv::Mat &K, cv::Size sz);
void set(const cv::Mat &K, const cv::Mat &D, cv::Size sz);
/** Camera matrix */
cv::Mat matrix() const;
/** Camera matrix (scaled) */
cv::Mat matrix(cv::Size) const;
cv::Size resolution;
double fx;
double fy;
double cx;
double cy;
DistortionCoefficients distCoeffs;
/** (optional) sensor size; move elsewhere? */
cv::Size2d sensorSize;
MSGPACK_DEFINE(resolution, fx, fy, cx, cy, distCoeffs, sensorSize);
};
struct Extrinsic {
Extrinsic();
Extrinsic(const cv::Mat &T);
Extrinsic(cv::InputArray R, cv::InputArray t);
void set(const cv::Mat &T);
void set(cv::InputArray R, cv::InputArray t);
Extrinsic inverse() const;
/** valid calibration (values not NAN) */
bool valid() const;
/** get as a 4x4 matrix */
cv::Mat matrix() const;
/** get 3x3 rotation matrix */
cv::Mat rmat() const;
cv::Vec3d rvec = {NAN, NAN, NAN};
cv::Vec3d tvec = {NAN, NAN, NAN};
MSGPACK_DEFINE(rvec, tvec);
};
struct Calibration {
Intrinsic intrinsic;
Extrinsic extrinsic;
/** 4x4 projection matrix */
cv::Mat matrix();
MSGPACK_DEFINE(intrinsic, extrinsic);
};
CalibrationData() : enabled(false) {}
bool enabled;
[[nodiscard]]
static CalibrationData readFile(const std::string &path);
void writeFile(const std::string &path) const;
/** Get reference for channel. Create if doesn't exist. */
Calibration& get(ftl::codecs::Channel channel);
bool hasCalibration(ftl::codecs::Channel channel) const;
// TODO: identify cameras with unique ID string instead of channel.
std::map<ftl::codecs::Channel, Calibration> data;
/** Correction to be applied (inverse) to extrinsic parameters
* (calibrated to new origin); Applied to rectified pose at the moment
*/
cv::Mat origin = cv::Mat::eye(4, 4, CV_64FC1);
public:
MSGPACK_DEFINE(enabled, data, origin);
};
}
}
#endif
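// A usage sketch for CalibrationData; the file path is hypothetical and the
// channel name is an assumption for illustration.
inline void loadCalibrationSketch() {
	auto data = ftl::calibration::CalibrationData::readFile("calibration.bin");
	if (data.hasCalibration(ftl::codecs::Channel::Left)) {
		auto &calib = data.get(ftl::codecs::Channel::Left);
		double fx = calib.intrinsic.fx;        // focal length (pixels)
		cv::Mat T = calib.extrinsic.matrix();  // 4x4 world-to-camera transform
		(void)fx; (void)T;
	}
}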
@@ -5,6 +5,8 @@
#include <vector>
#include <string>
#include <ftl/utility/msgpack.hpp>
namespace ftl {
namespace calibration {
@@ -14,15 +16,15 @@ namespace calibration {
template<typename T>
class Paths {
public:
Paths(const std::vector<int> &previous, const std::vector<T> &distances);
Paths(int id, const std::vector<int> &previous, const std::vector<T> &distances);
/**
* @brief Shortest path from node i. Same as to(i) in reverse order
* @brief Shortest path from node i.
*/
std::vector<int> from(int i) const;
/**
* @brief Shortest to node i. Same as from(i) in reverse order.
* @brief Shortest to node i.
*/
std::vector<int> to(int i) const;
@@ -39,6 +41,7 @@ public:
std::string to_string() const;
private:
int id_; // node id
std::vector<int> previous_;
std::vector<T> distances_;
};
@@ -64,9 +67,20 @@ class Visibility {
template<typename T>
void update(const std::vector<T> &add);
void update(uint64_t add);
void mask(int a, int b);
void unmask(int a, int b);
int count(int camera) const;
int count(int camera1, int camera2) const;
// minimum counts and cameras
int min() const;
int max() const;
int argmax() const;
int argmin() const;
/**
* @brief Find the shortest path with most visibility to camera i
*/
@@ -74,15 +88,19 @@ class Visibility {
protected:
std::vector<int> neighbors(int i) const;
int distance(int a, int b) const;
float distance(int a, int b) const;
private:
int n_cameras_;
int n_cameras_; // number of cameras
int n_max_; // highest index used
// adjacency matrix
std::vector<std::vector<int>> graph_;
// masked values (mask_[i][j]) are not used
std::vector<std::vector<bool>> mask_;
public:
MSGPACK_DEFINE(n_cameras_, n_max_, graph_, mask_);
};
}
......
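// A usage sketch for Visibility, based on the interface above; update() takes
// a bitmask of which cameras observed one target pose.
inline void visibilitySketch() {
	ftl::calibration::Visibility vis(3);
	vis.update(uint64_t(0b011));       // cameras 0 and 1 saw the target
	vis.update(uint64_t(0b110));       // cameras 1 and 2 saw the target
	auto paths = vis.shortestPath(0);  // most-visible chains towards camera 0
	auto chain = paths.to(2);          // e.g. 2 -> 1 -> 0
	(void)chain;
}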
#include <loguru.hpp>
#include <ftl/exception.hpp>
#include <ftl/calibration/optimize.hpp>
#include <ftl/calibration/extrinsic.hpp>
#include <fstream>
#include <sstream>
#include <opencv2/calib3d.hpp>
////////////////////////////////////////////////////////////////////////////////
/** check bit i in a */
inline bool hasOne(uint64_t a, unsigned int i) {
return a & (uint64_t(1) << i);
}
/** all bits set in b are also set in a */
inline bool hasAll(uint64_t a, uint64_t b) {
return (b & a) == b;
}
/** set bit i in a */
inline void setOne(uint64_t &a, unsigned int i) {
a |= (uint64_t(1) << i);
}
/** get highest set bit (1-based index) */
inline int hbit(uint64_t a) {
#ifdef __GNUC__
return 64 - __builtin_clzll(a);
#endif
int v = 1;
while (a >>= 1) { v++; }
return v;
}
inline int popcount(uint64_t bits) {
#if defined(_MSC_VER)
return __popcnt64(bits);
#elif defined(__GNUC__)
return __builtin_popcountll(bits);
#else
// portable fallback: test the low bit before shifting
int count = 0;
while (bits != 0) {
count += int(bits & uint64_t(1));
bits >>= 1;
}
return count;
#endif
}
// ==== CalibrationPoints ================================================
namespace ftl {
namespace calibration {
template<typename T>
void CalibrationPoints<T>::addPoints(unsigned int c, const std::vector<cv::Point_<T>>& points) {
if (hasCamera(c)) {
throw ftl::exception("Points already set for camera. "
"Forgot to call next()?");
}
if (current_.object == ~(unsigned int)(0)) {
throw ftl::exception("Target has be set before adding points.");
}
if (objects_[current_.object].size() != points.size()) {
throw ftl::exception("Number of points must cv::Match object points");
}
std::vector<cv::Point_<T>> p(points.begin(), points.end());
current_.points[c] = p;
setCamera(c);
};
template<typename T>
unsigned int CalibrationPoints<T>::setObject(const std::vector<cv::Point3_<T>> &object) {
if (!current_.points.empty()) {
throw ftl::exception("Points already set, object can not be changed. "
"Forgot to call next()?");
}
// check if object already exists
for (unsigned int i = 0; i < objects_.size(); i++) {
if (objects_[i].size() != object.size()) { continue; }
bool eq = true;
for (unsigned int j = 0; j < object.size(); j++) {
eq &= (objects_[i][j] == object[j]);
}
if (eq) {
current_.object = i;
return i;
}
}
// not found
current_.object = objects_.size();
objects_.push_back(object);
return current_.object;
}
template<typename T>
unsigned int CalibrationPoints<T>::setObject(const std::vector<cv::Point_<T>> &object) {
if (!current_.points.empty()) {
throw ftl::exception("Points already set, object can not be changed. "
"Forgot to call next()?");
}
std::vector<cv::Point3_<T>> object3d;
object3d.reserve(object.size());
for (const auto& p : object) {
object3d.push_back({p.x, p.y, T(0.0)});
}
return setObject(object3d);
}
template<typename T>
void CalibrationPoints<T>::next() {
if (objects_.empty()) {
throw ftl::exception("object must be set before calling next()");
}
if (current_.cameras == uint64_t(0)) {
return;
}
count_ += objects_[current_.object].size();
points_.push_back(current_);
visibility_.update(current_.cameras);
clear();
}
template<typename T>
void CalibrationPoints<T>::clear() {
current_ = {uint64_t(0), (unsigned int)(objects_.size()) - 1u, {}, {}};
}
template<typename T>
bool CalibrationPoints<T>::hasCamera(unsigned int c) {
return hasOne(current_.cameras, c);
}
template<typename T>
void CalibrationPoints<T>::setCamera(unsigned int c) {
setOne(current_.cameras, c);
}
template<typename T>
int CalibrationPoints<T>::getCount(unsigned int c) {
return visibility_.count(c);
}
template<typename T>
int CalibrationPoints<T>::getPointsCount() {
return count_;
}
template<typename T>
const Visibility& CalibrationPoints<T>::visibility() {
return visibility_;
}
template<typename T>
void CalibrationPoints<T>::setTriangulatedPoints(unsigned int c_base, unsigned int c_match,
const std::vector<cv::Point3_<T>>& points, int idx) {
uint64_t required = 0;
setOne(required, c_base);
setOne(required, c_match);
auto itr = points.begin();
for (unsigned int i = idx; i < points_.size(); i++) {
if (hasAll(points_[i].cameras, required)) {
auto obj_sz = objects_[points_[i].object].size();
std::vector<cv::Point3_<T>> pts;
pts.reserve(obj_sz);
for (unsigned int i_obj = 0; i_obj < obj_sz; i_obj++) {
pts.push_back(*itr);
itr++;
}
points_[i].triangulated[{c_base, c_match}] = pts;
if (itr == points.end()) { break; }
}
}
}
template<typename T>
void CalibrationPoints<T>::resetTriangulatedPoints() {
for (unsigned int i = 0; i < points_.size(); i++) {
points_[i].triangulated.clear();
}
}
template<typename T>
std::vector<std::vector<cv::Point_<T>>> CalibrationPoints<T>::getPoints(const std::vector<unsigned int>& cameras, unsigned int object) {
std::vector<std::vector<cv::Point_<T>>> points;
points.resize(cameras.size());
std::vector<unsigned int> lookup;
uint64_t required = 0;
for (unsigned i = 0; i < cameras.size(); i++) {
setOne(required, cameras[i]);
if ((cameras[i] + 1) > lookup.size()) {
lookup.resize(cameras[i] + 1, ~(unsigned int)(0));
}
lookup[cameras[i]] = i;
}
for (const auto& set : points_) {
if (!hasAll(set.cameras, required)) { continue; }
if (set.object != object) { continue; }
for (auto &[i, data] : set.points) {
if (!hasOne(required, i)) { continue; }
points[lookup[i]].insert
(points[lookup[i]].end(), data.begin(), data.end());
}
}
return points;
}
template<typename T>
std::vector<cv::Point3_<T>> CalibrationPoints<T>::getObject(unsigned int object) {
return objects_[object];
}
template class CalibrationPoints<float>;
template class CalibrationPoints<double>;
////////////////////////////////////////////////////////////////////////////////
int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t, double distanceThresh,
cv::Mat &triangulatedPoints) {
cv::Mat cameraMatrix1;
cv::Mat cameraMatrix2;
cv::Mat cameraMatrix;
cv::Mat points1(_points1.size(), 2, CV_64FC1);
cv::Mat points2(_points2.size(), 2, CV_64FC1);
CHECK_EQ(points1.size(), points2.size());
for (size_t i = 0; i < _points1.size(); i++) {
auto p1 = points1.ptr<double>(i);
p1[0] = _points1[i].x;
p1[1] = _points1[i].y;
auto p2 = points2.ptr<double>(i);
p2[0] = _points2[i].x;
p2[1] = _points2[i].y;
}
_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
cameraMatrix = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
double fx1 = cameraMatrix1.at<double>(0,0);
double fy1 = cameraMatrix1.at<double>(1,1);
double cx1 = cameraMatrix1.at<double>(0,2);
double cy1 = cameraMatrix1.at<double>(1,2);
double fx2 = cameraMatrix2.at<double>(0,0);
double fy2 = cameraMatrix2.at<double>(1,1);
double cx2 = cameraMatrix2.at<double>(0,2);
double cy2 = cameraMatrix2.at<double>(1,2);
points1.col(0) = (points1.col(0) - cx1) / fx1;
points1.col(1) = (points1.col(1) - cy1) / fy1;
points2.col(0) = (points2.col(0) - cx2) / fx2;
points2.col(1) = (points2.col(1) - cy2) / fy2;
// TODO mask
// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
// modules/calib3d/src/five-point.cpp (461)
//
// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
return cv::recoverPose( E, points1, points2, cameraMatrix, _R, _t,
distanceThresh, cv::noArray(), triangulatedPoints);
}
static double scalePoints(const std::vector<cv::Point3d> &object_points, const cv::Mat& points_in, std::vector<cv::Point3d> &points_out) {
points_out.clear();
points_out.reserve(points_in.cols);
// convert from homogenous coordinates
for (int col = 0; col < points_in.cols; col++) {
CHECK_NE(points_in.at<double>(3, col), 0);
cv::Point3d p = cv::Point3d(points_in.at<double>(0, col),
points_in.at<double>(1, col),
points_in.at<double>(2, col))
/ points_in.at<double>(3, col);
points_out.push_back(p);
}
double s = ftl::calibration::optimizeScale(object_points, points_out);
return s;
}
double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
const cv::Mat &K2, const cv::Mat &D2,
const std::vector<cv::Point2d> &points1,
const std::vector<cv::Point2d> &points2,
const std::vector<cv::Point3d> &object_points, cv::Mat &R,
cv::Mat &t, std::vector<cv::Point3d> &points_out, bool optimize) {
// FM_8POINT should be good enough if there are no outliers in points1 and
// points2 (all correspondences are correct)
cv::Mat F = cv::findFundamentalMat(points1, points2, cv::noArray(), cv::FM_8POINT);
cv::Mat E = K2.t() * F * K1;
cv::Mat points3dh;
// distanceThresh?
recoverPose(E, points1, points2, K1, K2, R, t, 1000.0, points3dh);
double s = scalePoints(object_points, points3dh, points_out);
t = t * s;
auto params1 = Camera(K1, D1, cv::Mat::eye(3, 3, CV_64FC1), cv::Mat::zeros(3, 1, CV_64FC1), {0, 0});
auto params2 = Camera(K2, D2, R, t, {0, 0});
auto ba = BundleAdjustment();
ba.addCamera(params1);
ba.addCamera(params2);
for (size_t i = 0; i < points_out.size(); i++) {
ba.addPoint({points1[i], points2[i]}, points_out[i]);
}
// needs to be implemented correctly: optimize each pose of the target
//ba.addObject(object_points);
double error = ba.reprojectionError();
if (optimize) {
BundleAdjustment::Options options;
options.optimize_intrinsic = false;
// any difference if both transformations are multiplied with (T_1)^-1
// (inverse of the first camera's transform) after optimization instead?
options.fix_camera_extrinsic = {0};
ba.run(options);
error = ba.reprojectionError();
}
CHECK_EQ(cv::countNonZero(params1.rvec()), 0);
CHECK_EQ(cv::countNonZero(params1.tvec()), 0);
return sqrt(error);
}
// ==== Extrinsic Calibration ==================================================
unsigned int ExtrinsicCalibration::addCamera(const CalibrationData::Calibration &c) {
unsigned int idx = calib_.size();
calib_.push_back(c);
calib_optimized_.push_back(calib_.back());
is_calibrated_.push_back(true);
return idx;
}
unsigned int ExtrinsicCalibration::addStereoCamera(const CalibrationData::Calibration &c1, const CalibrationData::Calibration &c2) {
unsigned int idx = calib_.size();
calib_.push_back({c1.intrinsic, c1.extrinsic});
calib_optimized_.push_back(calib_.back());
calib_.push_back({c2.intrinsic, c2.extrinsic});
calib_optimized_.push_back(calib_.back());
is_calibrated_.push_back(c1.extrinsic.valid());
is_calibrated_.push_back(c2.extrinsic.valid());
mask_.insert({idx, idx + 1});
return idx;
}
std::string ExtrinsicCalibration::status() {
auto str = std::atomic_load(&status_);
if (str) { return *str; }
else { return ""; }
}
void ExtrinsicCalibration::updateStatus_(std::string str) {
std::atomic_store(&status_, std::make_shared<std::string>(str));
}
void ExtrinsicCalibration::calculatePairPose(unsigned int c1, unsigned int c2) {
// calculate parameters and update triangulation
cv::Mat K1 = calib_[c1].intrinsic.matrix();
cv::Mat distCoeffs1 = calib_[c1].intrinsic.distCoeffs.Mat();
cv::Mat K2 = calib_[c2].intrinsic.matrix();
cv::Mat distCoeffs2 = calib_[c2].intrinsic.distCoeffs.Mat();
auto pts = points().getPoints({c1, c2}, 0);
auto object = points().getObject(0);
cv::Mat R, t;
std::vector<cv::Point3d> points3d;
auto rmse = calibratePair(K1, distCoeffs1, K2, distCoeffs2,
pts[0], pts[1], object, R, t, points3d, true);
// debug info
LOG(INFO) << "RMSE (cameras " << c1 << " & " << c2 << "): " << rmse;
points().setTriangulatedPoints(c1, c2, points3d);
pairs_[{c1, c2}] = {R, t, rmse};
cv::Mat R_i, t_i;
R.copyTo(R_i);
t.copyTo(t_i);
transform::inverse(R_i, t_i);
pairs_[{c2, c1}] = {R_i, t_i, rmse};
}
void ExtrinsicCalibration::triangulate(unsigned int c1, unsigned int c2) {
cv::Mat T, R, t, R_i, t_i;
T = calib_[c1].extrinsic.matrix() *
transform::inverse(calib_[c2].extrinsic.matrix());
transform::getRotationAndTranslation(T, R_i, t_i);
T = calib_[c2].extrinsic.matrix() *
transform::inverse(calib_[c1].extrinsic.matrix());
transform::getRotationAndTranslation(T, R, t);
pairs_[{c1, c2}] = {R, t, NAN};
pairs_[{c2, c1}] = {R_i, t_i, NAN};
auto pts = points().getPoints({c1, c2}, 0);
std::vector<cv::Point3d> pointsw;
const auto& calib1 = calib_[c1];
const auto& calib2 = calib_[c2];
CHECK_EQ(pts[0].size(), pts[1].size());
cv::Mat pts1(1, pts[0].size(), CV_64FC2, pts[0].data());
cv::Mat pts2(1, pts[1].size(), CV_64FC2, pts[1].data());
cv::Mat out(1, pts[0].size(), CV_64FC2);
// Undistort points first. Note: undistortPoints() returns points in
// normalized coordinates, therefore projection matrices for
// cv::triangulatePoints() only include extrinsic parameters.
std::vector<cv::Point2d> pts1u, pts2u;
cv::undistortPoints(pts1, pts1u, calib1.intrinsic.matrix(), calib1.intrinsic.distCoeffs.Mat());
cv::undistortPoints(pts2, pts2u, calib2.intrinsic.matrix(), calib2.intrinsic.distCoeffs.Mat());
cv::Mat P1 = cv::Mat::eye(3, 4, CV_64FC1);
cv::Mat P2 = T(cv::Rect(0, 0, 4, 3));
// documentation claims cv::triangulatePoints() requires floats; however
// seems to only work with doubles (documentation outdated?).
// According to https://stackoverflow.com/a/16299909 cv::triangulatePoints()
// implements least squares method described in H&Z p312
cv::triangulatePoints(P1, P2, pts1u, pts2u, out);
// scalePoints() converts to non-homogenous coordinates and estimates scale
LOG(INFO) << "new scale: " << scalePoints(points().getObject(0), out, pointsw);
/*for (int col = 0; col < out.cols; col++) {
CHECK_NE(out.at<double>(3, col), 0);
cv::Point3d p = cv::Point3d(out.at<double>(0, col),
out.at<double>(1, col),
out.at<double>(2, col))
/ out.at<double>(3, col);
pointsw.push_back(p);
}*/
points().setTriangulatedPoints(c1, c2, pointsw);
}
void ExtrinsicCalibration::calculatePairPoses() {
// Calibrate all pairs. TODO: this might be expensive when the number of
// cameras is high; consider not performing it for all pairs.
int i = 1;
int i_max = (camerasCount() * camerasCount()) / 2 + 1;
for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {
updateStatus_( "Calculating pose for pair " +
std::to_string(i++) + " of " + std::to_string(i_max) +
" and triangulating points");
if (c1 == c2) {
pairs_[{c1, c2}] = { cv::Mat::eye(cv::Size(3, 3), CV_64FC1),
cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0)),
0.0};
continue;
}
if (pairs_.find({c1, c2}) != pairs_.end()) {
LOG(WARNING) << "pair already processed (this shold not happen)";
continue;
}
// require minimum number of visible points
if (points().visibility().count(c1, c2) < min_obs_) {
LOG(WARNING) << "skipped pair (" << c1 << ", " << c2 << "), not enough points";
continue;
}
if (is_calibrated_[c1] && is_calibrated_[c2]) {
LOG(INFO) << "using existing pose for cameras " << c1 << " and "
<< c2 << "(only triangulating points)";
triangulate(c1, c2);
}
else {
if (mask_.count({c1, c2}) > 0 ) { continue; }
calculatePairPose(c1, c2);
}
}}
}
int ExtrinsicCalibration::selectOptimalCamera() {
// Pick optimal camera: most views of calibration pattern. If existing
// calibration is used, reference camera must already be calibrated.
int c = 0;
int calibrated_c_ref_max = 0;
for (unsigned int i = 0; i < is_calibrated_.size(); i++) {
if (is_calibrated_[i] && points().getCount(i) > calibrated_c_ref_max) {
calibrated_c_ref_max = points().getCount(i);
c = i;
}
}
if (calibrated_c_ref_max == 0) {
c = points().visibility().argmax();
}
return c;
}
void ExtrinsicCalibration::calculateInitialPoses() {
updateStatus_("Initial poses from chained transformations");
// mask stereo cameras (do not pairwise calibrate a stereo pair; unreliable)
auto visibility = points_.visibility();
for (const auto& [c1, c2]: mask_) { visibility.mask(c1, c2); }
// mask cameras which did not have enough points TODO: triangulation later
// would still be useful (calculate initial poses, then triangulate)
for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {
if (pairs_.count({c1, c2}) == 0) {
visibility.mask(c1, c2);
}
}}
// select optimal camera to calculate chains to. TODO: if any of the
// cameras is already calibrated, use most visible calibrated camera as
// target camera.
auto c_ref = selectOptimalCamera();
auto paths = visibility.shortestPath(c_ref);
for (unsigned int c = 0; c < camerasCount(); c++) {
if (is_calibrated_[c]) {
// already calibrated. skip chain
continue;
}
if (c == unsigned(c_ref)) { continue; }
cv::Mat R_chain = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
cv::Mat t_chain = cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0));
auto path = paths.to(c);
do {
// iterate in reverse order
auto prev = path.back();
path.pop_back();
auto next = path.back();
cv::Mat R = std::get<0>(pairs_.at({prev, next}));
cv::Mat t = std::get<1>(pairs_.at({prev, next}));
CHECK_EQ(R.size(), cv::Size(3, 3));
CHECK_EQ(t.total(), 3);
R_chain = R * R_chain;
t_chain = t + R * t_chain;
}
while(path.size() > 1);
// note: direction of chain in the loop (ref to target transformation)
calib_[c].extrinsic =
CalibrationData::Extrinsic(R_chain, t_chain).inverse();
}
}
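// Note on the chain composition above: each hop applies x' = R_k * x + t_k,
// so the loop accumulates the composite transform as R_chain = R * R_chain
// and t_chain = t + R * t_chain before inverting it for the extrinsic pose.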
static std::vector<bool> visibility(unsigned int ncameras, uint64_t visible) {
std::vector<bool> res(ncameras, false);
for (unsigned int i = 0; i < ncameras; i++) {
res[i] = visible & (uint64_t(1) << i);
}
return res;
}
/* absolute difference between min and max for each set of coordinates */
static cv::Point3d absdiff(const std::vector<double> &xs, const std::vector<double> &ys, const std::vector<double> &zs) {
if (xs.size() < 2) {
return {0.0, 0.0, 0.0};
}
double minx = INFINITY;
double maxx = -INFINITY;
for (auto x : xs) {
minx = std::min(minx, x);
maxx = std::max(maxx, x);
}
double miny = INFINITY;
double maxy = -INFINITY;
for (auto y : ys) {
miny = std::min(miny, y);
maxy = std::max(maxy, y);
}
double minz = INFINITY;
double maxz = -INFINITY;
for (auto z : zs) {
minz = std::min(minz, z);
maxz = std::max(maxz, z);
}
cv::Point3d diff = {std::abs(minx - maxx), std::abs(miny - maxy), std::abs(minz - maxz)};
return diff;
}
double ExtrinsicCalibration::optimize() {
// triangulate points for stereo pairs (all points triangulated after this)
updateStatus_("Triangulating remaining points");
for (const auto& [c1, c2]: mask_) {
if (points().visibility().count(c1, c2) >= min_obs_) {
triangulate(c1, c2);
}
else {
LOG(INFO) << "Skipping triangulation for pair " << c1 << ", " << c2;
}
}
// Build BA
BundleAdjustment ba;
std::vector<Camera> cameras;
std::vector<cv::Mat> T; // camera to world
cameras.reserve(calib_.size());
unsigned int ncameras = calib_.size();
for (const auto& c : calib_) {
auto camera = c;
T.push_back(c.extrinsic.inverse().matrix());
cameras.push_back(Camera(camera));
}
for (auto &c : cameras) {
// BundleAdjustment stores pointers; do not resize cameras vector
ba.addCamera(c);
}
// TODO (?) is this a good idea? make optional
ba.addObject(points_.getObject(0));
// Transform triangulated points into the same coordinates. Points which are
// triangulated multiple times: use median values. Note T[] contains
// inverse transformations, as points are transformed from camera to world
// (instead of the other way around, as by the parameters in cameras[]).
updateStatus_("Calculating points in world coordinates");
// NOTE: the CalibrationPoints data structure above is not optimal for how
// points are actually used here; BundleAdjustment interface also
// expects flat arrays; overall cv::Mats would probably be better
// suited as they can be easily interpreted as flat arrays or
// multi-dimensional.
int n_points_bad = 0;
int n_points_missing = 0;
int n_points = 0;
for (const auto& itm : points_.all()) {
auto sz = points_.getObject(itm.object).size();
auto vis = visibility(ncameras, itm.cameras);
for (unsigned int i = 0; i < sz; i++) {
n_points++;
// observation and triangulated coordinates; Use {NAN, NAN} for
// non-visible points (if those are used by mistake, Ceres will
// fail with error message).
std::vector<cv::Point2d> obs(ncameras, {NAN, NAN});
std::vector<double> px;
std::vector<double> py;
std::vector<double> pz;
for (const auto& [c, o] : itm.points) {
obs[c] = o[i];
}
for (const auto [c, pts] : itm.triangulated) {
auto p = transform::apply(pts[i], T[c.first]);
px.push_back(p.x);
py.push_back(p.y);
pz.push_back(p.z);
}
// median coordinate for each axis
std::sort(px.begin(), px.end());
std::sort(py.begin(), py.end());
std::sort(pz.begin(), pz.end());
cv::Point3d p;
unsigned int n = px.size();
unsigned int m = n / 2;
if (n == 0) {
n_points_missing++;
break;
}
if (n % 2 == 0 && n > 1) {
// mean of two points if number of points even
cv::Point3d p1 = {px[m - 1], py[m - 1], pz[m - 1]};
cv::Point3d p2 = {px[m], py[m], pz[m]};
p = (p1 + p2)/2.0;
}
else {
p = {px[m], py[m], pz[m]};
}
// TODO: design a better check
if (cv::norm(absdiff(px, py, pz)) > threshold_bad_) {
n_points_bad++;
//continue;
}
ba.addPoint(vis, obs, p);
}
}
if (float(n_points_bad)/float(n_points - n_points_missing) > threhsold_warning_) {
// print warning message; calibration possibly fails if triangulation
// was very low quality (more than threshold % bad points)
LOG(ERROR) << "Large variation in "<< n_points_bad << " "
"triangulated points. Are initial intrinsic parameters "
"good? If initial camera poses were used, try again "
"without using existing values.";
}
if (float(n_points_missing)/float(n_points - n_points_bad) > threhsold_warning_) {
// this should not happen any more (all points should be triangulated).
LOG(WARNING) << "Large number of points skipped. Are there enough "
"visible points between stereo camera pairs?";
}
updateStatus_("Bundle adjustment");
options_.verbose = true;
options_.max_iter = 250; // should converge much earlier
LOG(INFO) << "fix intrinsics: " << (options_.optimize_intrinsic ? "no" : "yes");
LOG(INFO) << "fix focal: " << (options_.fix_focal ? "yes" : "no");
LOG(INFO) << "fix principal point: " << (options_.fix_principal_point ? "yes" : "no");
LOG(INFO) << "fix distortion: " << (options_.fix_distortion ? "yes" : "no");
ba.run(options_);
int n_removed = 0;
for (const auto& t : prune_observations_) {
n_removed += ba.removeObservations(t);
if (float(n_removed)/float(n_points) > threhsold_warning_) {
LOG(WARNING) << "significant number (" << n_removed << " of "
<< n_points << ") of observations removed";
break;
}
else {
LOG(INFO) << "removed observations: " << n_removed;
ba.run(options_);
}
}
calib_optimized_.resize(calib_.size());
rmse_.resize(calib_.size());
auto points_optimized = ba.points();
double scale = optimizeScale(points_.getObject(0), points_optimized);
LOG(INFO) << "scale: " << scale;
for (unsigned int i = 0; i < cameras.size(); i++) {
rmse_[i] = ba.reprojectionError(i);
auto intr = cameras[i].intrinsic();
calib_optimized_[i] = calib_[i];
calib_optimized_[i].intrinsic.set(intr.matrix(), intr.distCoeffs.Mat(), intr.resolution);
calib_optimized_[i].extrinsic.set(cameras[i].rvec(), cameras[i].tvec());
calib_optimized_[i].extrinsic.tvec *= scale;
}
rmse_total_ = ba.reprojectionError();
LOG(INFO) << "reprojection error (all cameras): " << rmse_total_;
return rmse_total_;
}
double ExtrinsicCalibration::run() {
updateStatus_("Starting");
points_.resetTriangulatedPoints();
pairs_.clear();
calculatePairPoses();
calculateInitialPoses();
return optimize();
}
const CalibrationData::Calibration& ExtrinsicCalibration::calibration(unsigned int c) {
return calib_.at(c);
}
const CalibrationData::Calibration& ExtrinsicCalibration::calibrationOptimized(unsigned int c) {
return calib_optimized_.at(c);
}
double ExtrinsicCalibration::reprojectionError(unsigned int c) {
return rmse_.at(c);
}
double ExtrinsicCalibration::reprojectionError() {
return rmse_total_;
}
bool ExtrinsicCalibration::toFile(const std::string& fname) {
points_.clear();
std::ofstream ofs(fname, std::ios_base::trunc);
msgpack::pack(ofs, *this);
ofs.close();
return true;
}
bool ExtrinsicCalibration::fromFile(const std::string& fname) {
points_ = CalibrationPoints<double>();
mask_ = {};
calib_ = {};
std::ifstream ifs(fname);
std::stringstream buf;
msgpack::object_handle oh;
buf << ifs.rdbuf();
msgpack::unpack(oh, buf.str().data(), buf.str().size());
oh.get().convert(*this);
return true;
}
}
}
#include <loguru.hpp>
#include <ftl/exception.hpp>
#include <ftl/calibration/object.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
using ftl::calibration::ArUCoObject;
using ftl::calibration::ChessboardObject;
ArUCoObject::ArUCoObject(cv::aruco::PREDEFINED_DICTIONARY_NAME dictionary,
float baseline, float tag_size, int id1, int id2) :
baseline_(baseline), tag_size_(tag_size), id1_(id1), id2_(id2) {
dict_ = cv::aruco::getPredefinedDictionary(dictionary);
params_ = cv::aruco::DetectorParameters::create();
params_->cornerRefinementMinAccuracy = 0.01;
// cv::aruco::CORNER_REFINE_CONTOUR memory issues? intrinsic quality?
params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
}
std::vector<cv::Point3d> ArUCoObject::object() {
return {
cv::Point3d(0.0, 0.0, 0.0),
cv::Point3d(tag_size_, 0.0, 0.0),
cv::Point3d(tag_size_, tag_size_, 0.0),
cv::Point3d(0.0, tag_size_, 0.0),
cv::Point3d(baseline_, 0.0, 0.0),
cv::Point3d(baseline_ + tag_size_, 0.0, 0.0),
cv::Point3d(baseline_ + tag_size_, tag_size_, 0.0),
cv::Point3d(baseline_, tag_size_, 0.0)
};
}
int ArUCoObject::detect(cv::InputArray im, std::vector<cv::Point2d>& result, const cv::Mat& K, const cv::Mat& distCoeffs) {
// note: cv::aruco requires floats
std::vector<std::vector<cv::Point2f>> corners;
std::vector<int> ids;
cv::Mat im_gray;
// OpenCV bug: detectMarkers consumes all available memory when any
// distortion parameters are passed
const cv::Mat d;
if (im.size() == cv::Size(0, 0)) {
return -1;
}
if (im.isGpuMat()) {
cv::Mat dl;
im.getGpuMat().download(dl);
cv::cvtColor(dl, im_gray, cv::COLOR_BGRA2GRAY);
}
else if (im.isMat()) {
cv::cvtColor(im.getMat(), im_gray, cv::COLOR_BGRA2GRAY);
}
else {
throw ftl::exception("Bad input (not cv::Mat/cv::GpuMat)");
}
cv::aruco::detectMarkers(im_gray, dict_, corners, ids, params_,
cv::noArray(), K, d);
if (ids.size() < 2) { return 0; }
const size_t n_corners = 4;
const size_t n_tags = 2;
std::vector<cv::Point2d> marker1; marker1.reserve(n_corners);
std::vector<cv::Point2d> marker2; marker2.reserve(n_corners);
int n = 0;
// find the right markers
for (unsigned int i = 0; i < ids.size(); i++) {
if (ids[i] == id1_) {
n++;
for (auto& p : corners[i]) {
marker1.push_back({p.x, p.y});
}
CHECK(corners[i].size() == n_corners);
}
if (ids[i] == id2_) {
n++;
for (auto& p : corners[i]) {
marker2.push_back({p.x, p.y});
}
CHECK(corners[i].size() == n_corners);
}
}
if (marker1.empty() || marker2.empty()) {
return 0;
}
if (n != 2) {
LOG(WARNING) << "Found more than one marker with the same ID";
return 0;
}
// add the points to output
result.reserve(n_tags*n_corners + result.size());
for (size_t i_corner = 0; i_corner < n_corners; i_corner++) {
result.push_back(marker1[i_corner]);
}
for (size_t i_corner = 0; i_corner < n_corners; i_corner++) {
result.push_back(marker2[i_corner]);
}
return n_tags*n_corners;
}
////////////////////////////////////////////////////////////////////////////////
cv::Size ChessboardObject::chessboardSize() {
return {chessboard_size_.width + 1, chessboard_size_.height + 1};
}
double ChessboardObject::squareSize() {
return square_size_;
}
ChessboardObject::ChessboardObject(int rows, int cols, double square_size) :
chessboard_size_(cols - 1, rows - 1), square_size_(square_size),
flags_(cv::CALIB_CB_NORMALIZE_IMAGE|cv::CALIB_CB_ACCURACY) {
init();
}
void ChessboardObject::init() {
object_points_.reserve(chessboard_size_.width * chessboard_size_.height);
for (int row = 0; row < chessboard_size_.height; ++row) {
for (int col = 0; col < chessboard_size_.width; ++col) {
object_points_.push_back({col * square_size_, row * square_size_, 0});
}}
}
int ChessboardObject::detect(cv::InputArray im, std::vector<cv::Point2d>& points, const cv::Mat& K, const cv::Mat& D) {
cv::Mat tmp;
if (im.isMat()) {
tmp = im.getMat();
}
else if (im.isGpuMat()) {
im.getGpuMat().download(tmp);
}
else {
throw ftl::exception("Image not cv::Mat or cv::GpuMat");
}
if (cv::findChessboardCornersSB(tmp, chessboard_size_, points, flags_)) {
return 1;
}
return 0;
}
std::vector<cv::Point3d> ChessboardObject::object() {
return object_points_;
}
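// Usage sketch shared by both calibration objects: pair detect() with
// object() for pose estimation. The image, intrinsics (K, D) and board
// dimensions below are illustrative assumptions.
void detectPoseExample(const cv::Mat& img, const cv::Mat& K, const cv::Mat& D) {
	ChessboardObject board(18, 25, 0.015); // rows, cols, square size [m]
	std::vector<cv::Point2d> corners;
	if (board.detect(img, corners, K, D) > 0) {
		cv::Mat rvec, tvec;
		// 2D corners are pushed in the same order as board.object()
		cv::solvePnP(board.object(), corners, K, D, rvec, tvec);
	}
}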
#include "ftl/calibration/optimize.hpp"
#include "ftl/calibration/parameters.hpp"
#include "loguru.hpp"
#include <ceres/ceres.h>
#include <loguru.hpp>
#include <ftl/exception.hpp>
#include <algorithm>
#include <unordered_set>
@@ -15,7 +19,8 @@ using cv::Mat;
using cv::Point3d;
using cv::Point2d;
using cv::Vec3d;
using cv::Size;
using cv::Rect;
using ftl::calibration::BundleAdjustment;
@@ -23,19 +28,179 @@ using ftl::calibration::Camera;
////////////////////////////////////////////////////////////////////////////////
void Camera::setRotation(const Mat& R) {
if (((R.size() != Size(3, 3)) &&
(R.size() != Size(3, 1)) &&
(R.size() != Size(1, 3))) ||
(R.type() != CV_64FC1)) {
throw ftl::exception("bad rotation matrix size/type");
}
Mat rvec;
if (R.size() == cv::Size(3, 3)) { cv::Rodrigues(R, rvec); }
else { rvec = R; }
ceres::AngleAxisToQuaternion<double>((double*)(rvec.data), data + Parameter::ROTATION);
}
void Camera::setTranslation(const Mat& t) {
if ((t.type() != CV_64FC1) ||
(t.size() != cv::Size(1, 3))) {
throw ftl::exception("bad translation vector");
}
data[Parameter::TX] = t.at<double>(0);
data[Parameter::TY] = t.at<double>(1);
data[Parameter::TZ] = t.at<double>(2);
}
void Camera::setIntrinsic(const Mat& K, cv::Size sz) {
size = sz;
if ((K.type() != CV_64FC1) || (K.size() != cv::Size(3, 3))) {
throw ftl::exception("bad intrinsic matrix");
}
data[Parameter::F] = K.at<double>(0, 0);
data[Parameter::CX] = K.at<double>(0, 2);
data[Parameter::CY] = K.at<double>(1, 2);
}
void Camera::setDistortion(const Mat& D) {
if ((D.type() != CV_64FC1)) {
throw ftl::exception("distortion coefficients must be CV_64FC1");
}
switch(D.total()) {
case 12:
/*
data[Parameter::S1] = D.at<double>(8);
data[Parameter::S2] = D.at<double>(9);
data[Parameter::S3] = D.at<double>(10);
data[Parameter::S4] = D.at<double>(11);
*/
[[fallthrough]];
case 8:
data[Parameter::K4] = D.at<double>(5);
data[Parameter::K5] = D.at<double>(6);
data[Parameter::K6] = D.at<double>(7);
[[fallthrough]];
case 5:
data[Parameter::K3] = D.at<double>(4);
[[fallthrough]];
default:
data[Parameter::K1] = D.at<double>(0);
data[Parameter::K2] = D.at<double>(1);
data[Parameter::P1] = D.at<double>(2);
data[Parameter::P2] = D.at<double>(3);
}
}
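// The switch above assumes OpenCV's coefficient ordering
// (k1, k2, p1, p2, k3, k4, k5, k6, ...); a sketch with placeholder values:
void setDistortionExample(Camera& camera) {
	cv::Mat D = (cv::Mat_<double>(1, 8) <<
		-0.28, 0.07, 1e-4, -2e-4, 0.01, 0.0, 0.0, 0.0); // hypothetical
	camera.setDistortion(D); // eight coefficients populate K4..K6 as well
}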
Camera::Camera(const Mat &K, const Mat &D, const Mat &R, const Mat &tvec, cv::Size sz) {
setIntrinsic(K, D, sz);
if (!R.empty()) { setRotation(R); }
if (!tvec.empty()) { setTranslation(tvec); }
}
Camera::Camera(const ftl::calibration::CalibrationData::Calibration& calib) {
setIntrinsic(calib.intrinsic.matrix(), calib.intrinsic.distCoeffs.Mat(), calib.intrinsic.resolution);
setExtrinsic(calib.extrinsic.matrix()(cv::Rect(0, 0, 3, 3)), cv::Mat(calib.extrinsic.tvec));
}
ftl::calibration::CalibrationData::Intrinsic Camera::intrinsic() const {
return ftl::calibration::CalibrationData::Intrinsic(intrinsicMatrix(), distortionCoefficients(), size);
}
ftl::calibration::CalibrationData::Extrinsic Camera::extrinsic() const {
return ftl::calibration::CalibrationData::Extrinsic(rvec(), tvec());
}
Mat Camera::intrinsicMatrix() const {
Mat K = Mat::eye(3, 3, CV_64FC1);
K.at<double>(0, 0) = data[Parameter::F];
K.at<double>(1, 1) = data[Parameter::F];
K.at<double>(0, 2) = data[Parameter::CX];
K.at<double>(1, 2) = data[Parameter::CY];
return K;
}
Mat Camera::distortionCoefficients() const {
Mat D;
if (Camera::n_distortion_parameters <= 4) { D = Mat::zeros(1, 4, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 5) { D = Mat::zeros(1, 5, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 8) { D = Mat::zeros(1, 8, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 12) { D = Mat::zeros(1, 12, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 14) { D = Mat::zeros(1, 14, CV_64FC1); }
switch(Camera::n_distortion_parameters) {
case 14: // not used in OpenCV?
case 12:
case 8:
D.at<double>(5) = data[Parameter::K4];
D.at<double>(6) = data[Parameter::K5];
D.at<double>(7) = data[Parameter::K6];
[[fallthrough]];
case 5:
D.at<double>(4) = data[Parameter::K3];
[[fallthrough]];
case 4:
D.at<double>(0) = data[Parameter::K1];
D.at<double>(1) = data[Parameter::K2];
D.at<double>(2) = data[Parameter::P1];
D.at<double>(3) = data[Parameter::P2];
}
return D;
}
Mat Camera::rvec() const {
cv::Mat rvec(cv::Size(3, 1), CV_64FC1);
CHECK_EQ(rvec.step1(), 3);
ceres::QuaternionToAngleAxis(data + Parameter::ROTATION, (double*)(rvec.data));
return rvec;
}
Mat Camera::tvec() const {
return Mat(Vec3d(data[Parameter::TX], data[Parameter::TY], data[Parameter::TZ]));
}
Mat Camera::rmat() const {
cv::Mat R(cv::Size(3, 3), CV_64FC1);
CHECK_EQ(R.step1(), 3);
ceres::QuaternionToRotation<double>(data + Parameter::ROTATION,
ceres::RowMajorAdapter3x3<double>((double*)(R.data)));
return R;
}
Mat Camera::extrinsicMatrix() const {
Mat T = Mat::eye(4, 4, CV_64FC1);
rmat().copyTo(T(Rect(0, 0, 3, 3)));
tvec().copyTo(T(Rect(3, 0, 1, 3)));
return T;
}
Mat Camera::extrinsicMatrixInverse() const {
return transform::inverse(extrinsicMatrix());
}
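// The 4x4 matrices above compose in the usual way; for example, the
// relative transform between two (hypothetical) cameras a and b:
cv::Mat relativeTransform(const Camera& cam_a, const Camera& cam_b) {
	// maps points from cam_a's coordinate frame to cam_b's frame
	return cam_b.extrinsicMatrix() * cam_a.extrinsicMatrixInverse();
}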
////////////////////////////////////////////////////////////////////////////////
struct ReprojectionError {
/**
* Reprojection error.
*
* Camera model has _CAMERA_PARAMETERS parameters:
*
* - rotation and translation: rx, ry, rz, tx, ty, tz
* - rotation and translation: q1, q2, q3, q4, tx, ty, tz
* - focal length: f (fx == fy assumed)
* - principal point: cx, cy
* - first three radial distortion coefficients: k1, k2, k3
* - distortion coefficients: k1, k2, k3, k4, k5, k6, p1, p2
*
* Camera model documented in
* https://docs.opencv.org/master/d9/d0c/group__calib3d.html
* https://github.com/opencv/opencv/blob/b698d0a6ee12342a87b8ad739d908fd8d7ff1fca/modules/calib3d/src/calibration.cpp#L774
*/
explicit ReprojectionError(double observed_x, double observed_y)
: observed_x(observed_x), observed_y(observed_y) {}
@@ -46,36 +211,48 @@ struct ReprojectionError {
T* residuals) const {
T p[3];
ceres::QuaternionRotatePoint(camera + Camera::Parameter::ROTATION, point, p);
// Apply rotation and translation
ceres::AngleAxisRotatePoint(camera + Camera::Parameter::ROTATION, point, p);
p[0] += camera[Camera::Parameter::TX];
p[1] += camera[Camera::Parameter::TY];
p[2] += camera[Camera::Parameter::TZ];
T x = T(p[0]) / p[2];
T y = T(p[1]) / p[2];
T x = p[0] / p[2];
T y = p[1] / p[2];
// Intrinsic parameters
const T& focal = camera[Camera::Parameter::F];
const T& f = camera[Camera::Parameter::F];
const T& cx = camera[Camera::Parameter::CX];
const T& cy = camera[Camera::Parameter::CY];
// Distortion parameters k1, k2, k3
// Distortion parameters
const T& k1 = camera[Camera::Parameter::K1];
const T& k2 = camera[Camera::Parameter::K2];
const T& k3 = camera[Camera::Parameter::K3];
const T& k4 = camera[Camera::Parameter::K4];
const T& k5 = camera[Camera::Parameter::K5];
const T& k6 = camera[Camera::Parameter::K6];
const T& p1 = camera[Camera::Parameter::P1];
const T& p2 = camera[Camera::Parameter::P2];
const T r2 = x*x + y*y;
const T r4 = r2*r2;
const T r6 = r4*r2;
T distortion = T(1.0) + k1*r2 + k2*r4 + k3*r6;
// Radial distortion
const T cdist = T(1.0) + k1*r2 + k2*r4 + k3*r6;
// Radial distortion: rational model
const T icdist = T(1.0)/(T(1.0) + k4*r2 + k5*r4 + k6*r6);
// Tangential distortion
const T pdistx = (T(2.0)*x*y)*p1 + (r2 + T(2.0)*x*x)*p2;
const T pdisty = (r2 + T(2.0)*y*y)*p1 + (T(2.0)*x*y)*p2;
// Apply distortion
const T xd = x*cdist*icdist + pdistx;
const T yd = y*cdist*icdist + pdisty;
// Projected point position
T predicted_x = focal*x*distortion + cx;
T predicted_y = focal*y*distortion + cy;
T predicted_x = f*xd + cx;
T predicted_y = f*yd + cy;
// Error: the difference between the predicted and observed position
residuals[0] = predicted_x - T(observed_x);
@@ -99,6 +276,17 @@ struct ReprojectionError {
double observed_y;
};
static ReprojectionError reproject_ = ReprojectionError(0.0, 0.0);
cv::Point2d ftl::calibration::projectPoint(const Camera& camera, const cv::Point3d& point) {
cv::Point2d out;
reproject_(static_cast<const double* const>(camera.data), reinterpret_cast<const double* const>(&(point.x)), reinterpret_cast<double*>(&(out.x)));
return out;
}
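// Consistency sketch: projectPoint() should agree with cv::projectPoints()
// given the same parameters (cam is an assumed, fully initialized Camera;
// cv::projectPoints requires <opencv2/calib3d.hpp>):
void projectPointCheck(const Camera& cam) {
	std::vector<cv::Point3d> X = {{0.1, -0.2, 2.0}};
	std::vector<cv::Point2d> x_cv;
	cv::projectPoints(X, cam.rvec(), cam.tvec(), cam.intrinsicMatrix(),
		cam.distortionCoefficients(), x_cv);
	cv::Point2d x = ftl::calibration::projectPoint(cam, X[0]);
	// x_cv[0] and x should match up to floating point error
}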
////////////////////////////////////////////////////////////////////////////////
// TODO: estimate pose and optimize it instead (?)
struct LengthError {
explicit LengthError(const double d) : d(d) {}
@@ -107,7 +295,7 @@ struct LengthError {
auto x = p1[0] - p2[0];
auto y = p1[1] - p2[1];
auto z = p1[2] - p2[2];
residual[0] = T(d) - sqrt(x*x + y*y + z*z);
residual[0] = d - sqrt(x*x + y*y + z*z);
return true;
}
@@ -119,6 +307,8 @@ struct LengthError {
double d;
};
//
struct ScaleError {
ScaleError(const double d, const Point3d& p) : d(d), p(p) {}
@@ -144,9 +334,9 @@ struct ScaleError {
double ftl::calibration::optimizeScale(const vector<Point3d> &object_points, vector<Point3d> &points) {
// use exceptions instead
CHECK(points.size() % object_points.size() == 0);
CHECK(points.size() % 2 == 0);
// throw exception instead
CHECK_EQ(points.size() % object_points.size(), 0);
CHECK_EQ(points.size() % 2, 0);
// initial scale guess from first two object points
@@ -170,7 +360,7 @@ double ftl::calibration::optimizeScale(const vector<Point3d> &object_points, vec
vector<double> d;
ceres::Problem problem;
auto loss_function = new ceres::HuberLoss(1.0);
ceres::LossFunction* loss_function = nullptr;
// use all pairwise distances
for (size_t i = 0; i < object_points.size(); i++) {
@@ -200,8 +390,7 @@ double ftl::calibration::optimizeScale(const vector<Point3d> &object_points, vec
////////////////////////////////////////////////////////////////////////////////
void BundleAdjustment::addCamera(Camera& camera) {
// cameras can't be added after points
if (points_.size() != 0) { throw std::exception(); }
if (points_.size() != 0) { throw ftl::exception("cameras can't be added after points"); }
cameras_.push_back(&camera);
}
@@ -212,17 +401,34 @@ void BundleAdjustment::addCameras(vector<Camera>& cameras) {
void BundleAdjustment::addPoint(const vector<bool>& visibility, const vector<Point2d>& observations, Point3d& point) {
if ((observations.size() != visibility.size()) ||
(visibility.size() != cameras_.size())) { throw std::exception(); }
(visibility.size() != cameras_.size())) { throw ftl::exception("observation and visibility vector sizes do not match"); }
points_.push_back(BundleAdjustment::Point{visibility, observations, reinterpret_cast<double*>(&point)});
points_.push_back(BundleAdjustment::Point{visibility, observations, point});
}
void BundleAdjustment::addPoints(const vector<vector<bool>>& visibility, const vector<vector<Point2d>>& observations, vector<Point3d>& points) {
if ((observations.size() != points.size()) ||
(observations.size() != visibility.size())) { throw std::exception(); }
if (observations.size() != visibility.size()) { throw ftl::exception("observation and visibility vector sizes do not match"); }
for (size_t i = 0; i < points.size(); i++) {
addPoint(visibility[i], observations[i], points[i]);
auto npoints = points.size();
auto ncameras = observations.size();
for (unsigned c = 0; c < ncameras; c++) {
if ((npoints != observations[c].size()) ||
(npoints != visibility[c].size())) {
throw ftl::exception("wrong number of points");
}
}
vector<bool> add_vis;
vector<Point2d> add_obs;
for (size_t i = 0; i < npoints; i++) {
add_obs.clear();
add_vis.clear();
for (size_t c = 0; c < ncameras; c++) {
add_vis.push_back(visibility[c][i]);
add_obs.push_back(observations[c][i]);
}
addPoint(add_vis, add_obs, points[i]);
}
}
@@ -230,48 +436,69 @@ void BundleAdjustment::addPoint(const vector<Point2d>& observations, Point3d& po
vector<bool> visibility(observations.size(), true);
addPoint(visibility, observations, point);
}
void BundleAdjustment::addPoints(const vector<vector<Point2d>>& observations, std::vector<Point3d>& points) {
if (observations.size() != points.size()) { throw std::exception(); }
if (observations.size() != points.size()) { throw ftl::exception("observation and visibility vector sizes do not match"); }
for (size_t i = 0; i < points.size(); i++) {
addPoint(observations[i], points[i]);
}
}
void BundleAdjustment::addObject(const vector<Point3d> &object_points) {
if (points_.size() % object_points.size() != 0) { throw ftl::exception("number of points must be a multiple of object point count"); }
objects_.push_back(BundleAdjustment::Object {0, (int) points_.size(), object_points});
}
void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const BundleAdjustment::Options &options) {
vector<int> constant_camera_parameters;
std::set<int> constant_camera_parameters;
// extrinsic paramters
// apply options
for (size_t i = 0; i < cameras_.size(); i++) {
if (!options.rational_model) {
cameras_[i]->data[Camera::Parameter::K4] = 0.0;
cameras_[i]->data[Camera::Parameter::K5] = 0.0;
cameras_[i]->data[Camera::Parameter::K6] = 0.0;
constant_camera_parameters.insert(Camera::Parameter::K4);
constant_camera_parameters.insert(Camera::Parameter::K5);
constant_camera_parameters.insert(Camera::Parameter::K6);
}
if (options.zero_distortion) {
cameras_[i]->data[Camera::Parameter::K1] = 0.0;
cameras_[i]->data[Camera::Parameter::K2] = 0.0;
cameras_[i]->data[Camera::Parameter::K3] = 0.0;
cameras_[i]->data[Camera::Parameter::K4] = 0.0;
cameras_[i]->data[Camera::Parameter::K5] = 0.0;
cameras_[i]->data[Camera::Parameter::K6] = 0.0;
cameras_[i]->data[Camera::Parameter::P1] = 0.0;
cameras_[i]->data[Camera::Parameter::P2] = 0.0;
}
}
// set extrinsic paramters constant for all cameras
if (!options.optimize_motion) {
for (int i = 0; i < 3; i++) {
constant_camera_parameters.push_back(Camera::Parameter::ROTATION + i);
constant_camera_parameters.push_back(Camera::Parameter::TRANSLATION + i);
}
constant_camera_parameters.insert(Camera::Parameter::Q1);
constant_camera_parameters.insert(Camera::Parameter::Q2);
constant_camera_parameters.insert(Camera::Parameter::Q3);
constant_camera_parameters.insert(Camera::Parameter::Q4);
constant_camera_parameters.insert(Camera::Parameter::TX);
constant_camera_parameters.insert(Camera::Parameter::TY);
constant_camera_parameters.insert(Camera::Parameter::TZ);
}
if (!options.fix_distortion) {
LOG(WARNING) << "Optimization of distortion parameters is not supported "
<< "and results may contain invalid values.";
}
// intrinsic parameters
// set intrinsic parameters constant for all cameras
if (!options.optimize_intrinsic || options.fix_focal) {
constant_camera_parameters.push_back(Camera::Parameter::F);
constant_camera_parameters.insert(Camera::Parameter::F);
}
if (!options.optimize_intrinsic || options.fix_principal_point) {
constant_camera_parameters.push_back(Camera::Parameter::CX);
constant_camera_parameters.push_back(Camera::Parameter::CY);
constant_camera_parameters.insert(Camera::Parameter::CX);
constant_camera_parameters.insert(Camera::Parameter::CY);
}
if (!options.optimize_intrinsic || options.fix_distortion) {
constant_camera_parameters.push_back(Camera::Parameter::K1);
constant_camera_parameters.push_back(Camera::Parameter::K2);
constant_camera_parameters.push_back(Camera::Parameter::K3);
constant_camera_parameters.insert(Camera::Parameter::K1);
constant_camera_parameters.insert(Camera::Parameter::K2);
constant_camera_parameters.insert(Camera::Parameter::K3);
constant_camera_parameters.insert(Camera::Parameter::K4);
constant_camera_parameters.insert(Camera::Parameter::K5);
constant_camera_parameters.insert(Camera::Parameter::K6);
constant_camera_parameters.insert(Camera::Parameter::P1);
constant_camera_parameters.insert(Camera::Parameter::P2);
}
if (!options.optimize_motion && !options.optimize_intrinsic) {
@@ -281,43 +508,68 @@ void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const
}
}
else {
std::unordered_set<int> fix_extrinsic(
std::set<int> fix_extrinsic(
options.fix_camera_extrinsic.begin(), options.fix_camera_extrinsic.end());
std::unordered_set<int> fix_intrinsic(
std::set<int> fix_intrinsic(
options.fix_camera_extrinsic.begin(), options.fix_camera_extrinsic.end());
for (size_t i = 0; i < cameras_.size(); i++) {
std::unordered_set<int> fixed_parameters(
std::set<int> constant_parameters(
constant_camera_parameters.begin(),
constant_camera_parameters.end());
if (fix_extrinsic.find(i) != fix_extrinsic.end()) {
fixed_parameters.insert({
Camera::Parameter::RX, Camera::Parameter::RY,
Camera::Parameter::RZ, Camera::Parameter::TX,
Camera::Parameter::TY, Camera::Parameter::TZ
constant_parameters.insert({
Camera::Parameter::Q1, Camera::Parameter::Q2,
Camera::Parameter::Q3, Camera::Parameter::Q4,
Camera::Parameter::TX, Camera::Parameter::TY,
Camera::Parameter::TZ
});
}
if (fix_intrinsic.find(i) != fix_intrinsic.end()) {
fixed_parameters.insert({
constant_parameters.insert({
Camera::Parameter::F, Camera::Parameter::CX,
Camera::Parameter::CY
});
}
vector<int> params(fixed_parameters.begin(), fixed_parameters.end());
vector<int> params(constant_parameters.begin(), constant_parameters.end());
if (params.size() == Camera::n_parameters) {
// Ceres crashes if all parameters are set constant using
// Ceres crashes if all parameters are set constant with
// SubsetParameterization()
// https://github.com/ceres-solver/ceres-solver/issues/347
// https://github.com/ceres-solver/ceres-solver/commit/27183d661ecae246dbce6d03cacf84f39fba1f1e
problem.SetParameterBlockConstant(getCameraPtr(i));
}
else if (params.size() > 0) {
problem.SetParameterization(getCameraPtr(i),
new ceres::SubsetParameterization(Camera::n_parameters, params));
ceres::LocalParameterization* camera_parameterization = nullptr;
if (constant_parameters.count(Camera::Parameter::ROTATION) == 0) {
// quaternion parametrization
for (auto& v : params) { v -= 4; }
camera_parameterization =
new ceres::ProductParameterization(
new ceres::QuaternionParameterization(),
new ceres::SubsetParameterization(Camera::n_parameters - 4, params));
}
else {
// extrinsic parameters constant
CHECK(constant_parameters.count(Camera::Parameter::Q1));
CHECK(constant_parameters.count(Camera::Parameter::Q2));
CHECK(constant_parameters.count(Camera::Parameter::Q3));
CHECK(constant_parameters.count(Camera::Parameter::Q4));
CHECK(constant_parameters.count(Camera::Parameter::TX));
CHECK(constant_parameters.count(Camera::Parameter::TY));
CHECK(constant_parameters.count(Camera::Parameter::TZ));
camera_parameterization =
new ceres::SubsetParameterization(Camera::n_parameters, params);
}
problem.SetParameterization(getCameraPtr(i), camera_parameterization);
}
}
}
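// The parameterization pattern above in isolation (Ceres 1.x API): the
// first four values of the block are a unit quaternion, the rest plain
// scalars. Subset indices are relative to the second factor, hence the
// shift by -4. Assumes `constant` is non-empty with indices >= 4 only.
void setQuaternionBlockParameterization(ceres::Problem& problem, double* block,
		int n_parameters, std::vector<int> constant) {
	for (auto& i : constant) { i -= 4; }
	problem.SetParameterization(block, new ceres::ProductParameterization(
		new ceres::QuaternionParameterization(), // values 0..3
		new ceres::SubsetParameterization(n_parameters - 4, constant)));
}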
@@ -325,7 +577,7 @@ void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const
void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
ceres::LossFunction *loss_function = nullptr;
ceres::LossFunction *loss_function = nullptr; // squared
if (options.loss == Options::Loss::HUBER) {
loss_function = new ceres::HuberLoss(1.0);
@@ -334,7 +586,7 @@ void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, co
loss_function = new ceres::CauchyLoss(1.0);
}
for (auto point : points_) {
for (auto& point : points_) {
for (size_t i = 0; i < point.observations.size(); i++) {
if (!point.visibility[i]) { continue; }
ceres::CostFunction* cost_function =
@@ -343,65 +595,23 @@ void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, co
problem.AddResidualBlock(cost_function,
loss_function,
getCameraPtr(i),
point.point);
&(point.point.x)
);
}
}
// apply options
_setCameraParametrization(problem, options);
if (!options.optmize_structure) {
// do not optimize points
for (auto &point : points_) { problem.SetParameterBlockConstant(point.point); }
}
}
void BundleAdjustment::_buildLengthProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
// same idea as in scale optimization
ceres::LossFunction *loss_function = nullptr;
// should use a separate configuration option
/*
if (options.loss == Options::Loss::HUBER) {
loss_function = new ceres::HuberLoss(1.0);
}
else if (options.loss == Options::Loss::CAUCHY) {
loss_function = new ceres::CauchyLoss(1.0);
}
*/
for (auto &object : objects_) {
int npoints = object.object_points.size();
auto &object_points = object.object_points;
vector<double> d;
for (int i = 0; i < npoints; i++) {
for (int j = i + 1; j < npoints; j++) {
d.push_back(norm(object_points[i]-object_points[j]));
}
}
for (int p = object.idx_start; p < object.idx_end; p += npoints) {
size_t i_d = 0;
for (size_t i = 0; i < object_points.size(); i++) {
for (size_t j = i + 1; j < object_points.size(); j++) {
double* p1 = points_[p+i].point;
double* p2 = points_[p+j].point;
auto cost_function = LengthError::Create(d[i_d++]);
problem.AddResidualBlock(cost_function, loss_function, p1, p2);
}
}
for (auto &point : points_) {
problem.SetParameterBlockConstant(&(point.point.x));
}
}
}
void BundleAdjustment::_buildProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
CHECK(options.use_quaternion) << "Only quaternion rotations are supported";
_buildBundleAdjustmentProblem(problem, options);
_buildLengthProblem(problem, options);
}
@@ -411,7 +621,7 @@ void BundleAdjustment::run(const BundleAdjustment::Options &bundle_adjustment_op
_buildProblem(problem, bundle_adjustment_options);
ceres::Solver::Options options;
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
options.linear_solver_type = ceres::DENSE_SCHUR;
options.minimizer_progress_to_stdout = bundle_adjustment_options.verbose;
if (bundle_adjustment_options.max_iter > 0) {
@@ -422,6 +632,8 @@ void BundleAdjustment::run(const BundleAdjustment::Options &bundle_adjustment_op
options.num_threads = bundle_adjustment_options.num_threads;
}
options.use_nonmonotonic_steps = bundle_adjustment_options.use_nonmonotonic_steps;
ceres::Solver::Summary summary;
ceres::Solve(options, &problem, &summary);
@@ -438,31 +650,65 @@ void BundleAdjustment::run() {
run(options);
}
void BundleAdjustment::_reprojectionErrorMSE(const int camera, double &error, double &npoints) const {
vector<Point2d> observations;
vector<Point3d> points;
int BundleAdjustment::removeObservations(double threshold) {
int removed = 0;
std::vector<double> error(cameras_.size(), 0.0);
observations.reserve(points_.size());
points.reserve(points_.size());
for (auto& point : points_) {
double error_total = 0.0;
double n_points = 0.0;
for (const auto& point : points_) {
if (!point.visibility[camera]) { continue; }
observations.push_back(point.observations[camera]);
points.push_back(Point3d(point.point[0], point.point[1], point.point[2]));
for (unsigned int c = 0; c < cameras_.size(); c++) {
if (!point.visibility[c]) { continue; }
const auto& obs = point.observations[c];
const auto& proj = projectPoint(*(cameras_[c]), point.point);
double err = pow(proj.x - obs.x, 2) + pow(proj.y - obs.y, 2);
error[c] = err;
error_total += err;
n_points += 1;
}
error_total /= n_points;
if (n_points <= 1) { continue; } // TODO: remove observation completely
auto K = cameras_[camera]->intrinsicMatrix();
auto rvec = cameras_[camera]->rvec();
auto tvec = cameras_[camera]->tvec();
for (unsigned int c = 0; c < cameras_.size(); c++) {
if (!point.visibility[c]) { continue; }
if ((error[c] - error_total) > threshold) {
point.visibility[c] = false;
n_points -= 1;
removed++;
break;
}
}
}
return removed;
}
error = ftl::calibration::reprojectionError(observations, points, K, Mat::zeros(1, 5, CV_64FC1), rvec, tvec);
npoints = points.size();
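// Usage sketch for removeObservations(): alternate between solving and
// pruning, as done by ExtrinsicCalibration above (the threshold schedule
// here is illustrative, in squared pixels; ba is already populated):
void pruneOutliersExample(BundleAdjustment& ba) {
	BundleAdjustment::Options options;
	ba.run(options);
	for (double threshold : {16.0, 9.0, 4.0}) {
		if (ba.removeObservations(threshold) > 0) {
			ba.run(options); // re-optimize after dropping outliers
		}
	}
}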
std::vector<cv::Point3d> BundleAdjustment::points() {
std::vector<cv::Point3d> pts;
pts.reserve(points_.size());
for (const auto& p : points_) { pts.push_back(p.point); }
return pts;
}
void BundleAdjustment::_reprojectionErrorSE(const int camera, double &error, double &npoints) const {
error = 0.0;
npoints = 0.0;
for (const auto& point : points_) {
if (!point.visibility[camera]) { continue; }
const auto& obs = point.observations[camera];
const auto& proj = projectPoint(*(cameras_[camera]), point.point);
error += pow(proj.x - obs.x, 2);
error += pow(proj.y - obs.y, 2);
npoints += 1.0;
}
}
double BundleAdjustment::reprojectionError(const int camera) const {
double error, ncameras;
_reprojectionErrorMSE(camera, ncameras, error);
return error / ncameras;
double error, npoints;
_reprojectionErrorSE(camera, error, npoints);
return sqrt(error / npoints);
}
double BundleAdjustment::reprojectionError() const {
@@ -470,9 +716,59 @@ double BundleAdjustment::reprojectionError() const {
double npoints = 0.0;
for (size_t i = 0; i < cameras_.size(); i++) {
double e, n;
_reprojectionErrorMSE(i, e, n);
error += e * n;
_reprojectionErrorSE(i, e, n);
error += e;
npoints += n;
}
return error / npoints;
return sqrt(error / npoints);
}
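// For reference, the value returned above is the pooled root mean squared
// reprojection error:
//
//   RMSE = sqrt( (1/N) * sum_c sum_{p visible in c} ||proj_c(X_p) - x_cp||^2 )
//
// where proj_c is projectPoint() for camera c and N is the total number
// of visible observations.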
////
void BundleAdjustment::addObject(const std::vector<cv::Point3d> &object_points) {
if (points_.size() % object_points.size() != 0) { throw ftl::exception("number of points must be a multiple of object point count"); }
objects_.push_back(BundleAdjustment::Object {0, (int) points_.size(), object_points});
}
void BundleAdjustment::_buildLengthProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
// same idea as in scale optimization
ceres::LossFunction *loss_function = nullptr;
// should use a separate configuration option
/*
if (options.loss == Options::Loss::HUBER) {
loss_function = new ceres::HuberLoss(1.0);
}
else if (options.loss == Options::Loss::CAUCHY) {
loss_function = new ceres::CauchyLoss(1.0);
}
*/
for (auto &object : objects_) {
int npoints = object.object_points.size();
auto &object_points = object.object_points;
vector<double> d;
for (int i = 0; i < npoints; i++) {
for (int j = i + 1; j < npoints; j++) {
d.push_back(norm(object_points[i]-object_points[j]));
}
}
for (int p = object.idx_start; p < object.idx_end; p += npoints) {
size_t i_d = 0;
for (size_t i = 0; i < object_points.size(); i++) {
for (size_t j = i + 1; j < object_points.size(); j++) {
double* p1 = static_cast<double*>(&(points_[p+i].point.x));
double* p2 = static_cast<double*>(&(points_[p+j].point.x));
auto cost_function = LengthError::Create(d[i_d++]);
problem.AddResidualBlock(cost_function, loss_function, p1, p2);
}
}
}
}
}
#include "ftl/calibration/parameters.hpp"
#include <loguru.hpp>
#include <ftl/calibration/parameters.hpp>
#include <ftl/exception.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <ceres/rotation.h>
using cv::Mat;
using cv::Size;
using cv::Point2d;
@@ -13,141 +19,6 @@ using std::vector;
using namespace ftl::calibration;
using ftl::calibration::Camera;
////////////////////////////////////////////////////////////////////////////////
void Camera::setRotation(const Mat& R) {
if (((R.size() != Size(3, 3)) &&
(R.size() != Size(3, 1)) &&
(R.size() != Size(1, 3))) ||
(R.type() != CV_64FC1)) { throw std::exception(); }
Mat rvec;
if (R.size() == cv::Size(3, 3)) { cv::Rodrigues(R, rvec); }
else { rvec = R; }
data[Parameter::RX] = rvec.at<double>(0);
data[Parameter::RY] = rvec.at<double>(1);
data[Parameter::RZ] = rvec.at<double>(2);
}
void Camera::setTranslation(const Mat& t) {
if ((t.type() != CV_64FC1) ||
(t.size() != cv::Size(1, 3))) { throw std::exception(); }
data[Parameter::TX] = t.at<double>(0);
data[Parameter::TY] = t.at<double>(1);
data[Parameter::TZ] = t.at<double>(2);
}
void Camera::setIntrinsic(const Mat& K) {
if ((K.type() != CV_64FC1) || (K.size() != cv::Size(3, 3))) {
throw std::exception();
}
data[Parameter::F] = K.at<double>(0, 0);
data[Parameter::CX] = K.at<double>(0, 2);
data[Parameter::CY] = K.at<double>(1, 2);
}
void Camera::setDistortion(const Mat& D) {
if ((D.type() != CV_64FC1)) { throw std::exception(); }
switch(D.total()) {
case 12:
/*
data[Parameter::S1] = D.at<double>(8);
data[Parameter::S2] = D.at<double>(9);
data[Parameter::S3] = D.at<double>(10);
data[Parameter::S4] = D.at<double>(11);
*/
[[fallthrough]];
case 8:
/*
data[Parameter::K4] = D.at<double>(5);
data[Parameter::K5] = D.at<double>(6);
data[Parameter::K6] = D.at<double>(7);
*/
[[fallthrough]];
case 5:
data[Parameter::K3] = D.at<double>(4);
[[fallthrough]];
default:
data[Parameter::K1] = D.at<double>(0);
data[Parameter::K2] = D.at<double>(1);
/*
data[Parameter::P1] = D.at<double>(2);
data[Parameter::P2] = D.at<double>(3);
*/
}
}
Camera::Camera(const Mat &K, const Mat &D, const Mat &R, const Mat &tvec) {
setIntrinsic(K, D);
if (!R.empty()) { setRotation(R); }
if (!tvec.empty()) { setTranslation(tvec); }
}
Mat Camera::intrinsicMatrix() const {
Mat K = Mat::eye(3, 3, CV_64FC1);
K.at<double>(0, 0) = data[Parameter::F];
K.at<double>(1, 1) = data[Parameter::F];
K.at<double>(0, 2) = data[Parameter::CX];
K.at<double>(1, 2) = data[Parameter::CY];
return K;
}
Mat Camera::distortionCoefficients() const {
Mat D;
if (Camera::n_distortion_parameters <= 4) { D = Mat::zeros(4, 1, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 5) { D = Mat::zeros(5, 1, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 8) { D = Mat::zeros(8, 1, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 12) { D = Mat::zeros(12, 1, CV_64FC1); }
else if (Camera::n_distortion_parameters <= 14) { D = Mat::zeros(14, 1, CV_64FC1); }
switch(Camera::n_distortion_parameters) {
case 14:
case 12:
case 8:
case 5:
D.at<double>(4) = data[Parameter::K3];
case 4:
D.at<double>(0) = data[Parameter::K1];
D.at<double>(1) = data[Parameter::K2];
}
return D;
}
Mat Camera::rvec() const {
return Mat(Vec3d(data[Parameter::RX], data[Parameter::RY], data[Parameter::RZ]));
}
Mat Camera::tvec() const {
return Mat(Vec3d(data[Parameter::TX], data[Parameter::TY], data[Parameter::TZ]));
}
Mat Camera::rmat() const {
Mat R;
cv::Rodrigues(rvec(), R);
return R;
}
Mat Camera::extrinsicMatrix() const {
Mat T = Mat::eye(4, 4, CV_64FC1);
rmat().copyTo(T(Rect(0, 0, 3, 3)));
tvec().copyTo(T(Rect(3, 0, 1, 3)));
return T;
}
Mat Camera::extrinsicMatrixInverse() const {
return extrinsicMatrix().inv();
}
////////////////////////////////////////////////////////////////////////////////
bool validate::translationStereo(const Mat &t) {
@@ -278,7 +149,7 @@ bool ftl::calibration::validate::distortionCoefficients(const Mat &D, Size size)
return true;
}
Mat ftl::calibration::scaleCameraMatrix(const Mat &K, const Size &size_new, const Size &size_old) {
Mat ftl::calibration::scaleCameraMatrix(const Mat &K, const Size &size_old, const Size &size_new) {
Mat S(cv::Size(3, 3), CV_64F, 0.0);
double scale_x = ((double) size_new.width) / ((double) size_old.width);
double scale_y = ((double) size_new.height) / ((double) size_old.height);
......
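// A sketch of the standard rescaling implemented here (no skew assumed),
// consistent with scale_x/scale_y above:
//
//   fx' = fx * scale_x,  cx' = cx * scale_x
//   fy' = fy * scale_y,  cy' = cy * scale_y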
#include <ftl/calibration/stereorectify.hpp>
#include <ftl/calibration/parameters.hpp>
#include <opencv2/calib3d.hpp>
namespace ftl {
namespace calibration {
// ==== StereoRectify ==========================================================
StereoRectify::StereoRectify(const CalibrationData::Calibration& c1, const CalibrationData::Calibration& c2, cv::Size sz, double alpha, int flags) {
size = sz;
if (size == cv::Size{0, 0}) {
size = c1.intrinsic.resolution;
}
K1 = c1.intrinsic.matrix(size);
K2 = c2.intrinsic.matrix(size);
c1.intrinsic.distCoeffs.Mat().copyTo(distCoeffs1);
c2.intrinsic.distCoeffs.Mat().copyTo(distCoeffs2);
cv::Mat T1 = c1.extrinsic.matrix();
cv::Mat T2 = c2.extrinsic.matrix();
cv::Mat T = T2 * transform::inverse(T1);
transform::getRotationAndTranslation(T, R, t);
cv::stereoRectify( K1, distCoeffs1, K2, distCoeffs2, size, R, t,
R1, R2, P1, P2, Q, flags, alpha, size, &roi1, &roi2);
}
double StereoRectify::baseline() const {
return cv::norm(t);
}
void StereoRectify::map1(cv::Mat &m1, cv::Mat &m2, int format) {
cv::initUndistortRectifyMap(K1, distCoeffs1, R1, P1, size, format, m1, m2);
}
void StereoRectify::map2(cv::Mat &m1, cv::Mat &m2, int format) {
cv::initUndistortRectifyMap(K2, distCoeffs2, R2, P2, size, format, m1, m2);
}
}
}
\ No newline at end of file
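// Usage sketch (the calibrations and input images are assumptions): build
// the rectification maps once, then remap each incoming frame.
void rectifyExample(const ftl::calibration::CalibrationData::Calibration& calib_left,
		const ftl::calibration::CalibrationData::Calibration& calib_right,
		const cv::Mat& left, const cv::Mat& right) {
	ftl::calibration::StereoRectify rectify(calib_left, calib_right,
		cv::Size(1280, 720), 0.0, cv::CALIB_ZERO_DISPARITY);
	cv::Mat m1x, m1y, m2x, m2y, left_r, right_r;
	rectify.map1(m1x, m1y, CV_32FC1);
	rectify.map2(m2x, m2y, CV_32FC1);
	cv::remap(left, left_r, m1x, m1y, cv::INTER_LINEAR);
	cv::remap(right, right_r, m2x, m2y, cv::INTER_LINEAR);
}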
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/core/utility.hpp>
#include <ftl/exception.hpp>
#include <ftl/calibration/structures.hpp>
#include <ftl/calibration/parameters.hpp>
#include <cmath>
using ftl::calibration::CalibrationData;
CalibrationData::Intrinsic::DistortionCoefficients::DistortionCoefficients() :
data_(14, 0.0) {
}
const cv::Mat CalibrationData::Intrinsic::DistortionCoefficients::Mat(int nparams) const {
if (nparams <= 0) {
return cv::Mat();
}
if (nparams > 14) {
nparams = 14;
}
return cv::Mat(cv::Size(nparams, 1), CV_64FC1, const_cast<double*>(data_.data()));
}
cv::Mat CalibrationData::Intrinsic::DistortionCoefficients::Mat(int nparams) {
if (nparams <= 0) {
return cv::Mat();
}
if (nparams > 14) {
nparams = 14;
}
return cv::Mat(cv::Size(nparams, 1), CV_64FC1, data_.data());
}
double& CalibrationData::Intrinsic::DistortionCoefficients::operator[](unsigned i) { return data_[i]; }
double CalibrationData::Intrinsic::DistortionCoefficients::operator[](unsigned i) const { return data_[i]; }
CalibrationData::Intrinsic::Intrinsic() :
resolution(0, 0), fx(0.0), fy(0.0), cx(0.0), cy(0.0) {}
void CalibrationData::Intrinsic::set(const cv::Mat& K, cv::Size sz) {
fx = K.at<double>(0, 0);
fy = K.at<double>(1, 1);
cx = K.at<double>(0, 2);
cy = K.at<double>(1, 2);
resolution = sz;
}
void CalibrationData::Intrinsic::set(const cv::Mat& K, const cv::Mat& D, cv::Size sz) {
D.copyTo(distCoeffs.Mat(D.cols));
set(K, sz);
}
CalibrationData::Intrinsic::Intrinsic(const cv::Mat &K, cv::Size size) {
set(K, size);
}
CalibrationData::Intrinsic::Intrinsic(const cv::Mat &K, const cv::Mat &D, cv::Size size) {
set(K, D, size);
}
CalibrationData::Intrinsic::Intrinsic(const CalibrationData::Intrinsic& other, cv::Size size) {
distCoeffs = DistortionCoefficients(other.distCoeffs);
sensorSize = other.sensorSize;
auto K = other.matrix(size);
fx = K.at<double>(0, 0);
fy = K.at<double>(1, 1);
cx = K.at<double>(0, 2);
cy = K.at<double>(1, 2);
resolution = size;
}
cv::Mat CalibrationData::Intrinsic::matrix() const {
cv::Mat K(cv::Size(3, 3), CV_64FC1, cv::Scalar(0));
K.at<double>(0, 0) = fx;
K.at<double>(1, 1) = fy;
K.at<double>(0, 2) = cx;
K.at<double>(1, 2) = cy;
K.at<double>(2, 2) = 1.0;
return K;
}
cv::Mat CalibrationData::Intrinsic::matrix(cv::Size size) const {
return ftl::calibration::scaleCameraMatrix(matrix(), resolution, size);
}
bool CalibrationData::Intrinsic::valid() const {
return (resolution != cv::Size{0, 0});
}
////////////////////////////////////////////////////////////////////////////////
void CalibrationData::Extrinsic::set(const cv::Mat& T) {
if (T.type() != CV_64FC1) {
throw ftl::exception("Input must be CV_64FC1");
}
if (!ftl::calibration::validate::pose(T)) {
throw ftl::exception("T is not a valid transform matrix");
}
cv::Rodrigues(T(cv::Rect(0, 0, 3, 3)), rvec);
tvec[0] = T.at<double>(0, 3);
tvec[1] = T.at<double>(1, 3);
tvec[2] = T.at<double>(2, 3);
}
void CalibrationData::Extrinsic::set(cv::InputArray R, cv::InputArray t) {
if ((t.type() != CV_64FC1) || (R.type() != CV_64FC1)) {
throw ftl::exception("Type of R and t must be CV_64FC1");
}
if ((t.size() != cv::Size(3, 1)) && (t.size() != cv::Size(1, 3))) {
throw ftl::exception("Size of t must be (3, 1) or (1, 3)");
}
if (R.isMat()) {
const auto& rmat = R.getMat();
if (R.size() == cv::Size(3, 3)) {
if (!ftl::calibration::validate::rotationMatrix(rmat)) {
throw ftl::exception("R is not a rotation matrix");
}
cv::Rodrigues(rmat, rvec);
}
else if ((R.size() == cv::Size(3, 1)) || R.size() == cv::Size(1, 3)) {
rvec[0] = rmat.at<double>(0);
rvec[1] = rmat.at<double>(1);
rvec[2] = rmat.at<double>(2);
}
else {
throw ftl::exception("R must be a 3x3 rotation matrix or 3x1/1x3 rotation vector (Rodrigues)");
}
}
const auto& tmat = t.getMat();
tvec[0] = tmat.at<double>(0);
tvec[1] = tmat.at<double>(1);
tvec[2] = tmat.at<double>(2);
}
CalibrationData::Extrinsic::Extrinsic() : rvec(0.0, 0.0, 0.0), tvec(0.0, 0.0, 0.0) {}
CalibrationData::Extrinsic::Extrinsic(const cv::Mat &T) {
set(T);
}
CalibrationData::Extrinsic::Extrinsic(cv::InputArray R, cv::InputArray t) {
set(R, t);
}
cv::Mat CalibrationData::Extrinsic::matrix() const {
cv::Mat T(cv::Size(4, 4), CV_64FC1, cv::Scalar(0.0));
cv::Rodrigues(rvec, T(cv::Rect(0, 0, 3, 3)));
T.at<double>(0, 3) = tvec[0];
T.at<double>(1, 3) = tvec[1];
T.at<double>(2, 3) = tvec[2];
T.at<double>(3, 3) = 1.0;
return T;
}
CalibrationData::Extrinsic CalibrationData::Extrinsic::inverse() const {
return CalibrationData::Extrinsic(ftl::calibration::transform::inverse(matrix()));
}
cv::Mat CalibrationData::Extrinsic::rmat() const {
cv::Mat R(cv::Size(3, 3), CV_64FC1, cv::Scalar(0.0));
cv::Rodrigues(rvec, R);
return R;
}
bool CalibrationData::Extrinsic::valid() const {
return !(
std::isnan(tvec[0]) || std::isnan(tvec[1]) || std::isnan(tvec[2]) ||
std::isnan(rvec[0]) || std::isnan(rvec[1]) || std::isnan(rvec[2]));
}
////////////////////////////////////////////////////////////////////////////////
CalibrationData CalibrationData::readFile(const std::string &path) {
cv::FileStorage fs;
fs.open(path.c_str(), cv::FileStorage::READ);
if (!fs.isOpened()) {
throw ftl::exception("could not open calibration file for reading");
}
CalibrationData calibration;
fs["enabled"] >> calibration.enabled;
if (!fs["origin"].isNone()) {
fs["origin"] >> calibration.origin;
}
for (auto it = fs["calibration"].begin(); it != fs["calibration"].end(); it++) {
Calibration calib;
ftl::codecs::Channel channel;
(*it)["channel"] >> channel;
(*it)["resolution"] >> calib.intrinsic.resolution;
(*it)["fx"] >> calib.intrinsic.fx;
(*it)["fy"] >> calib.intrinsic.fy;
(*it)["cx"] >> calib.intrinsic.cx;
(*it)["cy"] >> calib.intrinsic.cy;
(*it)["distCoeffs"] >> calib.intrinsic.distCoeffs.data_;
(*it)["sensorSize"] >> calib.intrinsic.sensorSize;
(*it)["rvec"] >> calib.extrinsic.rvec;
(*it)["tvec"] >> calib.extrinsic.tvec;
calibration.data[channel] = calib;
}
return calibration;
}
void CalibrationData::writeFile(const std::string &path) const {
cv::FileStorage fs(path, cv::FileStorage::WRITE);
if (!fs.isOpened()) {
throw ftl::exception("could not open calibration file for writing");
}
fs << "enabled" << enabled;
fs << "origin" << origin;
fs << "calibration" << "[";
for (auto &[channel, data] : data) {
fs << "{"
<< "channel" << int(channel)
<< "resolution" << data.intrinsic.resolution
<< "fx" << data.intrinsic.fx
<< "fy" << data.intrinsic.fy
<< "cx" << data.intrinsic.cx
<< "cy" << data.intrinsic.cy
<< "distCoeffs" << data.intrinsic.distCoeffs.data_
<< "rvec" << data.extrinsic.rvec
<< "tvec" << data.extrinsic.tvec
<< "sensorSize" << data.intrinsic.sensorSize
<< "}";
}
fs << "]";
fs.release();
}
CalibrationData::Calibration& CalibrationData::get(ftl::codecs::Channel channel) {
return data[channel];
}
bool CalibrationData::hasCalibration(ftl::codecs::Channel channel) const {
return data.count(channel) != 0;
}
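// Round-trip sketch (the file name and channel here are assumptions):
void calibrationFileExample() {
	auto calib = CalibrationData::readFile("calibration.yml");
	if (calib.hasCalibration(ftl::codecs::Channel::Left)) {
		auto& left = calib.get(ftl::codecs::Channel::Left);
		cv::Mat K = left.intrinsic.matrix(); // 3x3 camera matrix
		cv::Mat T = left.extrinsic.matrix(); // 4x4 pose
	}
	calib.writeFile("calibration-out.yml");
}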
// ==== CalibrationData::Calibration ===========================================
#include <loguru.hpp>
cv::Mat CalibrationData::Calibration::matrix() {
if (!intrinsic.valid() || !extrinsic.valid()) {
throw ftl::exception("Invalid calibration");
}
cv::Mat P = extrinsic.matrix();
cv::Mat R = P(cv::Rect(0, 0, 3, 3));
R = intrinsic.matrix() * R;
return P;
}
#include "visibility.hpp"
#include "loguru.hpp"
#include <loguru.hpp>
#include <numeric>
#include <limits>
#include <algorithm>
#include <queue>
#include <opencv2/core.hpp>
#include <ftl/exception.hpp>
#include <ftl/calibration/visibility.hpp>
using cv::Mat;
using cv::Scalar;
using cv::Size;
using std::vector;
using std::pair;
using std::make_pair;
using std::vector;
using std::pair;
using std::make_pair;
@@ -12,25 +24,36 @@ using std::make_pair;
using ftl::calibration::Paths;
using ftl::calibration::Visibility;
/** Get the 1-based index of the highest set bit; result is undefined for a == 0. */
inline int hbit(uint64_t a) {
#ifdef __GNUC__
return 64 - __builtin_clzll(a);
#endif
int v = 1;
while (a >>= 1) { v++; }
return v;
}
template<typename T>
Paths<T>::Paths(const vector<int> &previous, const vector<T> &distances) :
previous_(previous), distances_(distances) {}
Paths<T>::Paths(int id, const vector<int> &previous, const vector<T> &distances) :
id_(id), previous_(previous), distances_(distances) {}
template<typename T>
vector<int> Paths<T>::from(int i) const {
vector<int> path;
path.push_back(i);
if (previous_[i] == -1) { return {}; }
if (previous_[i] == -1) { return path; }
int current = previous_[i];
int current = i;
do {
while (previous_[current] != -1) {
if (distance(i) == std::numeric_limits<T>::max()) { return {}; } // no path
path.push_back(current);
current = previous_[current];
}
while (previous_[current] != -1);
path.push_back(id_);
return path;
}
@@ -144,7 +167,7 @@ static pair<vector<int>, vector<T>> dijstra_impl(const int i, const vector<vecto
template<typename T>
Paths<T> ftl::calibration::dijstra(const int i, const vector<vector<T>> &graph) {
auto tmp = dijstra_impl(i, graph);
return Paths<T>(tmp.first, tmp.second);
return Paths<T>(i, tmp.first, tmp.second);
}
template Paths<int> ftl::calibration::dijstra(const int i, const vector<vector<int>> &graph);
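// Usage sketch with the adjacency matrix from the unit tests (0 denotes
// "no edge", positive values are edge lengths):
void dijstraExample() {
	vector<vector<int>> graph = {
		{0, 4, 0, 0, 0, 0, 0, 8, 0},
		{4, 0, 8, 0, 0, 0, 0, 11, 0},
		{0, 8, 0, 7, 0, 4, 0, 0, 2},
		{0, 0, 7, 0, 9, 14, 0, 0, 0},
		{0, 0, 0, 9, 0, 10, 0, 0, 0},
		{0, 0, 4, 14, 10, 0, 2, 0, 0},
		{0, 0, 0, 0, 0, 2, 0, 1, 6},
		{8, 11, 0, 0, 0, 0, 1, 0, 7},
		{0, 0, 2, 0, 0, 0, 6, 7, 0}};
	auto paths = ftl::calibration::dijstra(0, graph);
	// paths.distance(7) == 8 and paths.to(8) == {0, 1, 2, 8}
}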
@@ -155,6 +178,7 @@ template Paths<double> ftl::calibration::dijstra(const int i, const vector<vecto
Visibility::Visibility(int n_cameras) :
n_cameras_(n_cameras),
n_max_(0),
graph_(n_cameras, vector(n_cameras, 0)),
mask_(n_cameras, vector(n_cameras, false))
{}
@@ -177,18 +201,37 @@ void Visibility::init(int n_cameras) {
template<typename T>
void Visibility::update(const vector<T> &add) {
if ((int) add.size() != n_cameras_) { throw std::exception(); }
if ((int) add.size() != n_cameras_) {
throw ftl::exception("number of points must match number of cameras");
}
n_max_ = n_cameras_;
for (int i = 0; i < n_cameras_; i++) {
if (!add[i]) { continue; }
for (int j = 0; j < n_cameras_; j++) {
if (i == j) { continue; }
if (add[j]) { graph_[i][j] += 1; }
}
}
}
void Visibility::update(uint64_t add) {
if (n_cameras_ > 64) {
throw ftl::exception("Bitmask update only if number of cameras less than 64");
}
n_max_ = std::max(n_max_, hbit(add));
for (int i = 0; i < n_max_; i++) {
if (!(add & (uint64_t(1) << i))) { continue; }
for (int j = 0; j < n_max_; j++) {
if (add & (uint64_t(1) << j)) {
graph_[i][j] += 1;
}
}
}
}
template void Visibility::update(const std::vector<int> &add);
template void Visibility::update(const std::vector<bool> &add);
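// Bitmask update sketch: one frame in which cameras 0, 2 and 3 observed
// the same calibration target (valid while n_cameras_ <= 64):
void visibilityExample() {
	Visibility vis(4);
	vis.update(uint64_t(0b1101)); // increments pairwise co-visibility counts
	// vis.count(0, 2) == 1; vis.count(1) == 0
}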
@@ -202,18 +245,32 @@ void Visibility::unmask(int a, int b) {
mask_[b][a] = false;
}
int Visibility::distance(int a, int b) const {
return graph_[a][b];
int Visibility::count(int camera) const {
return graph_[camera][camera];
}
int Visibility::count(int camera1, int camera2) const {
return graph_[camera1][camera2];
}
float Visibility::distance(int a, int b) const {
int v = graph_[a][b];
if (v == 0) { return 0.0f; }
return 1.0f/float(v);
}
Paths<float> Visibility::shortestPath(int i) const {
if ((i < 0) || (i >= n_cameras_)) { return Paths<float>({}, {}); /* throw exception */}
if ((i < 0) || (i >= n_max_)) { throw ftl::exception("Invalid index"); }
vector<vector<float>> graph(n_max_, vector<float>(n_max_, 0.0f));
for (int r = 0; r < n_max_; r++) {
for (int c = 0; c < n_max_; c++) {
if (r == c) { continue; }
vector<vector<float>> graph(n_cameras_, vector<float>(n_cameras_, 0.0f));
for (int r = 0; r < n_cameras_; r++) {
for (int c = 0; c < n_cameras_; c++) {
float v = graph_[r][c];
if ((v != 0) && !mask_[r][c]) { graph[r][c] = 1.0f / v; }
if (!mask_[r][c]) {
// use inverse of count as distance in graph
graph[r][c] = distance(r, c);
}
}
}
@@ -222,5 +279,29 @@ Paths<float> Visibility::shortestPath(int i) const {
distance = 1.0f / distance;
}
return Paths<float>(res.first, res.second);
return Paths<float>(i, res.first, res.second);
}
int Visibility::argmax() const {
int a = -1;
int v = std::numeric_limits<int>::min();
for (int i = 0; i < n_max_; i++) {
if (count(i) > v) {
v = count(i);
a = i;
}
}
return a;
}
int Visibility::argmin() const {
int a = -1;
int v = std::numeric_limits<int>::max();
for (int i = 0; i < n_max_; i++) {
if (count(i) < v) {
v = count(i);
a = i;
}
}
return a;
}
@@ -6,5 +6,28 @@ add_executable(calibration_parameters_unit
)
target_include_directories(calibration_parameters_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
target_link_libraries(calibration_parameters_unit ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
target_link_libraries(calibration_parameters_unit ftlcalibration ftlcommon ftlcodecs Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
add_test(CalibrationValidateTest calibration_parameters_unit)
### Calibration Helper #########################################################
add_executable(calibration_helper_unit
./tests.cpp
./test_helper.cpp
../src/extrinsic.cpp
)
target_include_directories(calibration_helper_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
target_link_libraries(calibration_helper_unit ftlcalibration ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
add_test(CalibrationHelperTest calibration_helper_unit)
### Extrinsic calib ############################################################
add_executable(calibration_extrinsic_unit
./tests.cpp
./test_extrinsic.cpp
../src/extrinsic.cpp
)
target_include_directories(calibration_extrinsic_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
target_link_libraries(calibration_extrinsic_unit ftlcalibration ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
add_test(CalibrationExtrinsicTest calibration_extrinsic_unit)
#include "catch.hpp"
#include "visibility.hpp"
#include <ftl/calibration/visibility.hpp>
using std::vector;
using ftl::calibration::dijstra;
@@ -30,14 +31,14 @@ TEST_CASE("Dijstra's Algorithm", "") {
REQUIRE(path.distance(7) == 8);
REQUIRE(path.distance(8) == 14);
REQUIRE((path.to(1) == vector {1}));
REQUIRE((path.to(2) == vector {1, 2}));
REQUIRE((path.to(3) == vector {1, 2, 3}));
REQUIRE((path.to(4) == vector {7, 6, 5, 4}));
REQUIRE((path.to(5) == vector {7, 6, 5}));
REQUIRE((path.to(6) == vector {7, 6}));
REQUIRE((path.to(7) == vector {7}));
REQUIRE((path.to(8) == vector {1, 2, 8}));
REQUIRE((path.to(1) == vector {0, 1}));
REQUIRE((path.to(2) == vector {0, 1, 2}));
REQUIRE((path.to(3) == vector {0, 1, 2, 3}));
REQUIRE((path.to(4) == vector {0, 7, 6, 5, 4}));
REQUIRE((path.to(5) == vector {0, 7, 6, 5}));
REQUIRE((path.to(6) == vector {0, 7, 6}));
REQUIRE((path.to(7) == vector {0, 7}));
REQUIRE((path.to(8) == vector {0, 1, 2, 8}));
}
SECTION("Check connectivity") {
......