Skip to content
Snippets Groups Projects
Commit b83e473c authored by Sebastian Hahta's avatar Sebastian Hahta
Browse files

existing calibration can be used for extrinsics

parent 9312b369
No related branches found
No related tags found
1 merge request !337: use/fix existing extrinsic parameters in calibration
Pipeline #28853 passed
......@@ -419,6 +419,9 @@ const std::vector<cv::Point2d>& ExtrinsicCalibration::previousPoints(int camera)
return state_.points_prev[camera];
}
/** Number of captured point sets (frames) for the given camera. */
int ExtrinsicCalibration::getFrameCount(int camera) {
	const auto cam = static_cast<unsigned int>(camera);
	return state_.calib.points().getCount(cam);
}
......
......@@ -378,8 +378,8 @@ void ExtrinsicCalibrationView::CalibrationWindow::build() {
auto* b = new nanogui::Button(use_extrinsics, std::to_string(n));
b->setFlags(nanogui::Button::Flags::ToggleButton);
b->setPushed(ctrl_->calib().useExtrinsic(n));
b->setEnabled(ctrl_->calib().calibration(n).extrinsic.valid());
b->setChangeCallback([this, n](bool v){
LOG(INFO) << "CHANGED";
ctrl_->calib().setUseExtrinsic(n, v);
});
}
......@@ -413,6 +413,7 @@ void ExtrinsicCalibrationView::CalibrationWindow::build() {
for (int n = 0; n < ctrl_->cameraCount(); n++) {
auto* b = new nanogui::Button(fix_extrinsics, std::to_string(n));
b->setFlags(nanogui::Button::Flags::ToggleButton);
b->setEnabled(ctrl_->calib().useExtrinsic(n));
b->setPushed(ctrl_->calib().options().fix_camera_extrinsic.count(n));
b->setChangeCallback([this, n](bool v){
if (v) {
......
......@@ -206,17 +206,29 @@ public:
MSGPACK_DEFINE(points_, mask_, pairs_, calib_, is_calibrated_);
protected:
/** Calculate initial pose and triangulate points **/
void calculatePairPose(unsigned int c1, unsigned int c2);
/** Only triangulate points using existing calibration */
void triangulate(unsigned int c1, unsigned int c2);
/** Initial pairwise calibration and triangulation. */
void calculatePairPoses();
/** Calculate initial poses from pairs */
/** Calculate initial poses from pairs for non calibrated cameras */
void calculateInitialPoses();
/** Bundle adjustment on initial poses and triangulations. */
double optimize();
/** Select optimal camera for chains. Optimal camera has most visibility and
* is already calibrated (if any initial calibrations included).
*/
int selectOptimalCamera();
private:
void updateStatus_(std::string);
std::vector<CalibrationData::Calibration> calib_;
std::vector<CalibrationData::Calibration> calib_optimized_;
ftl::calibration::BundleAdjustment::Options options_;
......@@ -224,7 +236,6 @@ private:
CalibrationPoints<double> points_;
std::set<std::pair<unsigned int, unsigned int>> mask_;
std::map<std::pair<unsigned int, unsigned int>, std::tuple<cv::Mat, cv::Mat, double>> pairs_;
unsigned int c_ref_;
// true if camera already has valid calibration; initial pose estimation is
// skipped (and points are directly triangulated)
......
......@@ -115,6 +115,10 @@ struct CalibrationData {
/** Combined per-camera calibration: intrinsic and extrinsic parameters. */
struct Calibration {
	Intrinsic intrinsic;  // camera matrix and distortion coefficients
	Extrinsic extrinsic;  // camera pose (rotation and translation)

	/** 4x4 projection matrix combining intrinsic and extrinsic parameters.
	 * NOTE(review): implementation throws if either part is invalid —
	 * callers should check validity first. */
	cv::Mat matrix();

	MSGPACK_DEFINE(intrinsic, extrinsic);
};
......
......@@ -290,6 +290,24 @@ int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
distanceThresh, cv::noArray(), triangulatedPoints);
}
/** Convert homogeneous 4xN points (columns of points_in) to 3D points and
 * estimate the scale that best matches the known object geometry.
 * Returns the scale factor; points_out receives the (unscaled) 3D points. */
static double scalePoints(const std::vector<cv::Point3d> &object_points, const cv::Mat& points_in, std::vector<cv::Point3d> &points_out) {

	points_out.clear();
	points_out.reserve(points_in.cols);

	// divide by the homogeneous coordinate (must be non-zero)
	for (int i = 0; i < points_in.cols; i++) {
		const double w = points_in.at<double>(3, i);
		CHECK_NE(w, 0);
		points_out.emplace_back(points_in.at<double>(0, i) / w,
								points_in.at<double>(1, i) / w,
								points_in.at<double>(2, i) / w);
	}

	return ftl::calibration::optimizeScale(object_points, points_out);
}
double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
const cv::Mat &K2, const cv::Mat &D2,
const std::vector<cv::Point2d> &points1,
......@@ -304,20 +322,7 @@ double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
// distanceThresh unit?
recoverPose(E, points1, points2, K1, K2, R, t, 1000.0, points3dh);
points_out.clear();
points_out.reserve(points3dh.cols);
// convert from homogenous coordinates
for (int col = 0; col < points3dh.cols; col++) {
CHECK_NE(points3dh.at<double>(3, col), 0);
cv::Point3d p = cv::Point3d(points3dh.at<double>(0, col),
points3dh.at<double>(1, col),
points3dh.at<double>(2, col))
/ points3dh.at<double>(3, col);
points_out.push_back(p);
}
double s = ftl::calibration::optimizeScale(object_points, points_out);
double s = scalePoints(object_points, points3dh, points_out);
t = t * s;
auto params1 = Camera(K1, D1, cv::Mat::eye(3, 3, CV_64FC1), cv::Mat::zeros(3, 1, CV_64FC1), {0, 0});
......@@ -403,39 +408,9 @@ void ExtrinsicCalibration::updateStatus_(std::string str) {
std::atomic_store(&status_, std::make_shared<std::string>(str));
}
void ExtrinsicCalibration::calculatePairPoses() {
void ExtrinsicCalibration::calculatePairPose(unsigned int c1, unsigned int c2) {
const auto& visibility = points_.visibility();
// Calibrate all pairs. TODO: might be expensive if number of cameras is high
// if not performed for all pairs, remaining non-triangulated poits have to
// be separately triangulated later.
int i = 1;
int i_max = (camerasCount() * camerasCount()) / 2 + 1;
for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {
updateStatus_( "Calculating pose for pair " +
std::to_string(i++) + " of " + std::to_string(i_max));
if (c1 == c2) {
pairs_[{c1, c2}] = { cv::Mat::eye(cv::Size(3, 3), CV_64FC1),
cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0)),
0.0};
continue;
}
if (mask_.count({c1, c2}) > 0 ) { continue; }
if (pairs_.find({c1, c2}) != pairs_.end()) { continue; }
// require minimum number of visible points
if (visibility.count(c1, c2) < min_points_) {
LOG(WARNING) << "skipped pair (" << c1 << ", " << c2 << "), not enough points";
continue;
}
// calculate paramters and update triangulation
cv::Mat K1 = calib_[c1].intrinsic.matrix();
......@@ -461,9 +436,117 @@ void ExtrinsicCalibration::calculatePairPoses() {
t.copyTo(t_i);
transform::inverse(R_i, t_i);
pairs_[{c2, c1}] = {R_i, t_i, rmse};
}
/** Only triangulate points using existing calibration: the relative pose of
 * the pair is derived from the already-known extrinsics instead of being
 * estimated from point correspondences. Stores both directed transforms in
 * pairs_ (with NAN rmse, since no pose estimation error exists) and writes
 * the triangulated world points back into the point store. */
void ExtrinsicCalibration::triangulate(unsigned int c1, unsigned int c2) {

	cv::Mat T, R, t, R_i, t_i;

	// c2 -> c1 relative transform from existing extrinsics
	T = calib_[c1].extrinsic.matrix() *
		transform::inverse(calib_[c2].extrinsic.matrix());
	transform::getRotationAndTranslation(T, R_i, t_i);

	// c1 -> c2 relative transform (T is reused below for P2)
	T = calib_[c2].extrinsic.matrix() *
		transform::inverse(calib_[c1].extrinsic.matrix());
	transform::getRotationAndTranslation(T, R, t);

	// BUGFIX: the forward transform belongs under key {c1, c2}; it was stored
	// under {c1, c1}, overwriting the identity entry and leaving {c1, c2}
	// unset (compare the symmetric {c2, c1} assignment below).
	pairs_[{c1, c2}] = {R, t, NAN};
	pairs_[{c2, c1}] = {R_i, t_i, NAN};

	auto pts = points().getPoints({c1, c2}, 0);
	std::vector<cv::Point3d> pointsw;

	const auto& calib1 = calib_[c1];
	const auto& calib2 = calib_[c2];

	CHECK_EQ(pts[0].size(), pts[1].size());

	cv::Mat pts1(1, pts[0].size(), CV_64FC2, pts[0].data());
	cv::Mat pts2(1, pts[1].size(), CV_64FC2, pts[1].data());
	cv::Mat out(1, pts[0].size(), CV_64FC2);

	// Undistort points first. Note: undistortPoints() returns points in
	// normalized coordinates, therefore projection matrices for
	// cv::triangulatePoints() only include extrinsic parameters.
	std::vector<cv::Point2d> pts1u, pts2u;
	cv::undistortPoints(pts1, pts1u, calib1.intrinsic.matrix(), calib1.intrinsic.distCoeffs.Mat());
	cv::undistortPoints(pts2, pts2u, calib2.intrinsic.matrix(), calib2.intrinsic.distCoeffs.Mat());

	// P1 is identity (camera 1 frame); P2 is the c1 -> c2 transform from above
	cv::Mat P1 = cv::Mat::eye(3, 4, CV_64FC1);
	cv::Mat P2 = T(cv::Rect(0, 0, 4, 3));

	// documentation claims cv::triangulatePoints() requires floats; however
	// seems to only work with doubles (documentation outdated?).
	// According to https://stackoverflow.com/a/16299909 cv::triangulatePoints()
	// implements least squares method described in H&Z p312
	cv::triangulatePoints(P1, P2, pts1u, pts2u, out);

	// scalePoints() converts to non-homogenous coordinates and estimates scale
	scalePoints(points().getObject(0), out, pointsw);

	points().setTriangulatedPoints(c1, c2, pointsw);
}
/** Pairwise processing: for each camera pair with enough common points,
 * either derive pose from existing calibration and triangulate, or run full
 * pairwise pose estimation. */
void ExtrinsicCalibration::calculatePairPoses() {

	// Calibrate all pairs. TODO: might be expensive if number of cameras is high
	// if not performed for all pairs.
	int i = 1;
	int i_max = (camerasCount() * camerasCount()) / 2 + 1;

	for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
	for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {

		updateStatus_( "Calculating pose for pair " +
			std::to_string(i++) + " of " + std::to_string(i_max) +
			" and triangulating points");

		if (c1 == c2) {
			// identity transform for a camera paired with itself
			pairs_[{c1, c2}] = { cv::Mat::eye(cv::Size(3, 3), CV_64FC1),
								 cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0)),
								 0.0};
			continue;
		}

		if (pairs_.find({c1, c2}) != pairs_.end()) {
			LOG(WARNING) << "pair already processed (this should not happen)";
			continue;
		}

		// require minimum number of visible points
		if (points().visibility().count(c1, c2) < min_points_) {
			LOG(WARNING) << "skipped pair (" << c1 << ", " << c2 << "), not enough points";
			// BUGFIX: was `return`, which aborted ALL remaining pairs when a
			// single pair lacked points; only this pair should be skipped.
			continue;
		}

		if (is_calibrated_[c1] && is_calibrated_[c2]) {
			// both cameras already calibrated: pose known, only triangulate
			triangulate(c1, c2);
		}
		else {
			if (mask_.count({c1, c2}) > 0) { continue; }
			calculatePairPose(c1, c2);
		}
	}}
}
/** Select optimal reference camera: the one with most views of the
 * calibration pattern. If existing calibrations are used, the reference
 * camera must already be calibrated, so calibrated cameras are preferred;
 * fall back to overall visibility only when none is calibrated. */
int ExtrinsicCalibration::selectOptimalCamera() {
	int c = 0;
	int calibrated_c_ref_max = 0;

	// BUGFIX: loop bound was `i < is_calibrated_[i]` — indexing with the loop
	// variable instead of iterating over the container size.
	for (unsigned int i = 0; i < is_calibrated_.size(); i++) {
		if (is_calibrated_[i] && points().getCount(i) > calibrated_c_ref_max) {
			calibrated_c_ref_max = points().getCount(i);
			c = i;
		}
	}

	// BUGFIX: was `if (!calibrated_c_ref_max == 0)`, which by precedence is
	// `(!max) == 0`, i.e. "a calibrated camera WAS found" — the inverse of the
	// intended fallback. Use visibility only when no calibrated camera exists.
	if (calibrated_c_ref_max == 0) {
		c = points().visibility().argmax();
	}
	return c;
}
void ExtrinsicCalibration::calculateInitialPoses() {
updateStatus_("Initial poses from chained transformations");
......@@ -480,20 +563,19 @@ void ExtrinsicCalibration::calculateInitialPoses() {
if (pairs_.count({c1, c2}) == 0) {
visibility.mask(c1, c2);
}
// mask bad pairs (high rmse)
/*if (std::get<2>(pairs_.at({c1, c2})) > 16.0) {
visibility.mask(c1, c2);
}*/
}}
// pick optimal camera: most views of calibration pattern
c_ref_ = visibility.argmax();
// select optimal camera to calculate chains to
auto c_ref = selectOptimalCamera();
auto paths = visibility.shortestPath(c_ref_);
auto paths = visibility.shortestPath(c_ref);
for (unsigned int c = 0; c < camerasCount(); c++) {
if (c == c_ref_) { continue; }
if (is_calibrated_[c]) {
// already calibrated. skip chain
continue;
}
if (c == c_ref) { continue; }
cv::Mat R_chain = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
cv::Mat t_chain = cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0));
......@@ -517,7 +599,6 @@ void ExtrinsicCalibration::calculateInitialPoses() {
while(path.size() > 1);
// note: direction of chain in the loop (ref to target transformation)
calib_[c].extrinsic =
CalibrationData::Extrinsic(R_chain, t_chain).inverse();
}
......@@ -629,8 +710,6 @@ double ExtrinsicCalibration::optimize() {
if (m == 0) {
n_points_missing++;
break;
// not triangulated (see earlier steps)
// TODO: triangulate here
}
if (n % 2 == 0 && n > 1) {
// mean of two points if number of points even
......@@ -663,13 +742,10 @@ double ExtrinsicCalibration::optimize() {
if (float(n_points_missing)/float(n_points - n_points_bad) > threhsold_warning_) {
// low number of points; most points only visible in pairs?
LOG(WARNING) << "Large number of points skipped. Are there enough "
"visible points between stereo camera pairs? (TODO: "
"implement necessary triangulation after pair "
"calibration)";
"visible points between stereo camera pairs?";
}
updateStatus_("Bundle adjustment");
options_.fix_camera_extrinsic = {int(c_ref_)};
options_.verbose = true;
options_.max_iter = 250; // should converge much earlier
......
......@@ -359,7 +359,7 @@ double ftl::calibration::optimizeScale(const vector<Point3d> &object_points, vec
vector<double> d;
ceres::Problem problem;
auto loss_function = new ceres::HuberLoss(1.0);
ceres::LossFunction* loss_function = nullptr;
// use all pairwise distances
for (size_t i = 0; i < object_points.size(); i++) {
......
......@@ -87,7 +87,7 @@ cv::Mat CalibrationData::Intrinsic::matrix(cv::Size size) const {
}
/** Intrinsic calibration is valid once a non-default resolution is set. */
bool CalibrationData::Intrinsic::valid() const {
	// BUGFIX: remove the stale `== cv::Size{0, 0}` return that preceded this
	// one and always won, inverting the validity check.
	return (resolution != cv::Size{0, 0});
}
////////////////////////////////////////////////////////////////////////////////
......@@ -243,3 +243,18 @@ CalibrationData::Calibration& CalibrationData::get(ftl::codecs::Channel channel)
/** True if calibration data exists for the given channel. */
bool CalibrationData::hasCalibration(ftl::codecs::Channel channel) const {
	return data_.find(channel) != data_.end();
}
// ==== CalibrationData::Calibration ===========================================
#include <loguru.hpp>
/** Build a 4x4 matrix combining intrinsic and extrinsic parameters.
 * @throws ftl::exception if either intrinsic or extrinsic is invalid. */
cv::Mat CalibrationData::Calibration::matrix() {
	if (!intrinsic.valid() || !extrinsic.valid()) {
		throw ftl::exception("Invalid calibration");
	}

	cv::Mat P = extrinsic.matrix();
	// BUGFIX: `R = intrinsic.matrix() * R` rebound the local Mat header and
	// left P unmodified (cv::Mat assignment replaces the header rather than
	// writing through the ROI); write the product back into P's top-left 3x3
	// block explicitly. Leftover debug LOG(INFO) calls removed.
	// NOTE(review): a full projection would normally be K * [R|t] — confirm
	// whether the translation column should also be multiplied by K.
	cv::Mat KR = intrinsic.matrix() * P(cv::Rect(0, 0, 3, 3));
	KR.copyTo(P(cv::Rect(0, 0, 3, 3)));

	return P;
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment