#pragma once
#include <opencv2/core.hpp>
#include "visibility.hpp"
#include "util.hpp"
using cv::Mat;
using cv::Size;
using cv::Point2d;
using cv::Point3d;
using cv::Vec4d;
using cv::Scalar;
using std::vector;
using std::pair;
class CalibrationTarget {
public:
explicit CalibrationTarget(double length):
n_points(2),
calibration_bar_length_(length)
{}
/* @brief Estimate the scale factor.
* @param points3d 3D points (multiple views may be passed)
*/
double estimateScale(vector<Point3d> points3d);
size_t n_points;
private:
double calibration_bar_length_;
};
class MultiCameraCalibrationNew {
public:
MultiCameraCalibrationNew( size_t n_cameras, size_t reference_camera,
Size resolution, CalibrationTarget target,
int fix_intrinsics=1);
void setCameraParameters(size_t idx, const Mat &K, const Mat &distCoeffs);
void setCameraParameters(size_t idx, const Mat &K);
void addPoints(vector<vector<Point2d>> points2d, vector<int> visibility);
size_t getViewsCount();
size_t getCamerasCount() { return n_cameras_; }
size_t getOptimalReferenceCamera();
size_t getMinVisibility() { return visibility_graph_.getMinVisibility(); }
size_t getViewsCount(size_t camera) { return visibility_graph_.getViewsCount(camera); }
void setFixIntrinsic(int value) { fix_intrinsics_ = (value == 1 ? 5 : 0); }
void loadInput(const std::string &filename, const vector<size_t> &cameras = {});
void saveInput(cv::FileStorage &fs);
void saveInput(const std::string &filename);
Mat getCameraMat(size_t idx);
Mat getCameraMatNormalized(size_t idx, double scale_x = 1.0, double scale_y = 1.0);
Mat getDistCoeffs(size_t idx);
double calibrateAll(int reference_camera = -1);
double getReprojectionError();
void getCalibration(vector<Mat> &R, vector<Mat> &t);
void projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points);
void projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points);
std::vector<cv::Point3d> object_points_;
protected:
bool isVisible(size_t camera, size_t idx);
bool isValid(size_t camera, size_t idx);
bool isValid(size_t idx);
Point3d getPoint3D(size_t camera, size_t i);
vector<Point2d> getPoints(size_t camera, size_t idx);
vector<vector<Point2d>> getAllPoints(size_t camera, vector<size_t> idx);
void getVisiblePoints( vector<size_t> cameras,
vector<vector<Point2d>> &points,
vector<size_t> &idx);
size_t getVisiblePointsCount(vector<size_t> cameras) {
// TODO: for pairs can use visibility graph adjacency matrix
vector<vector<Point2d>> points2d;
vector<size_t> idx;
getVisiblePoints(cameras, points2d, idx);
return idx.size();
}
size_t getTotalPointsCount() {
return points2d_[0].size();
}
vector<Point3d> getPoints3D(size_t idx);
/* @brief Find points which are visible in all given cameras. The
* corresponding indices are returned in the idx vector.
*/
void getVisiblePoints3D(vector<size_t> cameras,
vector<vector<Point3d>> &points,
vector<size_t> &idx);
/* @brief Update 3D points with new values. If no earlier data exists, the
* new data is used as-is; otherwise the average is calculated.
*/
void updatePoints3D(size_t camera, Point3d new_point, size_t idx, const Mat &R, const Mat &t);
void updatePoints3D(size_t camera, vector<Point3d> new_points, vector<size_t> idx, const Mat &R, const Mat &t);
/* @brief Calculates 3D points that are not visible in reference camera
* from transformations in visible cameras.
*/
void calculateMissingPoints3D();
void getTransformation(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
double calibratePair(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
/* @brief Calculate reprojection error of visible points (triangulation) */
double getReprojectionError(size_t c_from, size_t c_to, const Mat &K, const Mat &R, const Mat &T);
/* @brief Calculate reprojection error of visible points (optimized/averaged points) */
double getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &T);
/* @brief Remove old calibration data calculated by calibrateAll */
void reset();
private:
CalibrationTarget target_;
Visibility visibility_graph_;
bool is_calibrated_;
size_t n_cameras_;
size_t reference_camera_;
size_t min_visible_points_;
int fix_intrinsics_;
Size resolution_;
vector<Mat> K_;
vector<Mat> dist_coeffs_;
vector<Mat> R_;
vector<Mat> t_;
vector<Point3d> points3d_optimized_;
vector<vector<Point3d>> points3d_;
vector<vector<Point2d>> points2d_;
vector<vector<int>> visible_;
vector<vector<int>> inlier_; // "inlier"
vector<vector<double>> weights_;
int fm_method_;
double fm_ransac_threshold_;
double fm_confidence_;
};
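/* Usage sketch (hypothetical driver; K[i], distCoeffs[i] and the per-frame
* detection results points2d_frame / visible_frame are assumed to come from
* elsewhere, e.g. findCorrespondingPoints()):
*
*   CalibrationTarget target(0.30);  // two-marker bar, markers 0.30 m apart
*   MultiCameraCalibrationNew calib(3, 0, Size(1280, 720), target);
*   for (size_t i = 0; i < 3; i++) { calib.setCameraParameters(i, K[i], distCoeffs[i]); }
*   calib.addPoints(points2d_frame, visible_frame);  // once per captured frame
*   double rms = calib.calibrateAll();               // default -1: pick reference automatically
*   vector<Mat> R, t;
*   calib.getCalibration(R, t);
*/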
#include "util.hpp"
#include <loguru.hpp>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/aruco.hpp>
using std::vector;
using cv::Mat;
using cv::Point2i;
using cv::Point2d;
using cv::Point3d;
using cv::Size;
using cv::Scalar;
/* @brief Visualize epipolar lines for given points in the other image.
* @param Points in image
* @param Corresponding image where to draw the lines
* @param Fundamental matrix
* @param Line color
* @param Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
*/
void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image) {
Mat lines;
cv::computeCorrespondEpilines(points, image, F, lines);
for (int i = 0; i < lines.rows; i++) {
cv::Vec3f l = lines.at<cv::Vec3f>(i);
float a = l[0];
float b = l[1];
float c = l[2];
float x0, y0, x1, y1;
x0 = 0;
y0 = (-c -a * x0) / b;
x1 = img.cols;
y1 = (-c -a * x1) / b;
cv::line(img, cv::Point(x0, y0), cv::Point(x1,y1), color, 1);
}
}
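/* Usage sketch (hypothetical; points1/points2 are matched pixel coordinates
* and img1/img2 the corresponding images):
*
*   Mat F = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3.0, 0.99);
*   drawEpipolarLines(points1, img2, F, Scalar(0, 255, 0), 1); // lines drawn in image 2
*   drawEpipolarLines(points2, img1, F, Scalar(0, 255, 0), 2); // lines drawn in image 1
*/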
/* @brief Find calibration points: two ArUco markers per image.
* The visible parameter is both input and output.
*/
int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
vector<int> &visible) {
using namespace cv;
int count = 0;
visible.resize(imgs.size(), 1);
points.clear();
points.resize(imgs.size(), vector<Point2d>(2, Point2d(0.0, 0.0)));
auto dictionary = aruco::getPredefinedDictionary(aruco::DICT_5X5_50);
vector<vector<Point2f>> corners;
vector<int> ids;
for (size_t i = 0; i < imgs.size(); i++) {
if (visible[i] == 0) continue;
aruco::detectMarkers(imgs[i], dictionary, corners, ids);
if (corners.size() == 2) {
Point2d center0((corners[0][0] + corners[0][1] + corners[0][2] + corners[0][3]) / 4.0);
Point2d center1((corners[1][0] + corners[1][1] + corners[1][2] + corners[1][3]) / 4.0);
if (ids[0] != 0) { std::swap(center0, center1); }
points[i][0] = center0; points[i][1] = center1;
visible[i] = 1;
count++;
}
else {
visible[i] = 0;
}
}
return count;
}
/* @brief Find ArUco marker centers.
* @param (input) image
* @param (output) found centers
* @param (output) marker IDs
*/
void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict) {
using namespace cv;
points.clear();
auto dictionary = aruco::getPredefinedDictionary(dict);
vector<vector<Point2f>> corners;
aruco::detectMarkers(img, dictionary, corners, ids);
for (size_t j = 0; j < corners.size(); j++) {
Point2f center((corners[j][0] + corners[j][1] + corners[j][2] + corners[j][3]) / 4.0);
points.push_back(center);
}
}
/* OpenCV's recoverPose() assumes both cameras have identical intrinsic
 * parameters; this wrapper normalizes the points with each camera's own
 * intrinsics so that an identity camera matrix can be used instead.
 */
int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
Mat &_cameraMatrix1, Mat &_cameraMatrix2,
Mat &_R, Mat &_t, double distanceThresh,
Mat &triangulatedPoints) {
Mat points1, points2, cameraMatrix1, cameraMatrix2, cameraMatrix;
Mat(_points1.size(), 2, CV_64FC1, _points1.data()).convertTo(points1, CV_64F);
Mat(_points2.size(), 2, CV_64FC1, _points2.data()).convertTo(points2, CV_64F);
_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
cameraMatrix = Mat::eye(Size(3, 3), CV_64FC1);
double fx1 = cameraMatrix1.at<double>(0,0);
double fy1 = cameraMatrix1.at<double>(1,1);
double cx1 = cameraMatrix1.at<double>(0,2);
double cy1 = cameraMatrix1.at<double>(1,2);
double fx2 = cameraMatrix2.at<double>(0,0);
double fy2 = cameraMatrix2.at<double>(1,1);
double cx2 = cameraMatrix2.at<double>(0,2);
double cy2 = cameraMatrix2.at<double>(1,2);
points1.col(0) = (points1.col(0) - cx1) / fx1;
points1.col(1) = (points1.col(1) - cy1) / fy1;
points2.col(0) = (points2.col(0) - cx2) / fx2;
points2.col(1) = (points2.col(1) - cy2) / fy2;
// TODO mask
// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
// modules/calib3d/src/five-point.cpp (461)
//
// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
return cv::recoverPose(E, points1, points2, cameraMatrix, _R, _t, distanceThresh, cv::noArray(), triangulatedPoints);
}
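/* Why the identity camera matrix is valid here: normalizing each point set
* with its own intrinsics, x_hat = K^-1 * x, moves both views into normalized
* image coordinates, where the essential-matrix constraint
* x_hat2^T * E * x_hat1 = 0 holds with K = I. A single recoverPose() call is
* therefore correct even when the two cameras' intrinsics differ.
*/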
/* @brief Calculate RMS reprojection error
* @param 3D points
* @param Expected 2D points
* @param Camera matrix
* @param Rotation matrix/vector
* @param Translation vector
*/
double reprojectionError( const vector<Point3d> &points3d, const vector<Point2d> &points2d,
const Mat &K, const Mat &rvec, const Mat &tvec) {
DCHECK(points3d.size() == points2d.size());
Mat _rvec;
if (rvec.size() == Size(3, 3)) { cv::Rodrigues(rvec, _rvec); }
else { _rvec = rvec; }
DCHECK(_rvec.size() == Size(1, 3) || _rvec.size() == Size(3, 1));
vector<Point2d> points_reprojected;
cv::projectPoints(points3d, _rvec, tvec, K, cv::noArray(), points_reprojected);
int n_points = points2d.size();
double err = 0.0;
for (int i = 0; i < n_points; i++) {
Point2d a = points2d[i] - points_reprojected[i];
err += a.x * a.x + a.y * a.y;
}
return sqrt(err / n_points);
}
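/* For reference, the value returned above is the root-mean-square
* reprojection error over the N correspondences:
*
*   rms = sqrt( (1/N) * sum_i || x_i - project(X_i; K, rvec, tvec) ||^2 )
*
* where project() is cv::projectPoints() without distortion.
*/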
#pragma once
#include <loguru.hpp>
#include <opencv2/core.hpp>
#include <opencv2/aruco.hpp>
using std::vector;
using cv::Mat;
using cv::Point2i;
using cv::Point2d;
using cv::Point3d;
using cv::Size;
using cv::Scalar;
/* @brief Visualize epipolar lines for given points in the other image.
* @param Points in image
* @param Corresponding image where to draw the lines
* @param Fundamental matrix
* @param Line color
* @param Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
*/
void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image=1);
/* @brief Find calibration points: two ArUco markers per image.
*/
int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
vector<int> &visible);
/* @brief Find ArUco marker centers.
* @param (input) image
* @param (output) found centers
* @param (output) marker IDs
*/
void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict=cv::aruco::DICT_4X4_50);
/* OpenCV's recoverPose() assumes both cameras have identical intrinsic
 * parameters; this wrapper normalizes the points with each camera's own
 * intrinsics first.
 *
 * https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
 */
int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
Mat &_cameraMatrix1, Mat &_cameraMatrix2,
Mat &_R, Mat &_t, double distanceThresh,
Mat &triangulatedPoints);
/* @brief Calculate RMS reprojection error
* @param 3D points
* @param Expected 2D points
* @param Camera matrix
* @param Rotation matrix/vector
* @param Translation vector
*/
double reprojectionError( const vector<Point3d> &points3d, const vector<Point2d> &points2d,
const Mat &K, const Mat &rvec, const Mat &tvec);
inline double euclideanDistance(Point3d a, Point3d b) {
Point3d c = a - b;
return sqrt(c.x*c.x + c.y*c.y + c.z*c.z);
}
inline Point3d transformPoint(Point3d p, Mat R, Mat t) {
DCHECK(R.size() == Size(3, 3));
DCHECK(t.size() == Size(1, 3));
return Point3d(Mat(R * Mat(p) + t));
}
inline Point3d inverseTransformPoint(Point3d p, Mat R, Mat t) {
DCHECK(R.size() == Size(3, 3));
DCHECK(t.size() == Size(1, 3));
return Point3d(Mat(R.t() * (Mat(p) - t)));
}
inline Mat getMat4x4(const Mat &R, const Mat &t) {
DCHECK(R.size() == Size(3, 3));
DCHECK(t.size() == Size(1, 3));
Mat M = Mat::eye(Size(4, 4), CV_64FC1);
R.copyTo(M(cv::Rect(0, 0, 3, 3)));
t.copyTo(M(cv::Rect(3, 0, 1, 3)));
return M;
}
inline void getRT(const Mat &RT, Mat &R, Mat &t) {
R = RT(cv::Rect(0, 0, 3, 3));
t = RT(cv::Rect(3, 0, 1, 3));
}
// Calculate the transform from (R1, t1) to (R2, t2), where both parameter
// pairs map into the same (target) coordinate system
inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2, Mat &R, Mat &tvec, Mat &M) {
Mat M_src = getMat4x4(R1, T1);
Mat M_dst = getMat4x4(R2, T2);
M = M_dst.inv() * M_src;
R = M(cv::Rect(0, 0, 3, 3));
tvec = M(cv::Rect(3, 0, 1, 3));
}
inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2,Mat &R, Mat &tvec) {
Mat M;
calculateTransform(R1, T1, R2, T2, R, tvec, M);
}
inline void calculateInverse(const Mat &R2, const Mat &T2, Mat &R, Mat &T) {
Mat R1 = Mat::eye(Size(3, 3), CV_64FC1);
Mat T1(Size(1, 3), CV_64FC1, Scalar(0.0));
calculateTransform(R1, T1, R2, T2, R, T);
}
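/* Usage sketch (hypothetical poses; R1,t1 and R2,t2 are two camera poses
* expressed in the same target coordinate system):
*
*   Mat R, t, M;
*   calculateTransform(R1, t1, R2, t2, R, t, M);          // M = M2^-1 * M1
*   Point3d p_in_2 = transformPoint(p, R, t);             // p mapped into pose 2's frame
*   Point3d p_back = inverseTransformPoint(p_in_2, R, t); // round trip recovers p
*/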
#include <numeric>
#include <algorithm>
#include <climits>
#include <loguru.hpp>
#include <queue>
#include "visibility.hpp"
using cv::Mat;
using cv::Scalar;
using cv::Size;
using std::vector;
using std::pair;
using std::make_pair;
Visibility::Visibility(int n_cameras) : n_cameras_(n_cameras) {
visibility_ = Mat(Size(n_cameras, n_cameras), CV_32SC1, Scalar(0));
count_ = vector<int>(n_cameras, 0);
}
void Visibility::update(vector<int> &visible) {
DCHECK(visible.size() == (size_t) n_cameras_);
for (int i = 0; i < n_cameras_; i++) {
if (visible[i] == 0) continue;
count_[i]++;
for (int j = 0; j < n_cameras_; j++) {
if (i == j) continue;
if (visible[j] == 1) visibility_.at<int>(i, j)++;
}
}
}
int Visibility::getOptimalCamera() {
// most visible on average
int best_i = 0;
double best_score = -INFINITY;
for (int i = 0; i < visibility_.rows; i++) {
double score = 0.0;
for (int x = 0; x < visibility_.cols; x++) {
score += visibility_.at<int>(i, x);
}
score = score / (double) visibility_.cols;
if (score > best_score) {
best_i = i;
best_score = score;
}
}
return best_i;
}
void Visibility::deleteEdge(int camera1, int camera2)
{
visibility_.at<int>(camera1, camera2) = 0;
visibility_.at<int>(camera2, camera1) = 0;
}
int Visibility::getMinVisibility() {
int min_count = INT_MAX;
for (int i = 0; i < n_cameras_; i++) {
if (count_[i] < min_count) {
min_count = count_[i];
}
}
return min_count;
}
int Visibility::getViewsCount(int camera) {
return count_[camera];
}
vector<vector<pair<int, int>>> Visibility::findShortestPaths(int reference) {
DCHECK(reference < n_cameras_);
vector<vector<pair<int, int>>> res(n_cameras_);
for (int i = 0; i < n_cameras_; i++) {
res[i] = findShortestPath(i, reference);
}
return res;
}
vector<pair<int, int>> Visibility::findShortestPath(int from, int to) {
if (from == to) return vector<pair<int, int>>();
vector<bool> visited(n_cameras_, false);
vector<double> distances(n_cameras_, INFINITY);
vector<int> previous(n_cameras_, -1);
distances[from] = 0.0;
auto cmp = [](pair<int, double> u, pair<int, double> v) { return u.second > v.second; };
std::priority_queue<pair<int, double>, vector<pair<int, double>>, decltype(cmp)> pq(cmp);
pq.push(make_pair(from, distances[from]));
while(!pq.empty()) {
pair<int, double> current = pq.top();
pq.pop();
int current_id = current.first;
if (visited[current_id]) continue; // stale queue entry; node already finalized
double current_distance = distances[current_id];
visited[current_id] = true;
for (int i = 0; i < n_cameras_; i++) {
int count = visibility_.at<int>(current_id, i);
if (count == 0) continue; // not connected
double distance = 1.0 / (double) count;
double new_distance = current_distance + distance;
if (distances[i] > new_distance) {
distances[i] = new_distance;
previous[i] = current_id;
pq.push(make_pair(i, distances[i]));
}
}
}
vector<pair<int, int>> res;
int prev = previous[to];
int current = to;
do {
res.push_back(make_pair(current, prev));
current = prev;
prev = previous[prev];
}
while(prev != -1);
std::reverse(res.begin(), res.end());
return res;
}
vector<int> Visibility::getClosestCameras(int c) {
// initialize original index locations
vector<int> idx(n_cameras_);
std::iota(idx.begin(), idx.end(), 0);
int* views = visibility_.ptr<int>(c);
// sort camera indices by shared view count
std::sort(idx.begin(), idx.end(),
[views](size_t i1, size_t i2) {return views[i1] < views[i2];});
return idx;
}
#pragma once
#include <opencv2/core.hpp>
using cv::Mat;
using std::vector;
using std::pair;
class Visibility {
public:
explicit Visibility(int n_cameras);
/**
* @brief Update the visibility graph.
* @param Which cameras see the feature(s) in this iteration
*/
void update(vector<int> &visible);
/**
* @brief For all cameras, find shortest (optimal) paths to reference
* camera
* @param Id of reference camera
*
* Calculates shortest paths in a weighted graph using Dijkstra's
* algorithm. Edge weights are the inverse of the number of shared views
* between cameras (nodes).
*
* @todo Add constant weight for each edge (prefer fewer edges)
*/
vector<vector<pair<int, int>>> findShortestPaths(int reference);
vector<int> getClosestCameras(int c);
void deleteEdge(int camera1, int camera2);
int getOptimalCamera();
/** @brief Returns the smallest visibility count (any camera)
*/
int getMinVisibility();
/** @brief Returns the view count for the given camera
*/
int getViewsCount(int camera);
protected:
/**
* @brief Find shortest path between nodes
* @param Source node id
* @param Destination node id
*/
vector<pair<int, int>> findShortestPath(int from, int to);
private:
int n_cameras_; // @brief number of cameras
Mat visibility_; // @brief adjacency matrix
vector<int> count_;
};
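/* Usage sketch: three cameras, where pairs (0,1) and (1,2) see the target
* together; camera 1 is then the best-connected reference.
*
*   Visibility graph(3);
*   vector<int> frame1 = {1, 1, 0};
*   vector<int> frame2 = {0, 1, 1};
*   graph.update(frame1);
*   graph.update(frame2);
*   int ref = graph.getOptimalCamera();        // camera 1
*   auto paths = graph.findShortestPaths(ref); // e.g. 0->1 and 2->1
*/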
set(CALIBSRC
src/main.cpp
src/lens.cpp
src/stereo.cpp
src/align.cpp
src/common.cpp
)
add_executable(ftl-calibrate ${CALIBSRC})
target_include_directories(ftl-calibrate PRIVATE src)
target_link_libraries(ftl-calibrate ftlcalibration ftlcommon Threads::Threads ${OpenCV_LIBS})
#include "align.hpp"
#include <loguru.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using std::map;
using std::string;
using cv::Mat;
using std::vector;
using cv::Point2f;
using cv::Size;
struct Rec4f {
float left;
float right;
float top;
float bottom;
};
bool loadIntrinsicsMap(const std::string &ifile, const cv::Size &imageSize, Mat &map1, Mat &map2, Mat &cameraMatrix, float scale) {
using namespace cv;
FileStorage fs;
// reading intrinsic parameters
fs.open((ifile).c_str(), FileStorage::READ);
if (!fs.isOpened()) {
LOG(WARNING) << "Could not open intrinsics file : " << ifile;
return false;
}
LOG(INFO) << "Intrinsics from: " << ifile;
Mat D1;
fs["M"] >> cameraMatrix;
fs["D"] >> D1;
//cameraMatrix *= scale;
initUndistortRectifyMap(cameraMatrix, D1, Mat::eye(3,3, CV_64F), cameraMatrix, imageSize, CV_16SC2,
map1, map2);
return true;
}
inline bool hasOption(const map<string, string> &options, const std::string &opt) {
return options.find(opt) != options.end();
}
inline std::string getOption(map<string, string> &options, const std::string &opt) {
auto str = options[opt];
return str.substr(1,str.size()-2);
}
static const float kPI = 3.14159f;
/*static float calculateZRotation(const vector<Point2f> &points, Size &boardSize) {
Point2f tl = points[boardSize.width * (boardSize.height / 2)];
Point2f tr = points[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
float dx = tr.x - tl.x;
float dy = tr.y - tl.y;
float angle = atan2(dy, dx) * (180.0f / kPI);
return angle;
}
static Point2f parallaxDistortion(const vector<Point2f> &points, Size &boardSize) {
Point2f tl = points[0];
Point2f tr = points[boardSize.width-1];
Point2f bl = points[(boardSize.height-1)*boardSize.width];
Point2f br = points[points.size()-1];
float dx1 = tr.x - tl.x;
float dx2 = br.x - bl.x;
float ddx = dx1 - dx2;
float dy1 = bl.y - tl.y;
float dy2 = br.y - tr.y;
float ddy = dy1 - dy2;
return Point2f(ddx, ddy);
}*/
static float distanceTop(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
Point2f tl = points[0];
Point2f tr = points[boardSize.width-1];
float pixSize = tr.x - tl.x;
float mmSize = boardSize.width * squareSize;
float focal = camMatrix.at<double>(0,0);
return ((mmSize / pixSize) * focal) / 1000.0f;
}
static float distanceBottom(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
Point2f bl = points[(boardSize.height-1)*boardSize.width];
Point2f br = points[points.size()-1];
float pixSize = br.x - bl.x;
float mmSize = boardSize.width * squareSize;
float focal = camMatrix.at<double>(0,0);
return ((mmSize / pixSize) * focal) / 1000.0f;
}
static float distanceLeft(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
Point2f bl = points[(boardSize.height-1)*boardSize.width];
Point2f tl = points[0];
float pixSize = bl.y - tl.y;
float mmSize = boardSize.height * squareSize;
float focal = camMatrix.at<double>(0,0);
return ((mmSize / pixSize) * focal) / 1000.0f;
}
static float distanceRight(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
Point2f tr = points[boardSize.width-1];
Point2f br = points[points.size()-1];
float pixSize = br.y - tr.y;
float mmSize = boardSize.height * squareSize;
float focal = camMatrix.at<double>(0,0);
return ((mmSize / pixSize) * focal) / 1000.0f;
}
static Rec4f distances(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
return {
-distanceLeft(camMatrix, points, boardSize, squareSize),
-distanceRight(camMatrix, points, boardSize, squareSize),
-distanceTop(camMatrix, points, boardSize, squareSize),
-distanceBottom(camMatrix, points, boardSize, squareSize)
};
}
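/* Each helper above is the same pinhole-model estimate applied to one board
* edge: an edge of physical length W millimetres spanning w pixels, seen with
* focal length f pixels, lies at depth
*
*   Z = (W / w) * f / 1000   [metres]
*
* so unequal top/bottom or left/right depths indicate board tilt.
*/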
/*static float distance(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
Point2f tl = points[boardSize.width * (boardSize.height / 2)];
Point2f tr = points[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
float pixSize = tr.x - tl.x;
float mmSize = boardSize.width * squareSize;
float focal = camMatrix.at<double>(0,0);
return ((mmSize / pixSize) * focal) / 1000.0f;
}
static Point2f diffY(const vector<Point2f> &pointsA, const vector<Point2f> &pointsB, Size &boardSize) {
Point2f tlA = pointsA[boardSize.width * (boardSize.height / 2)];
Point2f trA = pointsA[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
Point2f tlB = pointsB[boardSize.width * (boardSize.height / 2)];
Point2f trB = pointsB[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
float d1 = tlA.y - tlB.y;
float d2 = trA.y - trB.y;
return Point2f(d1,d2);
}*/
static const float kDistanceThreshold = 0.005f;
static void showAnaglyph(const Mat &frame_l, const Mat &frame_r, Mat &img3d) {
using namespace cv;
float data[] = {0.299f, 0.587f, 0.114f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.299f, 0.587f, 0.114f};
Mat m(2, 9, CV_32FC1, data);
//Mat img3d;
img3d = Mat(frame_l.size(), CV_8UC3);
for (int y=0; y<img3d.rows; y++) {
unsigned char *row3d = img3d.ptr(y);
const unsigned char *rowL = frame_l.ptr(y);
const unsigned char *rowR = frame_r.ptr(y);
for (int x=0; x<img3d.cols*3; x+=3) {
const uchar lb = rowL[x+0];
const uchar lg = rowL[x+1];
const uchar lr = rowL[x+2];
const uchar rb = rowR[x+0];
const uchar rg = rowR[x+1];
const uchar rr = rowR[x+2];
row3d[x+0] = lb*m.at<float>(0,6) + lg*m.at<float>(0,7) + lr*m.at<float>(0,8) + rb*m.at<float>(1,6) + rg*m.at<float>(1,7) + rr*m.at<float>(1,8);
row3d[x+1] = lb*m.at<float>(0,3) + lg*m.at<float>(0,4) + lr*m.at<float>(0,5) + rb*m.at<float>(1,3) + rg*m.at<float>(1,4) + rr*m.at<float>(1,5);
row3d[x+2] = lb*m.at<float>(0,0) + lg*m.at<float>(0,1) + lr*m.at<float>(0,2) + rb*m.at<float>(1,0) + rg*m.at<float>(1,1) + rr*m.at<float>(1,2);
}
}
//imshow("Anaglyph", img3d);
//return img3d;
}
void ftl::calibration::align(map<string, string> &opt) {
using namespace cv;
float squareSize = 36.0f;
VideoCapture camA(0);
VideoCapture camB(1);
if (!camA.isOpened() || !camB.isOpened()) {
LOG(ERROR) << "Could not open a camera device";
return;
}
camA.set(cv::CAP_PROP_FRAME_WIDTH, 1280); // TODO Use settings
camA.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
camB.set(cv::CAP_PROP_FRAME_WIDTH, 1280);
camB.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
Mat map1, map2, cameraMatrix;
Size imgSize(1280,720);
loadIntrinsicsMap((hasOption(opt, "profile")) ? getOption(opt,"profile") : "./panasonic.yml", imgSize, map1, map2, cameraMatrix, 1.0f);
Size boardSize(9,6);
#if CV_VERSION_MAJOR >= 4
int chessBoardFlags = CALIB_CB_NORMALIZE_IMAGE;
#else
int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE;
if (!settings.useFisheye) {
// fast check erroneously fails with high distortions like fisheye
chessBoardFlags |= CALIB_CB_FAST_CHECK;
}
#endif
bool anaglyph = true;
while (true) {
Mat frameA, fA;
Mat frameB, fB;
camA.grab();
camB.grab();
camA.retrieve(frameA);
camB.retrieve(frameB);
remap(frameA, fA, map1, map2, INTER_LINEAR);
remap(frameB, fB, map1, map2, INTER_LINEAR);
// Get the chessboard
vector<Point2f> pointBufA;
vector<Point2f> pointBufB;
bool foundA, foundB;
foundA = findChessboardCornersSB(fA, boardSize,
pointBufA, chessBoardFlags);
foundB = findChessboardCornersSB(fB, boardSize,
pointBufB, chessBoardFlags);
// Step 1: Position cameras correctly with respect to chessboard
// - print distance estimate etc
// Step 2: Show individual camera tilt degrees with left / right indicators
// Show also up down tilt perspective error
// Step 3: Display current baseline in mm
if (foundA) {
// Draw the corners.
//drawChessboardCorners(fA, boardSize,
// Mat(pointBufA), foundA);
}
if (foundB) {
// Draw the corners.
//drawChessboardCorners(fB, boardSize,
// Mat(pointBufB), foundB);
}
Mat anag;
showAnaglyph(fA, fB, anag);
if (foundA) {
Rec4f dists = distances(cameraMatrix, pointBufA, boardSize, squareSize);
//Rec4f angs = angles(pointBufA, boardSize);
// TODO Check angles also...
bool lrValid = std::abs(dists.left-dists.right) <= kDistanceThreshold;
bool tbValid = std::abs(dists.top-dists.bottom) <= kDistanceThreshold;
bool tiltUp = dists.top < dists.bottom && !tbValid;
bool tiltDown = dists.top > dists.bottom && !tbValid;
bool rotLeft = dists.left > dists.right && !lrValid;
bool rotRight = dists.left < dists.right && !lrValid;
// TODO Draw lines
Point2f bl = pointBufA[(boardSize.height-1)*boardSize.width];
Point2f tl = pointBufA[0];
Point2f tr = pointBufA[boardSize.width-1];
Point2f br = pointBufA[pointBufA.size()-1];
line(anag, tl, tr, (!lrValid && tiltUp) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, bl, br, (!lrValid && tiltDown) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, tl, bl, (!tbValid && rotLeft) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, tr, br, (!tbValid && rotRight) ? Scalar(0,0,255) : Scalar(0,255,0));
//fA = fA(Rect(tl.x - 100, tl.y- 100, br.x-tl.x + 200, br.y-tl.y + 200));
// Show distance error between cameras
// Show estimated baseline
//if (step == 0) {
// Point2f pd = parallaxDistortion(pointBufA, boardSize);
// putText(fA, string("Distort: ") + std::to_string(pd.x) + string(",") + std::to_string(pd.y), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
//} else if (step == 1) {
//float d = distance(cameraMatrix, pointBufA, boardSize, squareSize);
//putText(anag, string("Distance: ") + std::to_string(-d) + string("m"), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
// } else if (step == 2) {
//float angle = calculateZRotation(pointBufA, boardSize) - 180.0f;
//putText(anag, string("Angle: ") + std::to_string(angle), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
//} else if (step == 3) {
// Point2f vd = diffY(pointBufA, pointBufB, boardSize);
// putText(fA, string("Vertical: ") + std::to_string(vd.x) + string(",") + std::to_string(vd.y), Point(10,200), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
// }
if (foundB) {
//if (step == 0) {
//Point2f pd = parallaxDistortion(pointBufB, boardSize);
//putText(fB, string("Distort: ") + std::to_string(pd.x) + string(",") + std::to_string(pd.y), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
//} else if (step == 1) {
//float d = distance(cameraMatrix, pointBufB, boardSize, squareSize);
//putText(fB, string("Distance: ") + std::to_string(-d) + string("m"), Point(10,100), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
//} else if (step == 2) {
//float angle = calculateZRotation(pointBufB, boardSize) - 180.0f;
//putText(fB, string("Angle: ") + std::to_string(angle), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
//}
Rec4f dists = distances(cameraMatrix, pointBufB, boardSize, squareSize);
//Rec4f angs = angles(pointBufA, boardSize);
// TODO Check angles also...
bool lrValid = std::abs(dists.left-dists.right) <= kDistanceThreshold;
bool tbValid = std::abs(dists.top-dists.bottom) <= kDistanceThreshold;
bool tiltUp = dists.top < dists.bottom && !tbValid;
bool tiltDown = dists.top > dists.bottom && !tbValid;
bool rotLeft = dists.left > dists.right && !lrValid;
bool rotRight = dists.left < dists.right && !lrValid;
// TODO Draw lines
Point2f bbl = pointBufB[(boardSize.height-1)*boardSize.width];
Point2f btl = pointBufB[0];
Point2f btr = pointBufB[boardSize.width-1];
Point2f bbr = pointBufB[pointBufB.size()-1];
line(anag, btl, btr, (!lrValid && tiltUp) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, bbl, bbr, (!lrValid && tiltDown) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, btl, bbl, (!tbValid && rotLeft) ? Scalar(0,0,255) : Scalar(0,255,0));
line(anag, btr, bbr, (!tbValid && rotRight) ? Scalar(0,0,255) : Scalar(0,255,0));
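// Rough baseline estimate: average the four per-corner horizontal
// disparities and convert pixels to millimetres using the board's known
// physical width (assumes both views see the board at similar depth):
// baseline_mm = disparity_px / boardWidth_px * boardWidth_mm.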
float baseline1 = std::abs(tl.x - btl.x);
float baseline2 = std::abs(tr.x - btr.x);
float baseline3 = std::abs(bl.x - bbl.x);
float baseline4 = std::abs(br.x - bbr.x);
float boardWidth = (std::abs(tl.x-tr.x) + std::abs(btl.x - btr.x)) / 2.0f;
float baseline = ((baseline1 + baseline2 + baseline3 + baseline4) / 4.0f) / boardWidth * (boardSize.width*squareSize);
putText(anag, string("Baseline: ") + std::to_string(baseline) + string("mm"), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,255,0), 2);
}
}
/*if (anaglyph) {
showAnaglyph(fA,fB);
} else {
imshow("Left", fA);
imshow("Right", fB);
}*/
imshow("Anaglyph", anag);
char key = static_cast<char>(waitKey(20));
if (key == 27)
break;
if (key == 32) anaglyph = !anaglyph;
}
}
#ifndef _FTL_CALIBRATION_ALIGN_HPP_
#define _FTL_CALIBRATION_ALIGN_HPP_
#include <map>
#include <string>
namespace ftl {
namespace calibration {
void align(std::map<std::string, std::string> &opt);
}
}
#endif // _FTL_CALIBRATION_ALIGN_HPP_
#include <loguru.hpp>
#include <ftl/config.h>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include "common.hpp"
using std::vector;
using std::map;
using std::string;
using cv::Mat;
using cv::Vec2f, cv::Vec3f;
using cv::Size;
using cv::stereoCalibrate;
namespace ftl {
namespace calibration {
// Options
string getOption(const map<string, string> &options, const string &opt) {
const string str = options.at(opt);
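// option values are stored quoted; strip the surrounding quote characters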
return str.substr(1, str.size() - 2);
}
bool hasOption(const map<string, string> &options, const string &opt) {
return options.find(opt) != options.end();
}
int getOptionInt(const map<string, string> &options, const string &opt, int default_value) {
if (!hasOption(options, opt)) return default_value;
return std::stoi(options.at(opt));
}
double getOptionDouble(const map<string, string> &options, const string &opt, double default_value) {
if (!hasOption(options, opt)) return default_value;
return std::stod(options.at(opt));
}
string getOptionString(const map<string, string> &options, const string &opt, string default_value) {
if (!hasOption(options, opt)) return default_value;
return getOption(options, opt);
}
// Save/load files
bool saveExtrinsics(const string &ofile, Mat &R, Mat &T, Mat &R1, Mat &R2, Mat &P1, Mat &P2, Mat &Q) {
cv::FileStorage fs;
fs.open(ofile, cv::FileStorage::WRITE);
if (fs.isOpened()) {
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1"
<< P1 << "P2" << P2 << "Q" << Q;
fs.release();
return true;
} else {
LOG(ERROR) << "Error: can not save the extrinsic parameters";
}
return false;
}
bool saveIntrinsics(const string &ofile, const vector<Mat> &M, const vector<Mat>& D, const Size &size)
{
cv::FileStorage fs(ofile, cv::FileStorage::WRITE);
if (fs.isOpened())
{
fs << "resolution" << size;
fs << "K" << M << "D" << D;
fs.release();
return true;
}
else
{
LOG(ERROR) << "Error: can not save the intrinsic parameters to '" << ofile << "'";
}
return false;
}
bool loadIntrinsics(const string &ifile, vector<Mat> &K1, vector<Mat> &D1, Size &size) {
using namespace cv;
FileStorage fs;
// reading intrinsic parameters
fs.open((ifile).c_str(), FileStorage::READ);
if (!fs.isOpened()) {
LOG(WARNING) << "Could not open intrinsics file : " << ifile;
return false;
}
LOG(INFO) << "Intrinsics from: " << ifile;
fs["resolution"] >> size;
fs["K"] >> K1;
fs["D"] >> D1;
return true;
}
Grid::Grid(int rows, int cols, int width, int height,
int offset_x, int offset_y) {
rows_ = rows;
cols_ = cols;
width_ = width;
height_ = height;
offset_x_ = offset_x;
offset_y_ = offset_y;
cell_width_ = width_ / cols_;
cell_height_ = height_ / rows_;
reset();
corners_ = vector<std::pair<cv::Point, cv::Point>>();
for (int r = 0; r < rows_; r++) {
for (int c = 0; c < cols_; c++) {
int x1 = offset_x_ + c * cell_width_;
int y1 = offset_y_ + r * cell_height_;
int x2 = offset_x_ + (c + 1) * cell_width_ - 1;
int y2 = offset_y_ + (r + 1) * cell_height_ - 1;
corners_.push_back(std::pair(cv::Point(x1, y1), cv::Point(x2, y2)));
}}
}
void Grid::drawGrid(Mat &rgb) {
for (int i = 0; i < rows_ * cols_; ++i) {
bool visited = visited_[i];
cv::Scalar color = visited ? cv::Scalar(24, 255, 24) : cv::Scalar(24, 24, 255);
cv::rectangle(rgb, corners_[i].first, corners_[i].second, color, 2);
}
}
int Grid::checkGrid(cv::Point p1, cv::Point p2) {
// TODO calculate directly
for (int i = 0; i < rows_ * cols_; ++i) {
auto &corners = corners_[i];
if (p1.x >= corners.first.x &&
p1.x <= corners.second.x &&
p1.y >= corners.first.y &&
p1.y <= corners.second.y &&
p2.x >= corners.first.x &&
p2.x <= corners.second.x &&
p2.y >= corners.first.y &&
p2.y <= corners.second.y) {
return i;
}
}
return -1;
}
void Grid::updateGrid(int i) {
if (i >= 0 && i < static_cast<int>(visited_.size()) && !visited_[i]) {
visited_[i] = true;
visited_count_ += 1;
}
}
bool Grid::isVisited(int i) {
if (i >= 0 && i < static_cast<int>(visited_.size())) {
return visited_[i];
}
return false;
}
bool Grid::isComplete() {
return visited_count_ == static_cast<int>(visited_.size());
}
void Grid::reset() {
visited_count_ = 0;
visited_ = vector<bool>(rows_ * cols_, false);
// reset visited
}
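/* Usage sketch (hypothetical capture loop; p1/p2 are the detected target
* endpoints and frame the current image):
*
*   Grid grid(4, 4, 1280, 720, 0, 0);
*   while (!grid.isComplete()) {
*       // ...capture frame, detect p1 and p2...
*       int cell = grid.checkGrid(p1, p2);
*       if (cell != -1) grid.updateGrid(cell);
*       grid.drawGrid(frame);  // green = visited, red = pending
*   }
*/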
// Calibration classes for different patterns
CalibrationChessboard::CalibrationChessboard(const map<string, string> &opt) {
pattern_size_ = Size( getOptionInt(opt, "cols", 9),
getOptionInt(opt, "rows", 6));
image_size_ = Size( getOptionInt(opt, "width", 1280),
getOptionInt(opt, "height", 720));
pattern_square_size_ = getOptionDouble(opt, "square_size", 0.036);
LOG(INFO) << "Chessboard calibration parameters";
LOG(INFO) << " rows: " << pattern_size_.height;
LOG(INFO) << " cols: " << pattern_size_.width;
LOG(INFO) << " width: " << image_size_.width;
LOG(INFO) << " height: " << image_size_.height;
LOG(INFO) << " square_size: " << pattern_square_size_;
LOG(INFO) << "-----------------------------------";
// From OpenCV (4.1.0) Documentation
//
// CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
// CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
// CALIB_CB_ACCURACY Up sample input image to improve sub-pixel accuracy due to
// aliasing effects. This should be used if an accurate camera
// calibration is required.
chessboard_flags_ = cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_ACCURACY;
}
void CalibrationChessboard::objectPoints(vector<Vec3f> &out) {
out.reserve(pattern_size_.width * pattern_size_.height);
for (int row = 0; row < pattern_size_.height; ++row) {
for (int col = 0; col < pattern_size_.width; ++col) {
out.push_back(Vec3f(col * pattern_square_size_, row * pattern_square_size_, 0));
}}
}
bool CalibrationChessboard::findPoints(Mat &img, vector<Vec2f> &points) {
return cv::findChessboardCornersSB(img, pattern_size_, points, chessboard_flags_);
}
void CalibrationChessboard::drawCorners(Mat &img, const vector<Vec2f> &points) {
using cv::Point2i;
vector<Point2i> corners(4);
corners[1] = Point2i(points[0]);
corners[0] = Point2i(points[pattern_size_.width - 1]);
corners[2] = Point2i(points[pattern_size_.width * (pattern_size_.height - 1)]);
corners[3] = Point2i(points.back());
cv::Scalar color = cv::Scalar(200, 200, 200);
for (int i = 0; i < 4; i++)
{
cv::line(img, corners[i % 4], corners[(i + 1) % 4], color, 2);
}
}
void CalibrationChessboard::drawPoints(Mat &img, const vector<Vec2f> &points) {
cv::drawChessboardCorners(img, pattern_size_, points, true);
}
}
}
#ifndef _FTL_CALIBRATION_COMMON_HPP_
#define _FTL_CALIBRATION_COMMON_HPP_
#include <map>
#include <string>
#include <opencv2/core.hpp>
namespace ftl {
namespace calibration {
std::string getOption(const std::map<std::string, std::string> &options, const std::string &opt);
bool hasOption(const std::map<std::string, std::string> &options, const std::string &opt);
int getOptionInt(const std::map<std::string, std::string> &options, const std::string &opt, int default_value);
double getOptionDouble(const std::map<std::string, std::string> &options, const std::string &opt, double default_value);
std::string getOptionString(const std::map<std::string, std::string> &options, const std::string &opt, std::string default_value);
bool loadIntrinsics(const std::string &ifile, std::vector<cv::Mat> &K, std::vector<cv::Mat> &D, cv::Size &size);
bool saveIntrinsics(const std::string &ofile, const std::vector<cv::Mat> &K, const std::vector<cv::Mat> &D, const cv::Size &size);
// TODO loadExtrinsics()
bool saveExtrinsics(const std::string &ofile, cv::Mat &R, cv::Mat &T, cv::Mat &R1, cv::Mat &R2, cv::Mat &P1, cv::Mat &P2, cv::Mat &Q);
class Grid {
private:
int rows_;
int cols_;
int width_;
int height_;
int cell_width_;
int cell_height_;
int offset_x_;
int offset_y_;
int visited_count_;
std::vector<std::pair<cv::Point, cv::Point>> corners_;
std::vector<bool> visited_;
public:
Grid(int rows, int cols, int width, int height, int offset_x, int offset_y);
void drawGrid(cv::Mat &rgb);
int checkGrid(cv::Point p1, cv::Point p2);
void updateGrid(int i);
bool isVisited(int i);
bool isComplete();
void reset();
};
/**
* @brief Wrapper for OpenCV's calibration methods. Parameters depend on
* the implementation (different types of patterns).
*
* Calibration objects may store state, e.g. from previous views of calibration
* images.
*/
class Calibration {
public:
/**
* @brief Calculate reference points for given pattern
* @param Output parameter
*/
void objectPoints(std::vector<cv::Vec3f> &out);
/**
* @brief Try to find calibration pattern in input image
* @param Input image
* @param Output parameter for found point image coordinates
* @returns true if pattern found, otherwise false
*/
bool findPoints(cv::Mat &in, std::vector<cv::Vec2f> &out);
/**
* @brief Draw points to image
* @param Image to draw to
* @param Pattern points (in image coordinates)
*/
void drawPoints(cv::Mat &img, const std::vector<cv::Vec2f> &points);
};
/**
* @brief Chessboard calibration pattern. Uses OpenCV's
* findChessboardCornersSB function.
* @todo Parameters hardcoded in constructor
*
* All parameters (command line parameters):
* - rows, cols: pattern size (inner corners)
* - square_size: millimeters (TODO: meters)
* - width, height: image size, pixels
* - flags: see ChessboardCornersSB documentation (TODO: not implemented)
*/
class CalibrationChessboard : Calibration {
public:
explicit CalibrationChessboard(const std::map<std::string, std::string> &opt);
void objectPoints(std::vector<cv::Vec3f> &out);
bool findPoints(cv::Mat &in, std::vector<cv::Vec2f> &out);
void drawPoints(cv::Mat &img, const std::vector<cv::Vec2f> &points);
void drawCorners(cv::Mat &img, const std::vector<cv::Vec2f> &points);
private:
int chessboard_flags_ = 0;
float pattern_square_size_;
cv::Size pattern_size_;
cv::Size image_size_;
};
// TODO other patterns, circles ...
}
}
#endif // _FTL_CALIBRATION_COMMON_HPP_
#include "common.hpp"
#include "lens.hpp"
#include <ftl/config.h>
#include <ftl/calibration/parameters.hpp>
#include <loguru.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <vector>
#include <atomic>
#include <thread>
#include <mutex>
#include <chrono>
#include <algorithm>
using std::map;
using std::string;
using std::vector;
using cv::Mat;
using cv::Vec2f;
using cv::Vec3f;
using cv::Size;
using namespace ftl::calibration;
void ftl::calibration::intrinsic(map<string, string> &opt) {
LOG(INFO) << "Begin intrinsic calibration";
// TODO PARAMETERS TO CONFIG FILE
const Size image_size = Size( getOptionInt(opt, "width", 1920),
getOptionInt(opt, "height", 1080));
const int n_cameras = getOptionInt(opt, "n_cameras", 2);
const int iter = getOptionInt(opt, "iter", 20);
const int delay = getOptionInt(opt, "delay", 1000);
const double aperture_width = getOptionDouble(opt, "aperture_width", 6.2);
const double aperture_height = getOptionDouble(opt, "aperture_height", 4.6);
const string filename_intrinsics = getOptionString(opt, "profile", FTL_LOCAL_CONFIG_ROOT "/calibration.yml");
CalibrationChessboard calib(opt);
bool use_guess = getOptionInt(opt, "use_guess", 1);
//bool use_guess_distortion = getOptionInt(opt, "use_guess_distortion", 0);
LOG(INFO) << "Intrinsic calibration parameters";
LOG(INFO) << " profile: " << filename_intrinsics;
LOG(INFO) << " n_cameras: " << n_cameras;
LOG(INFO) << " width: " << image_size.width;
LOG(INFO) << " height: " << image_size.height;
LOG(INFO) << " iter: " << iter;
LOG(INFO) << " delay: " << delay;
LOG(INFO) << " aperture_width: " << aperture_width;
LOG(INFO) << " aperture_height: " << aperture_height;
LOG(INFO) << " use_guess: " << use_guess << "\n";
LOG(WARNING) << "WARNING: This application overwrites existing files and does not previous values!";
//LOG(INFO) << " use_guess_distortion: " << use_guess_distortion;
LOG(INFO) << "-----------------------------------";
// assume no tangential and thin prism distortion and only estimate first
// three radial distortion coefficients
int calibrate_flags = cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5 | cv::CALIB_FIX_K6 |
cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_S1_S2_S3_S4 | cv::CALIB_FIX_ASPECT_RATIO;
vector<Mat> camera_matrix(n_cameras), dist_coeffs(n_cameras);
for (Mat &d : dist_coeffs) {
d = Mat(Size(8, 1), CV_64FC1, cv::Scalar(0.0));
}
if (use_guess) {
camera_matrix.clear();
vector<Mat> tmp;
Size tmp_size;
loadIntrinsics(filename_intrinsics, camera_matrix, tmp, tmp_size);
CHECK(camera_matrix.size() == static_cast<unsigned int>(n_cameras));
if ((tmp_size != image_size) && (!tmp_size.empty())) {
for (Mat &K : camera_matrix) {
K = ftl::calibration::scaleCameraMatrix(K, image_size, tmp_size);
}
}
if (tmp_size.empty()) {
LOG(ERROR) << "No valid calibration found.";
}
else {
calibrate_flags |= cv::CALIB_USE_INTRINSIC_GUESS;
}
}
vector<cv::VideoCapture> cameras;
cameras.reserve(n_cameras);
for (int c = 0; c < n_cameras; c++) { cameras.emplace_back(c); }
for (auto &camera : cameras)
{
if (!camera.isOpened()) {
LOG(ERROR) << "Could not open camera device";
return;
}
camera.set(cv::CAP_PROP_FRAME_WIDTH, image_size.width);
camera.set(cv::CAP_PROP_FRAME_HEIGHT, image_size.height);
}
vector<vector<vector<Vec2f>>> image_points(n_cameras);
vector<vector<vector<Vec3f>>> object_points(n_cameras);
vector<Mat> img(n_cameras);
vector<Mat> img_display(n_cameras);
vector<int> count(n_cameras, 0);
Mat display(Size(image_size.width * n_cameras, image_size.height), CV_8UC3);
for (int c = 0; c < n_cameras; c++) {
img_display[c] = Mat(display, cv::Rect(c * image_size.width, 0, image_size.width, image_size.height));
}
std::mutex m;
std::atomic<bool> ready = false;
auto capture = std::thread(
[n_cameras, delay, &m, &ready, &count, &calib, &img, &image_points, &object_points]() {
vector<Mat> tmp(n_cameras);
while(true) {
if (!ready) {
std::this_thread::sleep_for(std::chrono::milliseconds(delay));
continue;
}
m.lock();
ready = false;
for (int c = 0; c < n_cameras; c++) {
img[c].copyTo(tmp[c]);
}
m.unlock();
for (int c = 0; c < n_cameras; c++) {
vector<Vec2f> points;
if (calib.findPoints(tmp[c], points)) {
count[c]++;
}
else { continue; }
vector<Vec3f> points_ref;
calib.objectPoints(points_ref);
Mat camera_matrix, dist_coeffs;
image_points[c].push_back(points);
object_points[c].push_back(points_ref);
}
std::this_thread::sleep_for(std::chrono::milliseconds(delay));
}
});
while (iter > *std::min_element(count.begin(), count.end())) {
if (m.try_lock()) {
for (auto &camera : cameras) { camera.grab(); }
for (int c = 0; c < n_cameras; c++) {
cameras[c].retrieve(img[c]);
}
ready = true;
m.unlock();
}
for (int c = 0; c < n_cameras; c++) {
img[c].copyTo(img_display[c]);
m.lock();
if (image_points[c].size() > 0) {
for (auto &points : image_points[c]) {
calib.drawCorners(img_display[c], points);
}
calib.drawPoints(img_display[c], image_points[c].back());
}
m.unlock();
}
cv::namedWindow("Cameras", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
cv::imshow("Cameras", display);
cv::waitKey(10);
}
cv::destroyAllWindows();
//bool calib_ok = true;
for (int c = 0; c < n_cameras; c++) {
LOG(INFO) << "Calculating intrinsic paramters for camera " << std::to_string(c);
vector<Mat> rvecs, tvecs;
double rms = cv::calibrateCamera(
object_points[c], image_points[c],
image_size, camera_matrix[c], dist_coeffs[c],
rvecs, tvecs, calibrate_flags
);
LOG(INFO) << "final reprojection RMS error: " << rms;
if (!ftl::calibration::validate::distortionCoefficients(dist_coeffs[c], image_size)) {
LOG(ERROR) << "Calibration failed: invalid distortion coefficients:\n"
<< dist_coeffs[c];
LOG(WARNING) << "Estimating only intrinsic parameters for camera " << std::to_string(c);
dist_coeffs[c] = Mat(Size(8, 1), CV_64FC1, cv::Scalar(0.0));
calibrate_flags |= cv::CALIB_FIX_K1 | cv::CALIB_FIX_K2 | cv::CALIB_FIX_K3;
c--;
continue;
}
//calib_ok = true;
calibrate_flags &= ~cv::CALIB_FIX_K1 & ~cv::CALIB_FIX_K2 & ~cv::CALIB_FIX_K3;
double fovx, fovy, focal_length, aspect_ratio;
cv::Point2d principal_point;
// TODO: check for valid aperture width/height; do not run if not valid values
cv::calibrationMatrixValues(camera_matrix[c], image_size, aperture_width, aperture_height,
fovx, fovy, focal_length, principal_point, aspect_ratio);
LOG(INFO) << "";
LOG(INFO) << " fovx (deg): " << fovx;
LOG(INFO) << " fovy (deg): " << fovy;
LOG(INFO) << " focal length (mm): " << focal_length;
LOG(INFO) << " principal point (mm): " << principal_point;
LOG(INFO) << " aspect ratio: " << aspect_ratio;
LOG(INFO) << "";
LOG(INFO) << "Camera matrix:\n" << camera_matrix[c];
LOG(INFO) << "Distortion coefficients:\n" << dist_coeffs[c];
LOG(INFO) << "";
}
saveIntrinsics(filename_intrinsics, camera_matrix, dist_coeffs, image_size);
LOG(INFO) << "intrinsic paramaters saved to: " << filename_intrinsics;
vector<Mat> map1(n_cameras), map2(n_cameras);
for (int c = 0; c < n_cameras; c++) {
cv::initUndistortRectifyMap(camera_matrix[c], dist_coeffs[c], Mat::eye(3,3, CV_64F), camera_matrix[c],
image_size, CV_16SC2, map1[c], map2[c]);
}
while (cv::waitKey(25) != 27) {
for (auto &camera : cameras ) { camera.grab(); }
for (int c = 0; c < n_cameras; c++) {
cameras[c].retrieve(img[c]);
cv::remap(img[c], img[c], map1[c], map2[c], cv::INTER_CUBIC);
cv::imshow("Camera " + std::to_string(c), img[c]);
}
}
}
#ifndef _FTL_CALIBRATION_LENS_HPP_
#define _FTL_CALIBRATION_LENS_HPP_
#include <map>
#include <string>
namespace ftl {
namespace calibration {
void intrinsic(std::map<std::string, std::string> &opt);
}
}
#endif // _FTL_CALIBRATION_LENS_HPP_
#include <loguru.hpp>
#include <ftl/configuration.hpp>
#include "lens.hpp"
#include "stereo.hpp"
#include "align.hpp"
int main(int argc, char **argv) {
loguru::g_preamble_date = false;
loguru::g_preamble_uptime = false;
loguru::g_preamble_thread = false;
loguru::init(argc, argv, "--verbosity");
argc--;
argv++;
// Process Arguments
auto options = ftl::config::read_options(&argv, &argc);
ftl::calibration::intrinsic(options);
return 0;
}
#ifndef _FTL_CALIBRATION_STEREO_HPP_
#define _FTL_CALIBRATION_STEREO_HPP_
#include <map>
#include <string>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
namespace ftl {
namespace calibration {
void stereo(std::map<std::string, std::string> &opt);
}
}
#endif // _FTL_CALIBRATION_STEREO_HPP_