diff --git a/cv-node/CMakeLists.txt b/cv-node/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17f4d3d729598e28fdf25116d940fc1d7e245609
--- /dev/null
+++ b/cv-node/CMakeLists.txt
@@ -0,0 +1,62 @@
+cmake_minimum_required (VERSION 3.1.0)
+include (CheckIncludeFile)
+include (CheckFunctionExists)
+
+project (cv-node)
+
+include(CTest)
+enable_testing()
+
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+
+find_package( OpenCV REQUIRED )
+find_package( Threads REQUIRED )
+#find_package(PkgConfig)
+#pkg_check_modules(GTKMM gtkmm-3.0)
+
+# Need to include staged files and libs
+include_directories(${PROJECT_SOURCE_DIR}/include)
+include_directories(${PROJECT_BINARY_DIR})
+
+set(ftl_VERSION_MAJOR "1")
+set(ftl_VERSION_MINOR "0")
+set(ftl_VERSION_PATCH "0")
+set(FTL_VERSION "${ftl_VERSION_MAJOR}.${ftl_VERSION_MINOR}.${ftl_VERSION_PATCH}")
+add_definitions(-DFTL_VERSION=${FTL_VERSION})
+
+if (WIN32)
+    set(FTL_CONFIG_ROOT "$ENV{USERPROFILE}/AppData/ftl")
+    set(FTL_CACHE_ROOT "$ENV{USERPROFILE}/AppData/ftl")
+    set(FTL_DATA_ROOT "$ENV{USERPROFILE}/AppData/ftl")
+    add_definitions(-DWIN32)
+endif (WIN32)
+
+if (UNIX)
+    add_definitions(-DUNIX)
+    set(FTL_CONFIG_ROOT "$ENV{HOME}/.config/ftl")
+    set(FTL_CACHE_ROOT "$ENV{HOME}/.cache/ftl")
+    set(FTL_DATA_ROOT "$ENV{HOME}/.local/share/ftl")
+endif (UNIX)
+
+add_definitions(-DFTL_CONFIG_ROOT=${FTL_CONFIG_ROOT})
+add_definitions(-DFTL_CACHE_ROOT=${FTL_CACHE_ROOT})
+add_definitions(-DFTL_DATA_ROOT=${FTL_DATA_ROOT})
+
+set(CMAKE_CXX_FLAGS "-pthread -std=c++17 -Wall -Wno-deprecated -Werror -Wno-psabi")
+set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG -pg -Wall -Werror")
+set(CMAKE_CXX_FLAGS_RELEASE "-O3")
+
+SET(CMAKE_USE_RELATIVE_PATHS ON)
+
+set(CVNODESRC
+    src/main.cpp
+    src/calibrate.cpp
+    src/local.cpp
+    src/sync.cpp
+)
+
+add_executable(cv-node ${CVNODESRC})
+target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(cv-node Threads::Threads ${OpenCV_LIBS} glog)
+
+
diff --git a/cv-node/config/in_VID5.xml b/cv-node/config/in_VID5.xml
new file mode 100644
index 0000000000000000000000000000000000000000..42e80c851e78ea54d8a0eefca45f630479f1b128
--- /dev/null
+++ b/cv-node/config/in_VID5.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<opencv_storage>
+<Settings>
+  <!-- Number of inner corners per item row and column. (square, circle) -->
+  <BoardSize_Width> 9</BoardSize_Width>
+  <BoardSize_Height>6</BoardSize_Height>
+
+  <!-- The size of a square in some user defined metric system (pixel, millimeter)-->
+  <Square_Size>50</Square_Size>
+
+  <!-- The type of input used for camera calibration. One of: CHESSBOARD CIRCLES_GRID ASYMMETRIC_CIRCLES_GRID -->
+  <Calibrate_Pattern>"CHESSBOARD"</Calibrate_Pattern>
+
+  <!-- The input to use for calibration.
+       To use an input camera -> give the ID of the camera, like "1"
+       To use an input video  -> give the path of the input video, like "/tmp/x.avi"
+       To use an image list   -> give the path to the XML or YAML file containing the list of the images, like "/tmp/circles_list.xml"
+       -->
+  <Input>"images/CameraCalibration/VID5/VID5.xml"</Input>
+  <!-- If true (non-zero) we flip the input images around the horizontal axis.-->
+  <Input_FlipAroundHorizontalAxis>0</Input_FlipAroundHorizontalAxis>
+
+  <!-- Time delay between frames in case of camera. -->
+  <Input_Delay>100</Input_Delay>
+
+  <!-- How many frames to use, for calibration. 
--> + <Calibrate_NrOfFrameToUse>25</Calibrate_NrOfFrameToUse> + <!-- Consider only fy as a free parameter, the ratio fx/fy stays the same as in the input cameraMatrix. + Use or not setting. 0 - False Non-Zero - True--> + <Calibrate_FixAspectRatio> 1 </Calibrate_FixAspectRatio> + <!-- If true (non-zero) tangential distortion coefficients are set to zeros and stay zero.--> + <Calibrate_AssumeZeroTangentialDistortion>1</Calibrate_AssumeZeroTangentialDistortion> + <!-- If true (non-zero) the principal point is not changed during the global optimization.--> + <Calibrate_FixPrincipalPointAtTheCenter> 1 </Calibrate_FixPrincipalPointAtTheCenter> + + <!-- The name of the output log file. --> + <Write_outputFileName>"out_camera_data.xml"</Write_outputFileName> + <!-- If true (non-zero) we write to the output file the feature points.--> + <Write_DetectedFeaturePoints>1</Write_DetectedFeaturePoints> + <!-- If true (non-zero) we write to the output file the extrinsic camera parameters.--> + <Write_extrinsicParameters>1</Write_extrinsicParameters> + <!-- If true (non-zero) we write to the output file the refined 3D target grid points.--> + <Write_gridPoints>1</Write_gridPoints> + <!-- If true (non-zero) we show after calibration the undistorted images.--> + <Show_UndistortedImage>1</Show_UndistortedImage> + <!-- If true (non-zero) will be used fisheye camera model.--> + <Calibrate_UseFisheyeModel>0</Calibrate_UseFisheyeModel> + <!-- If true (non-zero) distortion coefficient k1 will be equals to zero.--> + <Fix_K1>0</Fix_K1> + <!-- If true (non-zero) distortion coefficient k2 will be equals to zero.--> + <Fix_K2>0</Fix_K2> + <!-- If true (non-zero) distortion coefficient k3 will be equals to zero.--> + <Fix_K3>0</Fix_K3> + <!-- If true (non-zero) distortion coefficient k4 will be equals to zero.--> + <Fix_K4>1</Fix_K4> + <!-- If true (non-zero) distortion coefficient k5 will be equals to zero.--> + <Fix_K5>1</Fix_K5> +</Settings> +</opencv_storage> diff --git a/cv-node/include/ftl/calibrate.hpp b/cv-node/include/ftl/calibrate.hpp new file mode 100644 index 0000000000000000000000000000000000000000..fee7647e26c2c53aeb7de48b1ff015573212e63f --- /dev/null +++ b/cv-node/include/ftl/calibrate.hpp @@ -0,0 +1,29 @@ +#ifndef _FTL_CALIBRATION_HPP_ +#define _FTL_CALIBRATION_HPP_ + +#include <opencv2/opencv.hpp> +#include <ftl/local.hpp> +#include <string> + +namespace ftl { +class Calibrate { + public: + Calibrate(ftl::LocalSource *s, const std::string &cal); + + bool recalibrate(const std::string &conf); + + bool undistort(cv::Mat &l, cv::Mat &r); + bool rectified(cv::Mat &l, cv::Mat &r); + + bool isCalibrated(); + + private: + bool runCalibration(cv::Mat &img, cv::Mat &cam); + + private: + ftl::LocalSource *local_; +}; +}; + +#endif // _FTL_CALIBRATION_HPP_ + diff --git a/cv-node/include/ftl/local.hpp b/cv-node/include/ftl/local.hpp new file mode 100644 index 0000000000000000000000000000000000000000..54f3f6916c2ec9907283ae4b932cb38240f4c812 --- /dev/null +++ b/cv-node/include/ftl/local.hpp @@ -0,0 +1,38 @@ +#ifndef _FTL_LOCAL_HPP_ +#define _FTL_LOCAL_HPP_ + +#include <string> + +namespace cv { + class Mat; + class VideoCapture; +}; + +namespace ftl { +class LocalSource { + public: + LocalSource(); + LocalSource(const std::string &vid); + + //bool left(cv::Mat &m); + //bool right(cv::Mat &m); + bool get(cv::Mat &l, cv::Mat &r); + + //void setFramerate(float fps); + //float getFramerate() const; + + double getTimestamp() const; + + bool isStereo() const; + + private: + double timestamp_; + bool stereo_; + //float 
fps_; + cv::VideoCapture *camera_a_; + cv::VideoCapture *camera_b_; +}; +}; + +#endif // _FTL_LOCAL_HPP_ + diff --git a/cv-node/include/ftl/synched.hpp b/cv-node/include/ftl/synched.hpp new file mode 100644 index 0000000000000000000000000000000000000000..75aafcd451f2d15e7dc1018dd02564250b98057a --- /dev/null +++ b/cv-node/include/ftl/synched.hpp @@ -0,0 +1,28 @@ +#ifndef _FTL_SYNCHED_HPP_ +#define _FTL_SYNCHED_HPP_ + +#include <opencv2/opencv.hpp> +#include <string> +#include <vector> + +namespace ftl { + +static const int LEFT = 0; +static const int RIGHT = 1; + +class SyncSource { + public: + SyncSource(); + + void addChannel(const std::string &c); + void feed(int channel, cv::Mat &m, double ts); + bool get(int channel, cv::Mat &m); + double latency() const; + + private: + std::vector<cv::Mat> channels_; +}; +}; + +#endif // _FTL_SYNCHED_HPP_ + diff --git a/cv-node/src/calibrate.cpp b/cv-node/src/calibrate.cpp new file mode 100644 index 0000000000000000000000000000000000000000..16e88d7dbc18131a3ff162b7f84aaf208215280c --- /dev/null +++ b/cv-node/src/calibrate.cpp @@ -0,0 +1,752 @@ +#include <iostream> +#include <sstream> +#include <string> +#include <ctime> +#include <cstdio> + +#include <ftl/calibrate.hpp> + +#include <opencv2/core.hpp> +#include <opencv2/core/utility.hpp> +#include <opencv2/imgproc.hpp> +#include <opencv2/calib3d.hpp> +#include <opencv2/imgcodecs.hpp> +#include <opencv2/videoio.hpp> +#include <opencv2/highgui.hpp> + +using namespace cv; +using namespace std; + +using ftl::Calibrate; + +class Settings +{ +public: + Settings() : goodInput(false) {} + enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID }; + enum InputType { INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST }; + + void write(FileStorage& fs) const //Write serialization for this class + { + fs << "{" + << "BoardSize_Width" << boardSize.width + << "BoardSize_Height" << boardSize.height + << "Square_Size" << squareSize + << "Calibrate_Pattern" << patternToUse + << "Calibrate_NrOfFrameToUse" << nrFrames + << "Calibrate_FixAspectRatio" << aspectRatio + << "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist + << "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint + + << "Write_DetectedFeaturePoints" << writePoints + << "Write_extrinsicParameters" << writeExtrinsics + << "Write_gridPoints" << writeGrid + << "Write_outputFileName" << outputFileName + + << "Show_UndistortedImage" << showUndistorsed + + << "Input_FlipAroundHorizontalAxis" << flipVertical + << "Input_Delay" << delay + << "Input" << input + << "}"; + } + void read(const FileNode& node) //Read serialization for this class + { + node["BoardSize_Width" ] >> boardSize.width; + node["BoardSize_Height"] >> boardSize.height; + node["Calibrate_Pattern"] >> patternToUse; + node["Square_Size"] >> squareSize; + node["Calibrate_NrOfFrameToUse"] >> nrFrames; + node["Calibrate_FixAspectRatio"] >> aspectRatio; + node["Write_DetectedFeaturePoints"] >> writePoints; + node["Write_extrinsicParameters"] >> writeExtrinsics; + node["Write_gridPoints"] >> writeGrid; + node["Write_outputFileName"] >> outputFileName; + node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist; + node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint; + node["Calibrate_UseFisheyeModel"] >> useFisheye; + node["Input_FlipAroundHorizontalAxis"] >> flipVertical; + node["Show_UndistortedImage"] >> showUndistorsed; + node["Input"] >> input; + node["Input_Delay"] >> delay; + node["Fix_K1"] >> fixK1; + node["Fix_K2"] >> 
fixK2; + node["Fix_K3"] >> fixK3; + node["Fix_K4"] >> fixK4; + node["Fix_K5"] >> fixK5; + + validate(); + } + void validate() + { + goodInput = true; + if (boardSize.width <= 0 || boardSize.height <= 0) + { + cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl; + goodInput = false; + } + if (squareSize <= 10e-6) + { + cerr << "Invalid square size " << squareSize << endl; + goodInput = false; + } + if (nrFrames <= 0) + { + cerr << "Invalid number of frames " << nrFrames << endl; + goodInput = false; + } + + if (input.empty()) // Check for valid input + inputType = INVALID; + else + { + if (input[0] >= '0' && input[0] <= '9') + { + stringstream ss(input); + ss >> cameraID; + inputType = CAMERA; + } + else + { + if (isListOfImages(input) && readStringList(input, imageList)) + { + inputType = IMAGE_LIST; + nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size(); + } + else + inputType = VIDEO_FILE; + } + if (inputType == CAMERA) + inputCapture.open(cameraID); + if (inputType == VIDEO_FILE) + inputCapture.open(input); + if (inputType != IMAGE_LIST && !inputCapture.isOpened()) + inputType = INVALID; + } + if (inputType == INVALID) + { + cerr << " Input does not exist: " << input; + goodInput = false; + } + + flag = 0; + if(calibFixPrincipalPoint) flag |= CALIB_FIX_PRINCIPAL_POINT; + if(calibZeroTangentDist) flag |= CALIB_ZERO_TANGENT_DIST; + if(aspectRatio) flag |= CALIB_FIX_ASPECT_RATIO; + if(fixK1) flag |= CALIB_FIX_K1; + if(fixK2) flag |= CALIB_FIX_K2; + if(fixK3) flag |= CALIB_FIX_K3; + if(fixK4) flag |= CALIB_FIX_K4; + if(fixK5) flag |= CALIB_FIX_K5; + + if (useFisheye) { + // the fisheye model has its own enum, so overwrite the flags + flag = fisheye::CALIB_FIX_SKEW | fisheye::CALIB_RECOMPUTE_EXTRINSIC; + if(fixK1) flag |= fisheye::CALIB_FIX_K1; + if(fixK2) flag |= fisheye::CALIB_FIX_K2; + if(fixK3) flag |= fisheye::CALIB_FIX_K3; + if(fixK4) flag |= fisheye::CALIB_FIX_K4; + if (calibFixPrincipalPoint) flag |= fisheye::CALIB_FIX_PRINCIPAL_POINT; + } + + calibrationPattern = NOT_EXISTING; + if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD; + if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID; + if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID; + if (calibrationPattern == NOT_EXISTING) + { + cerr << " Camera calibration mode does not exist: " << patternToUse << endl; + goodInput = false; + } + atImageList = 0; + + } + Mat nextImage() + { + Mat result; + if( inputCapture.isOpened() ) + { + Mat view0; + inputCapture >> view0; + view0.copyTo(result); + } + else if( atImageList < imageList.size() ) + result = imread(imageList[atImageList++], IMREAD_COLOR); + + return result; + } + + static bool readStringList( const string& filename, vector<string>& l ) + { + l.clear(); + FileStorage fs(filename, FileStorage::READ); + if( !fs.isOpened() ) + return false; + FileNode n = fs.getFirstTopLevelNode(); + if( n.type() != FileNode::SEQ ) + return false; + FileNodeIterator it = n.begin(), it_end = n.end(); + for( ; it != it_end; ++it ) + l.push_back((string)*it); + return true; + } + + static bool isListOfImages( const string& filename) + { + string s(filename); + // Look for file extension + if( s.find(".xml") == string::npos && s.find(".yaml") == string::npos && s.find(".yml") == string::npos ) + return false; + else + return true; + } +public: + Size boardSize; // The size of the board -> Number of items by width and height + Pattern 
calibrationPattern; // One of the Chessboard, circles, or asymmetric circle pattern
+    float squareSize;            // The size of a square in your defined unit (point, millimeter,etc).
+    int nrFrames;                // The number of frames to use from the input for calibration
+    float aspectRatio;           // The aspect ratio
+    int delay;                   // In case of a video input
+    bool writePoints;            // Write detected feature points
+    bool writeExtrinsics;        // Write extrinsic parameters
+    bool writeGrid;              // Write refined 3D target grid points
+    bool calibZeroTangentDist;   // Assume zero tangential distortion
+    bool calibFixPrincipalPoint; // Fix the principal point at the center
+    bool flipVertical;           // Flip the captured images around the horizontal axis
+    string outputFileName;       // The name of the file where to write
+    bool showUndistorsed;        // Show undistorted images after calibration
+    string input;                // The input ->
+    bool useFisheye;             // use fisheye camera model for calibration
+    bool fixK1;                  // fix K1 distortion coefficient
+    bool fixK2;                  // fix K2 distortion coefficient
+    bool fixK3;                  // fix K3 distortion coefficient
+    bool fixK4;                  // fix K4 distortion coefficient
+    bool fixK5;                  // fix K5 distortion coefficient
+
+    int cameraID;
+    vector<string> imageList;
+    size_t atImageList;
+    VideoCapture inputCapture;
+    InputType inputType;
+    bool goodInput;
+    int flag;
+
+private:
+    string patternToUse;
+
+
+};
+
+static inline void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
+{
+    if(node.empty())
+        x = default_value;
+    else
+        x.read(node);
+}
+
+Calibrate::Calibrate(ftl::LocalSource *s, const std::string &cal) : local_(s) {
+
+}
+
+bool Calibrate::recalibrate(const std::string &conf) {
+    return false;
+}
+
+bool Calibrate::undistort(cv::Mat &l, cv::Mat &r) {
+    local_->get(l,r);
+    return false;
+}
+
+bool Calibrate::rectified(cv::Mat &l, cv::Mat &r) {
+    return undistort(l,r);
+}
+
+bool Calibrate::isCalibrated() {
+    return false;
+}
+
+enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
+
+bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
+                           vector<vector<Point2f> > imagePoints, float grid_width, bool release_object);
+
+int main2(int argc, char* argv[])
+{
+    // Command line options for the retained calibration sample code below.
+    // The settings file is assumed here to default to the bundled config/in_VID5.xml.
+    const String keys =
+        "{@settings |config/in_VID5.xml| input settings file }"
+        "{d         |  | actual distance between top-left and top-right corners of the grid }"
+        "{winSize   | 11 | half of search window size for cornerSubPix }";
+    CommandLineParser parser(argc, argv, keys);
+
+    //! [file_read]
+    Settings s;
+    const string inputSettingsFile = parser.get<string>(0);
+    FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
+    if (!fs.isOpened())
+    {
+        cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
+        parser.printMessage();
+        return -1;
+    }
+    fs["Settings"] >> s;
+    fs.release();                                         // close Settings file
+    //! [file_read]
+
+    //FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
+    //fout << "Settings" << s;
+
+    if (!s.goodInput)
+    {
+        cout << "Invalid input detected. Application stopping. " << endl;
+        return -1;
+    }
+
+    int winSize = parser.get<int>("winSize");
+
+    float grid_width = s.squareSize * (s.boardSize.width - 1);
+    bool release_object = false;
+    if (parser.has("d")) {
+        grid_width = parser.get<float>("d");
+        release_object = true;
+    }
+
+    vector<vector<Point2f> > imagePoints;
+    Mat cameraMatrix, distCoeffs;
+    Size imageSize;
+    int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
+    clock_t prevTimestamp = 0;
+    const Scalar RED(0,0,255), GREEN(0,255,0);
+    const char ESC_KEY = 27;
+
+    //! 
[get_input] + for(;;) + { + Mat view; + bool blinkOutput = false; + + view = s.nextImage(); + + //----- If no more image, or got enough, then stop calibration and show result ------------- + if( mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames ) + { + if(runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints, grid_width, + release_object)) + mode = CALIBRATED; + else + mode = DETECTION; + } + if(view.empty()) // If there are no more images stop the loop + { + // if calibration threshold was not reached yet, calibrate now + if( mode != CALIBRATED && !imagePoints.empty() ) + runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints, grid_width, + release_object); + break; + } + //! [get_input] + + imageSize = view.size(); // Format input image. + if( s.flipVertical ) flip( view, view, 0 ); + + //! [find_pattern] + vector<Point2f> pointBuf; + + bool found; + + int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE; + + if(!s.useFisheye) { + // fast check erroneously fails with high distortions like fisheye + chessBoardFlags |= CALIB_CB_FAST_CHECK; + } + + switch( s.calibrationPattern ) // Find feature points on the input format + { + case Settings::CHESSBOARD: + found = findChessboardCorners( view, s.boardSize, pointBuf, chessBoardFlags); + break; + case Settings::CIRCLES_GRID: + found = findCirclesGrid( view, s.boardSize, pointBuf ); + break; + case Settings::ASYMMETRIC_CIRCLES_GRID: + found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID ); + break; + default: + found = false; + break; + } + //! [find_pattern] + //! [pattern_found] + if ( found) // If done with success, + { + // improve the found corners' coordinate accuracy for chessboard + if( s.calibrationPattern == Settings::CHESSBOARD) + { + Mat viewGray; + cvtColor(view, viewGray, COLOR_BGR2GRAY); + cornerSubPix( viewGray, pointBuf, Size(winSize,winSize), + Size(-1,-1), TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 30, 0.0001 )); + } + + if( mode == CAPTURING && // For camera only take new samples after delay time + (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) ) + { + imagePoints.push_back(pointBuf); + prevTimestamp = clock(); + blinkOutput = s.inputCapture.isOpened(); + } + + // Draw the corners. + drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found ); + } + //! [pattern_found] + //----------------------------- Output Text ------------------------------------------------ + //! [output_text] + string msg = (mode == CAPTURING) ? "100/100" : + mode == CALIBRATED ? "Calibrated" : "Press 'g' to start"; + int baseLine = 0; + Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); + Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10); + + if( mode == CAPTURING ) + { + if(s.showUndistorsed) + msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames ); + else + msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames ); + } + + putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED); + + if( blinkOutput ) + bitwise_not(view, view); + //! [output_text] + //------------------------- Video capture output undistorted ------------------------------ + //! [output_undistorted] + if( mode == CALIBRATED && s.showUndistorsed ) + { + Mat temp = view.clone(); + if (s.useFisheye) + cv::fisheye::undistortImage(temp, view, cameraMatrix, distCoeffs); + else + undistort(temp, view, cameraMatrix, distCoeffs); + } + //! 
[output_undistorted] + //------------------------------ Show image and check for input commands ------------------- + //! [await_input] + imshow("Image View", view); + char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay); + + if( key == ESC_KEY ) + break; + + if( key == 'u' && mode == CALIBRATED ) + s.showUndistorsed = !s.showUndistorsed; + + if( s.inputCapture.isOpened() && key == 'g' ) + { + mode = CAPTURING; + imagePoints.clear(); + } + //! [await_input] + } + + // -----------------------Show the undistorted image for the image list ------------------------ + //! [show_results] + if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed ) + { + Mat view, rview, map1, map2; + + if (s.useFisheye) + { + Mat newCamMat; + fisheye::estimateNewCameraMatrixForUndistortRectify(cameraMatrix, distCoeffs, imageSize, + Matx33d::eye(), newCamMat, 1); + fisheye::initUndistortRectifyMap(cameraMatrix, distCoeffs, Matx33d::eye(), newCamMat, imageSize, + CV_16SC2, map1, map2); + } + else + { + initUndistortRectifyMap( + cameraMatrix, distCoeffs, Mat(), + getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0), imageSize, + CV_16SC2, map1, map2); + } + + for(size_t i = 0; i < s.imageList.size(); i++ ) + { + view = imread(s.imageList[i], IMREAD_COLOR); + if(view.empty()) + continue; + remap(view, rview, map1, map2, INTER_LINEAR); + imshow("Image View", rview); + char c = (char)waitKey(); + if( c == ESC_KEY || c == 'q' || c == 'Q' ) + break; + } + } + //! [show_results] + + return 0; +} + +//! [compute_errors] +static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints, + const vector<vector<Point2f> >& imagePoints, + const vector<Mat>& rvecs, const vector<Mat>& tvecs, + const Mat& cameraMatrix , const Mat& distCoeffs, + vector<float>& perViewErrors, bool fisheye) +{ + vector<Point2f> imagePoints2; + size_t totalPoints = 0; + double totalErr = 0, err; + perViewErrors.resize(objectPoints.size()); + + for(size_t i = 0; i < objectPoints.size(); ++i ) + { + if (fisheye) + { + fisheye::projectPoints(objectPoints[i], imagePoints2, rvecs[i], tvecs[i], cameraMatrix, + distCoeffs); + } + else + { + projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2); + } + err = norm(imagePoints[i], imagePoints2, NORM_L2); + + size_t n = objectPoints[i].size(); + perViewErrors[i] = (float) std::sqrt(err*err/n); + totalErr += err*err; + totalPoints += n; + } + + return std::sqrt(totalErr/totalPoints); +} +//! [compute_errors] +//! [board_corners] +static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners, + Settings::Pattern patternType /*= Settings::CHESSBOARD*/) +{ + corners.clear(); + + switch(patternType) + { + case Settings::CHESSBOARD: + case Settings::CIRCLES_GRID: + for( int i = 0; i < boardSize.height; ++i ) + for( int j = 0; j < boardSize.width; ++j ) + corners.push_back(Point3f(j*squareSize, i*squareSize, 0)); + break; + + case Settings::ASYMMETRIC_CIRCLES_GRID: + for( int i = 0; i < boardSize.height; i++ ) + for( int j = 0; j < boardSize.width; j++ ) + corners.push_back(Point3f((2*j + i % 2)*squareSize, i*squareSize, 0)); + break; + default: + break; + } +} +//! [board_corners] +static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs, + vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs, + vector<float>& reprojErrs, double& totalAvgErr, vector<Point3f>& newObjPoints, + float grid_width, bool release_object) +{ + //! 
[fixed_aspect] + cameraMatrix = Mat::eye(3, 3, CV_64F); + if( s.flag & CALIB_FIX_ASPECT_RATIO ) + cameraMatrix.at<double>(0,0) = s.aspectRatio; + //! [fixed_aspect] + if (s.useFisheye) { + distCoeffs = Mat::zeros(4, 1, CV_64F); + } else { + distCoeffs = Mat::zeros(8, 1, CV_64F); + } + + vector<vector<Point3f> > objectPoints(1); + calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern); + objectPoints[0][s.boardSize.width - 1].x = objectPoints[0][0].x + grid_width; + newObjPoints = objectPoints[0]; + + objectPoints.resize(imagePoints.size(),objectPoints[0]); + + //Find intrinsic and extrinsic camera parameters + double rms; + + if (s.useFisheye) { + Mat _rvecs, _tvecs; + rms = fisheye::calibrate(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, _rvecs, + _tvecs, s.flag); + + rvecs.reserve(_rvecs.rows); + tvecs.reserve(_tvecs.rows); + for(int i = 0; i < int(objectPoints.size()); i++){ + rvecs.push_back(_rvecs.row(i)); + tvecs.push_back(_tvecs.row(i)); + } + } else { + int iFixedPoint = -1; + if (release_object) + iFixedPoint = s.boardSize.width - 1; + rms = calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint, + cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints, + s.flag | CALIB_USE_LU); + } + + if (release_object) { + cout << "New board corners: " << endl; + cout << newObjPoints[0] << endl; + cout << newObjPoints[s.boardSize.width - 1] << endl; + cout << newObjPoints[s.boardSize.width * (s.boardSize.height - 1)] << endl; + cout << newObjPoints.back() << endl; + } + + cout << "Re-projection error reported by calibrateCamera: "<< rms << endl; + + bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs); + + objectPoints.clear(); + objectPoints.resize(imagePoints.size(), newObjPoints); + totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs, cameraMatrix, + distCoeffs, reprojErrs, s.useFisheye); + + return ok; +} + +// Print camera parameters to the output file +static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs, + const vector<Mat>& rvecs, const vector<Mat>& tvecs, + const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints, + double totalAvgErr, const vector<Point3f>& newObjPoints ) +{ + FileStorage fs( s.outputFileName, FileStorage::WRITE ); + + time_t tm; + time( &tm ); + struct tm *t2 = localtime( &tm ); + char buf[1024]; + strftime( buf, sizeof(buf), "%c", t2 ); + + fs << "calibration_time" << buf; + + if( !rvecs.empty() || !reprojErrs.empty() ) + fs << "nr_of_frames" << (int)std::max(rvecs.size(), reprojErrs.size()); + fs << "image_width" << imageSize.width; + fs << "image_height" << imageSize.height; + fs << "board_width" << s.boardSize.width; + fs << "board_height" << s.boardSize.height; + fs << "square_size" << s.squareSize; + + if( s.flag & CALIB_FIX_ASPECT_RATIO ) + fs << "fix_aspect_ratio" << s.aspectRatio; + + if (s.flag) + { + std::stringstream flagsStringStream; + if (s.useFisheye) + { + flagsStringStream << "flags:" + << (s.flag & fisheye::CALIB_FIX_SKEW ? " +fix_skew" : "") + << (s.flag & fisheye::CALIB_FIX_K1 ? " +fix_k1" : "") + << (s.flag & fisheye::CALIB_FIX_K2 ? " +fix_k2" : "") + << (s.flag & fisheye::CALIB_FIX_K3 ? " +fix_k3" : "") + << (s.flag & fisheye::CALIB_FIX_K4 ? " +fix_k4" : "") + << (s.flag & fisheye::CALIB_RECOMPUTE_EXTRINSIC ? " +recompute_extrinsic" : ""); + } + else + { + flagsStringStream << "flags:" + << (s.flag & CALIB_USE_INTRINSIC_GUESS ? 
" +use_intrinsic_guess" : "") + << (s.flag & CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "") + << (s.flag & CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "") + << (s.flag & CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "") + << (s.flag & CALIB_FIX_K1 ? " +fix_k1" : "") + << (s.flag & CALIB_FIX_K2 ? " +fix_k2" : "") + << (s.flag & CALIB_FIX_K3 ? " +fix_k3" : "") + << (s.flag & CALIB_FIX_K4 ? " +fix_k4" : "") + << (s.flag & CALIB_FIX_K5 ? " +fix_k5" : ""); + } + fs.writeComment(flagsStringStream.str()); + } + + fs << "flags" << s.flag; + + fs << "fisheye_model" << s.useFisheye; + + fs << "camera_matrix" << cameraMatrix; + fs << "distortion_coefficients" << distCoeffs; + + fs << "avg_reprojection_error" << totalAvgErr; + if (s.writeExtrinsics && !reprojErrs.empty()) + fs << "per_view_reprojection_errors" << Mat(reprojErrs); + + if(s.writeExtrinsics && !rvecs.empty() && !tvecs.empty() ) + { + CV_Assert(rvecs[0].type() == tvecs[0].type()); + Mat bigmat((int)rvecs.size(), 6, CV_MAKETYPE(rvecs[0].type(), 1)); + bool needReshapeR = rvecs[0].depth() != 1 ? true : false; + bool needReshapeT = tvecs[0].depth() != 1 ? true : false; + + for( size_t i = 0; i < rvecs.size(); i++ ) + { + Mat r = bigmat(Range(int(i), int(i+1)), Range(0,3)); + Mat t = bigmat(Range(int(i), int(i+1)), Range(3,6)); + + if(needReshapeR) + rvecs[i].reshape(1, 1).copyTo(r); + else + { + //*.t() is MatExpr (not Mat) so we can use assignment operator + CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1); + r = rvecs[i].t(); + } + + if(needReshapeT) + tvecs[i].reshape(1, 1).copyTo(t); + else + { + CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1); + t = tvecs[i].t(); + } + } + fs.writeComment("a set of 6-tuples (rotation vector + translation vector) for each view"); + fs << "extrinsic_parameters" << bigmat; + } + + if(s.writePoints && !imagePoints.empty() ) + { + Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2); + for( size_t i = 0; i < imagePoints.size(); i++ ) + { + Mat r = imagePtMat.row(int(i)).reshape(2, imagePtMat.cols); + Mat imgpti(imagePoints[i]); + imgpti.copyTo(r); + } + fs << "image_points" << imagePtMat; + } + + if( s.writeGrid && !newObjPoints.empty() ) + { + fs << "grid_points" << newObjPoints; + } +} + +//! [run_and_save] +bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, + vector<vector<Point2f> > imagePoints, float grid_width, bool release_object) +{ + vector<Mat> rvecs, tvecs; + vector<float> reprojErrs; + double totalAvgErr = 0; + vector<Point3f> newObjPoints; + + bool ok = runCalibration(s, imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs, reprojErrs, + totalAvgErr, newObjPoints, grid_width, release_object); + cout << (ok ? "Calibration succeeded" : "Calibration failed") + << ". avg re projection error = " << totalAvgErr << endl; + + if (ok) + saveCameraParams(s, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, reprojErrs, imagePoints, + totalAvgErr, newObjPoints); + return ok; +} +//! 
[run_and_save]
diff --git a/cv-node/src/local.cpp b/cv-node/src/local.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c4ddd0bde56729836144dfdd9ac358f74126f35
--- /dev/null
+++ b/cv-node/src/local.cpp
@@ -0,0 +1,118 @@
+#include <ftl/local.hpp>
+
+#include <opencv2/core.hpp>
+#include <opencv2/opencv.hpp>
+#include <string>
+#include <chrono>
+#include <glog/logging.h>
+
+using ftl::LocalSource;
+using cv::Mat;
+using cv::VideoCapture;
+using cv::Rect;
+using std::string;
+using namespace std::chrono;
+
+LocalSource::LocalSource() : timestamp_(0.0) {
+    // Use cameras
+    camera_a_ = new VideoCapture(0);
+    camera_b_ = new VideoCapture(1);
+
+    if (!camera_a_->isOpened()) {
+        delete camera_a_;
+        delete camera_b_;
+        camera_a_ = nullptr;
+        camera_b_ = nullptr;
+        LOG(FATAL) << "No cameras found";
+        return;
+    }
+
+    if (!camera_b_->isOpened()) {
+        delete camera_b_;
+        camera_b_ = nullptr;
+        stereo_ = false;
+        LOG(WARNING) << "Not able to find second camera for stereo";
+    } else {
+        stereo_ = true;
+    }
+}
+
+LocalSource::LocalSource(const string &vid): timestamp_(0.0) {
+    if (vid == "") {
+        LOG(FATAL) << "No video file specified";
+        camera_a_ = nullptr;
+        camera_b_ = nullptr;
+        return;
+    }
+
+    camera_a_ = new VideoCapture(vid.c_str());
+    camera_b_ = nullptr;
+
+    if (!camera_a_->isOpened()) {
+        delete camera_a_;
+        camera_a_ = nullptr;
+        LOG(FATAL) << "Unable to load video file";
+        return;
+    }
+
+    // Read first frame to determine stereo
+    Mat frame;
+    if (!camera_a_->read(frame)) {
+        LOG(FATAL) << "No data in video file";
+    }
+
+    if (frame.cols >= 2*frame.rows) {
+        stereo_ = true;
+    } else {
+        stereo_ = false;
+    }
+}
+
+bool LocalSource::get(cv::Mat &l, cv::Mat &r) {
+    if (!camera_a_) return false;
+
+    if (!camera_a_->grab()) {
+        LOG(ERROR) << "Unable to grab from camera A";
+        return false;
+    }
+    if (camera_b_ && !camera_b_->grab()) {
+        LOG(ERROR) << "Unable to grab from camera B";
+        return false;
+    }
+
+    // Record timestamp
+    timestamp_ = duration_cast<duration<double>>(
+            high_resolution_clock::now().time_since_epoch()).count();
+
+    if (camera_b_ || !stereo_) {  // two cameras, or a mono source (r is left empty for mono)
+        if (!camera_a_->retrieve(l)) {
+            LOG(ERROR) << "Unable to read frame from camera A";
+            return false;
+        }
+        if (camera_b_ && !camera_b_->retrieve(r)) {
+            LOG(ERROR) << "Unable to read frame from camera B";
+            return false;
+        }
+    } else {  // stereo video file: split the side-by-side frame into halves
+        Mat frame;
+        if (!camera_a_->retrieve(frame)) {
+            LOG(ERROR) << "Unable to read frame from video";
+            return false;
+        }
+
+        int resx = frame.cols / 2;  // width of one half of the side-by-side frame
+        l = Mat(frame, Rect(0, 0, resx, frame.rows));
+        r = Mat(frame, Rect(resx, 0, frame.cols - resx, frame.rows));
+    }
+
+    return true;
+}
+
+double LocalSource::getTimestamp() const {
+    return timestamp_;
+}
+
+bool LocalSource::isStereo() const {
+    return stereo_;
+}
+
diff --git a/cv-node/src/main.cpp b/cv-node/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..bf42213999ebb5646f67f30f7c22658fd74acd60
--- /dev/null
+++ b/cv-node/src/main.cpp
@@ -0,0 +1,103 @@
+#include <opencv2/opencv.hpp>
+#include <ftl/local.hpp>
+#include <ftl/synched.hpp>
+#include <ftl/calibrate.hpp>
+
+#include <string>
+#include <vector>
+
+using namespace ftl;
+using std::string;
+using std::vector;
+using cv::Mat;
+
+static vector<string> OPTION_peers;
+static vector<string> OPTION_channels;
+static string OPTION_calibration_config;
+static string OPTION_config;
+static bool OPTION_display = false;
+static bool OPTION_calibrate = false;
+
+void handle_options(char ***argv, int *argc) {
+    while (*argc > 0) {
+        string cmd((*argv)[0]);
+        if (cmd[0] != '-') break;
+
+        if (cmd.find("--calibrate") == 0) {
+            OPTION_calibrate = true;
+        } else if (cmd.find("--peer=") == 0) {
+            cmd = cmd.substr(cmd.find("=")+1);
+            OPTION_peers.push_back(cmd);
+        } else if (cmd.find("--channel=") == 0) {
+            cmd = cmd.substr(cmd.find("=")+1);
+            OPTION_channels.push_back(cmd);
+        } else if (cmd.find("--calibration=") == 0) {
+            cmd = cmd.substr(cmd.find("=")+1);
+            OPTION_calibration_config = cmd;
+        } else if (cmd.find("--config=") == 0) {
+            cmd = cmd.substr(cmd.find("=")+1);
+            OPTION_config = cmd;
+        } else if (cmd.find("--display") == 0) {
+            OPTION_display = true;
+        }
+
+        (*argc)--;
+        (*argv)++;
+    }
+}
+
+int main(int argc, char **argv) {
+    argc--;
+    argv++;
+
+    // Process Arguments
+    handle_options(&argv, &argc);
+
+    // TODO Initiate the network
+
+    LocalSource *lsrc;
+
+    if (argc) {
+        // Load video file
+        lsrc = new LocalSource(argv[0]);
+    } else {
+        // Use cameras
+        lsrc = new LocalSource();
+    }
+
+    auto sync = new SyncSource(); // TODO Pass protocol object
+    // Add any remote channels
+    for (auto c : OPTION_channels) {
+        sync->addChannel(c);
+    }
+
+    Calibrate calibrate(lsrc, OPTION_calibration_config);
+
+    while (true) {
+        Mat l, r;
+
+        calibrate.undistort(l, r);
+
+        // Feed into sync buffer and network forward
+        sync->feed(LEFT, l, lsrc->getTimestamp());
+        sync->feed(RIGHT, r, lsrc->getTimestamp());
+
+        // Read back from buffer
+        sync->get(LEFT, l);
+        sync->get(RIGHT, r);
+        //double latency = sync->latency();
+
+        // TODO Pose and disparity etc here...
+
+        // TODO Send RGB+D data somewhere
+
+        if (!l.empty()) cv::imshow("Left", l);
+        if (lsrc->isStereo() && !r.empty()) cv::imshow("Right", r);
+
+        if (cv::waitKey(1) == 27) {
+            // exit if ESC is pressed
+            break;
+        }
+    }
+}
+
diff --git a/cv-node/src/sync.cpp b/cv-node/src/sync.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f57e5164696aa44cf13501082834fd9b8f7d13e5
--- /dev/null
+++ b/cv-node/src/sync.cpp
@@ -0,0 +1,29 @@
+#include <ftl/synched.hpp>
+
+using ftl::SyncSource;
+using cv::Mat;
+
+SyncSource::SyncSource() {
+    channels_.push_back(Mat());
+    channels_.push_back(Mat());
+}
+
+void SyncSource::addChannel(const std::string &c) {
+    // TODO Register the remote channel
+}
+
+void SyncSource::feed(int channel, cv::Mat &m, double ts) {
+    if (channel < 0 || channel >= static_cast<int>(channels_.size())) return;
+    channels_[channel] = m;
+}
+
+bool SyncSource::get(int channel, cv::Mat &m) {
+    if (channel < 0 || channel >= static_cast<int>(channels_.size())) return false;
+    m = channels_[channel];
+    return true;
+}
+
+double SyncSource::latency() const {
+    return 0.0;
+}
+