From 25400f890591b27f4765b2affb58569cd711e6e7 Mon Sep 17 00:00:00 2001
From: Sebastian Hahta <joseha@utu.fi>
Date: Fri, 2 Aug 2019 21:47:28 +0300
Subject: [PATCH] multi-camera calibration: all pieces working now

needs cleanup and (online) visualization of the final results
---
 CMakeLists.txt                                |   7 +-
 applications/calibration-multi/CMakeLists.txt |  12 +
 applications/calibration-multi/src/main.cpp   | 509 ++++++++++++
 .../calibration-multi/src/multicalibrate.cpp  | 777 ++++++++++++++++++
 .../calibration-multi/src/multicalibrate.hpp  | 150 ++++
 applications/calibration-multi/src/util.cpp   | 174 ++++
 applications/calibration-multi/src/util.hpp   | 110 +++
 .../calibration-multi/src/visibility.cpp      | 147 ++++
 .../calibration-multi/src/visibility.hpp      |  45 +
 applications/calibration/src/lens.cpp         |   3 +-
 applications/calibration/src/stereo.cpp       |  29 +-
 applications/reconstruct/src/voxel_scene.cpp  |   1 +
 applications/registration/src/aruco.cpp       |   2 +
 .../rgbd-sources/src/cuda_algorithms.hpp      |   3 +
 .../rgbd-sources/src/middlebury_source.cpp    |  39 +-
 components/rgbd-sources/src/net.cpp           |   5 +
 components/rgbd-sources/src/stereovideo.cpp   |   4 +-
 17 files changed, 1967 insertions(+), 50 deletions(-)
 create mode 100644 applications/calibration-multi/CMakeLists.txt
 create mode 100644 applications/calibration-multi/src/main.cpp
 create mode 100644 applications/calibration-multi/src/multicalibrate.cpp
 create mode 100644 applications/calibration-multi/src/multicalibrate.hpp
 create mode 100644 applications/calibration-multi/src/util.cpp
 create mode 100644 applications/calibration-multi/src/util.hpp
 create mode 100644 applications/calibration-multi/src/visibility.cpp
 create mode 100644 applications/calibration-multi/src/visibility.hpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6cb60d392..91c934b9c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,7 +6,6 @@ include(CheckLanguage)
 
 project (ftl.utu.fi)
 
-set(CMAKE_CODELITE_USE_TARGETS ON)
 include(GNUInstallDirs)
 include(CTest)
 enable_testing()
@@ -16,6 +15,7 @@ option(WITH_FIXSTARS "Use Fixstars libSGM if available" ON)
 option(BUILD_VISION "Enable the vision component" ON)
 option(BUILD_RECONSTRUCT "Enable the reconstruction component" ON)
 option(BUILD_RENDERER "Enable the renderer component" ON)
+option(BUILD_CALIBRATION "Enable the calibration component" OFF)
 
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
@@ -187,6 +187,11 @@ if (BUILD_VISION)
 	add_subdirectory(applications/vision)
 endif()
 
+if (BUILD_CALIBRATION)
+	find_package( cvsba REQUIRED )
+	add_subdirectory(applications/calibration-multi)
+endif()
+
 if (HAVE_PCL)
 	add_subdirectory(applications/registration)
 endif()
diff --git a/applications/calibration-multi/CMakeLists.txt b/applications/calibration-multi/CMakeLists.txt
new file mode 100644
index 000000000..ae1f1bb16
--- /dev/null
+++ b/applications/calibration-multi/CMakeLists.txt
@@ -0,0 +1,12 @@
+set(CALIBMULTI
+	src/main.cpp
+	src/visibility.cpp
+	src/util.cpp
+	src/multicalibrate.cpp
+)
+
+add_executable(ftl-calibrate-multi ${CALIBMULTI})
+
+target_include_directories(ftl-calibrate-multi PRIVATE src)
+
+target_link_libraries(ftl-calibrate-multi ftlcommon ftlnet ftlrgbd Threads::Threads ${OpenCV_LIBS} ${cvsba_LIBS})
diff --git a/applications/calibration-multi/src/main.cpp b/applications/calibration-multi/src/main.cpp
new file mode 100644
index 000000000..a3d36345e
--- /dev/null
+++ b/applications/calibration-multi/src/main.cpp
@@ -0,0 +1,509 @@
+#include <loguru.hpp>
+
+#include <ftl/configuration.hpp>
+#include <ftl/net/universe.hpp>
+#include <ftl/rgbd/source.hpp>
+#include <ftl/rgbd/group.hpp>
+
+#include <opencv2/core.hpp>
+#include <opencv2/aruco.hpp>
+#include <opencv2/core/eigen.hpp>
+
+#include <algorithm>
+#include <atomic>
+#include <ctime>
+#include <fstream>
+#include <mutex>
+#include <numeric>
+
+#include "util.hpp"
+#include "multicalibrate.hpp"
+
+using std::string;
+using std::optional;
+
+using std::list;
+using std::vector;
+using std::map;
+using std::pair;
+using std::make_pair;
+
+using cv::Mat;
+using cv::Scalar;
+using cv::Size;
+using cv::Point2i;
+using cv::Point2f;
+using cv::Point2d;
+using cv::Point3f;
+using cv::Point3d;
+using cv::Vec4f;
+using cv::Vec4d;
+
+using ftl::net::Universe;
+using ftl::rgbd::Source;
+
+Mat getCameraMatrix(const ftl::rgbd::Camera &parameters) {
+	Mat m = (cv::Mat_<double>(3,3) << parameters.fx, 0.0, -parameters.cx, 0.0, parameters.fy, -parameters.cy, 0.0, 0.0, 1.0);
+	return m;
+}
+
+void to_json(nlohmann::json &json, map<string, Eigen::Matrix4d> &transformations) {
+	for (auto &item : transformations) {
+		auto val = nlohmann::json::array();
+		for(size_t i = 0; i < 16; i++) { val.push_back((float) item.second.data()[i]); }
+		json[item.first] = val;
+	}
+}
+
+// FileStorage allows only alphanumeric keys (code below does not work with URIs)
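+// (e.g. a key containing "://" causes FileStorage to throw)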
+
+bool saveRegistration(const string &ofile, const map<string, Mat> &data) {
+	cv::FileStorage fs(ofile, cv::FileStorage::WRITE);
+	if (!fs.isOpened()) return false;
+	for (auto &item : data) { fs << item.first << item.second; }
+	fs.release();
+	return true;
+}
+
+bool saveRegistration(const string &ofile, const map<string, Eigen::Matrix4d> &data) {
+	map<string, Mat> _data;
+	for (auto &item : data) {
+		Mat M;
+		cv::eigen2cv(item.second, M);
+		_data[item.first] = M; 
+	}
+	return saveRegistration(ofile, _data);
+}
+
+bool loadRegistration(const string &ifile, map<string, Mat> &data) {
+	cv::FileStorage fs(ifile, cv::FileStorage::READ);
+	if (!fs.isOpened()) return false;
+	for(cv::FileNodeIterator fit = fs.getFirstTopLevelNode().begin();
+		fit != fs.getFirstTopLevelNode().end();
+		++fit)
+	{
+		data[(*fit).name()] = (*fit).mat();
+	}
+	fs.release();
+	return true; // TODO errors?
+}
+
+bool loadRegistration(const string &ifile, map<string, Eigen::Matrix4d> &data) {
+	map<string, Mat> _data;
+	if (!loadRegistration(ifile, _data)) return false;
+	for (auto &item : _data) {
+		Eigen::Matrix4d M;
+		cv::cv2eigen(item.second, M);
+		data[item.first] = M;
+	}
+	return true;
+}
+
+//
+
+bool saveIntrinsics(const string &ofile, const vector<Mat> &M) {
+	// read existing distortion coefficients so they are preserved when the
+	// file is rewritten with the new camera matrices
+	vector<Mat> D;
+	{
+		cv::FileStorage fs(ofile, cv::FileStorage::READ);
+		fs["D"] >> D;
+		fs.release();
+	}
+	{
+		cv::FileStorage fs(ofile, cv::FileStorage::WRITE);
+		if (fs.isOpened()) {
+			fs << "K" << M << "D" << D;
+			fs.release();
+			return true;
+		}
+		else {
+			LOG(ERROR) << "Error: cannot save the intrinsic parameters to '" << ofile << "'";
+		}
+		return false;
+	}
+}
+
+bool saveExtrinsics(const string &ofile, Mat &R, Mat &T, Mat &R1, Mat &R2, Mat &P1, Mat &P2, Mat &Q) {
+	cv::FileStorage fs;
+	fs.open(ofile, cv::FileStorage::WRITE);
+	if (fs.isOpened()) {
+		fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1"
+			<< P1 << "P2" << P2 << "Q" << Q;
+		fs.release();
+		return true;
+	} else {
+		LOG(ERROR) << "Error: cannot save the extrinsic parameters";
+	}
+	return false;
+}
+
+void stack(const vector<Mat> &img, Mat &out, const int rows, const int cols) {
+	Size size = img[0].size();
+	Size size_out = Size(size.width * cols, size.height * rows);
+	if (size_out != out.size() || out.type() != CV_8UC3) {
+		out = Mat(size_out, CV_8UC3, Scalar(0, 0, 0));
+	}
+
+	for (size_t i = 0; i < img.size(); i++) {
+		int row = i % rows;
+		int col = i / rows;
+		auto rect = cv::Rect(size.width * col, size.height * row, size.width, size.height);
+		img[i].copyTo(out(rect));
+	}
+}
+
+void stack(const vector<Mat> &img, Mat &out) {
+	// TODO: compute grid size from the number of images
+	int rows = 2;
+	int cols = 5;
+	stack(img, out, rows, cols);
+}
+
+string time_now_string() {
+	char timestamp[18];
+	std::time_t t=std::time(NULL);
+	std::strftime(timestamp, sizeof(timestamp), "%F-%H%M%S", std::localtime(&t));
+	return string(timestamp);
+}
+
+struct CalibrationParams {
+	string output_path;
+	string registration_file;
+	vector<size_t> idx_cameras;
+	bool save_extrinsic = true;
+	bool save_intrinsic = false;
+	bool optimize_intrinsic = false;
+	int reference_camera = -1;
+	double alpha = 0.0;
+};
+
+void calibrate(	MultiCameraCalibrationNew &calib, vector<string> &uri_cameras,
+				const CalibrationParams &params, vector<Mat> &map1, vector<Mat> &map2)
+{
+	int reference_camera = params.reference_camera;
+	if (params.reference_camera < 0) {
+		reference_camera = calib.getOptimalReferenceCamera();
+		// cameras come in stereo pairs; clear the lowest bit so the
+		// reference is always the left camera of a pair
+		reference_camera -= (reference_camera & 1);
+		LOG(INFO) << "optimal camera: " << reference_camera;
+	}
+
+	if (params.optimize_intrinsic) calib.setFixIntrinsic(0);
+
+	calib.calibrateAll(reference_camera);
+	vector<Mat> R, t;
+	calib.getCalibration(R, t);
+
+	size_t n_cameras = calib.getCamerasCount();
+
+	vector<Mat> R_rect(n_cameras), t_rect(n_cameras);
+	vector<Mat> Rt_out(n_cameras);
+	map1.resize(n_cameras);
+	map2.resize(n_cameras);
+
+	for (size_t c = 0; c < n_cameras; c += 2) {
+		Mat K1 = calib.getCameraMat(c);
+		Mat K2 = calib.getCameraMat(c + 1);
+		Mat D1 = calib.getDistCoeffs(c);
+		Mat D2 = calib.getDistCoeffs(c + 1);
+		Mat P1, P2, Q;
+		Mat R1, R2;
+		Mat R_c1c2, T_c1c2;
+
+		calculateTransform(R[c], t[c], R[c + 1], t[c + 1], R_c1c2, T_c1c2);
+		cv::stereoRectify(K1, D1, K2, D2, Size(1280, 720), R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, params.alpha);
+		
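+		// Rt_out maps rectified camera coordinates to the reference frame:
+		// first undo the rectifying rotation (R1/R2), then apply the
+		// camera-to-reference transform (R, t).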
+		Mat _t = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
+		Rt_out[c] = getMat4x4(R[c], t[c]) * getMat4x4(R1, _t).inv();
+		Rt_out[c + 1] = getMat4x4(R[c + 1], t[c + 1]) * getMat4x4(R2, _t).inv();
+
+		{
+			string node_name;
+			size_t pos1 = uri_cameras[c/2].find("node");
+			size_t pos2 = uri_cameras[c/2].find("#", pos1);
+			node_name = uri_cameras[c/2].substr(pos1, pos2 - pos1);
+		
+			if (params.save_extrinsic) {
+				saveExtrinsics(params.output_path + node_name + "-extrinsic.yml", R_c1c2, T_c1c2, R1, R2, P1, P2, Q);
+				LOG(INFO) << "Saved: " << params.output_path + node_name + "-extrinsic.yml";
+			}
+			if (params.save_intrinsic) {
+				saveIntrinsics(params.output_path + node_name + "-intrinsic.yml",
+					{calib.getCameraMat(c), calib.getCameraMat(c + 1)}
+				);
+				LOG(INFO) << "Saved: " << params.output_path + node_name + "-intrinsic.yml";
+			}
+		}
+		cv::stereoRectify(K1, D1, K2, D2, Size(1280, 720), R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, 1.0);
+		cv::initUndistortRectifyMap(K1, D1, R1, P1, Size(1280, 720), CV_16SC2, map1[c], map2[c]);
+		cv::initUndistortRectifyMap(K2, D2, R2, P2, Size(1280, 720), CV_16SC2, map1[c + 1], map2[c + 1]);
+	}
+
+	{
+		map<string, Eigen::Matrix4d> out;
+		for (size_t i = 0; i < n_cameras; i += 2) {
+			Eigen::Matrix4d M_eigen;
+			Mat M_cv = Rt_out[i];
+			cv::cv2eigen(M_cv, M_eigen);
+			out[uri_cameras[i/2]] = M_eigen;
+		}
+		
+		nlohmann::json out_json;
+		to_json(out_json, out);
+		if (params.save_extrinsic) {
+			std::ofstream file_out(params.registration_file);
+			file_out << out_json;
+		}
+		else {
+			LOG(INFO) << "Registration not saved to file";
+			LOG(INFO) << out_json;
+		}
+	}
+}
+
+void calibrateFromPath(	const string &path,
+						const string &filename,
+						CalibrationParams &params,
+						bool visualize=false)
+{
+	size_t reference_camera = 0;
+	auto calib = MultiCameraCalibrationNew(0, reference_camera, CalibrationTarget(0.250));
+	
+	vector<string> uri_cameras;
+	cv::FileStorage fs(path + filename, cv::FileStorage::READ);
+	fs["uri"] >> uri_cameras;
+
+	params.idx_cameras.resize(uri_cameras.size() * 2);
+	std::iota(params.idx_cameras.begin(), params.idx_cameras.end(), 0);
+
+	calib.loadInput(path + filename, params.idx_cameras);
+	
+	vector<Mat> map1, map2;
+	calibrate(calib, uri_cameras, params, map1, map2);
+
+	if (!visualize) return;
+
+	vector<Scalar> colors = {
+		Scalar(64, 64, 255),
+		Scalar(64, 64, 255),
+		Scalar(64, 255, 64),
+		Scalar(64, 255, 64),
+	};
+	vector<int> markers = {cv::MARKER_SQUARE, cv::MARKER_DIAMOND};
+
+	Mat show;
+	size_t n_cameras = calib.getCamerasCount();
+	vector<Mat> rgb(n_cameras);
+	size_t i = 0;
+	while(ftl::running) {
+		for (size_t c = 0; c < n_cameras; c++) {
+			rgb[c] = cv::imread(path + std::to_string(params.idx_cameras[c]) + "_" + std::to_string(i) + ".jpg");
+
+			vector<Point2d> points;
+			calib.projectPointsOptimized(c, i, points); // project BA-optimized points into the image
+
+			for (Point2d &p : points) {
+				cv::drawMarker(rgb[c], cv::Point2i(p), colors[c % colors.size()], markers[c % markers.size()], 10, 1);
+			}
+			cv::remap(rgb[c], rgb[c], map1[c], map2[c], cv::INTER_CUBIC);
+
+			for (int r = 50; r < rgb[c].rows; r = r+50) {
+				cv::line(rgb[c], cv::Point(0, r), cv::Point(rgb[c].cols-1, r), cv::Scalar(0,0,255), 1);
+			}
+		}
+
+		stack(rgb, show);
+		cv::namedWindow("Calibration", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
+		cv::imshow("Calibration", show);
+		
+		i = (i + 1) % calib.getViewsCount();
+		if (cv::waitKey(50) != -1) { ftl::running = false; }
+	}
+}
+
+void calibrateCameras(	ftl::Configurable* root,
+						int n_views, int min_visible,
+						string path, string filename,
+						bool save_input,
+						CalibrationParams &params)
+{
+	Universe *net = ftl::create<Universe>(root, "net");
+	net->start();
+	net->waitConnections();
+
+	vector<Source*> sources = ftl::createArray<Source>(root, "sources", net);
+
+	const size_t n_sources = sources.size();
+	const size_t n_cameras = n_sources * 2;
+	size_t reference_camera = 0;
+
+	params.idx_cameras.resize(n_cameras);
+	std::iota(params.idx_cameras.begin(), params.idx_cameras.end(), 0);
+
+	// TODO: parameter for calibration target type
+	auto calib = MultiCameraCalibrationNew(n_cameras, reference_camera, CalibrationTarget(0.250));
+
+	for (size_t i = 0; i < n_sources; i++) {
+		Mat K;
+		K = getCameraMatrix(sources[i]->parameters(ftl::rgbd::kChanRight));
+		calib.setCameraParameters(2 * i + 1, K);
+		K = getCameraMatrix(sources[i]->parameters(ftl::rgbd::kChanLeft));
+		calib.setCameraParameters(2 * i, K);
+	}
+
+	ftl::rgbd::Group group;
+	for (Source* src : sources) {
+		src->setChannel(ftl::rgbd::kChanRight);
+		group.addSource(src);
+	}
+
+	std::mutex mutex;
+	std::atomic<bool> new_frames = false;
+	vector<Mat> rgb(n_cameras), rgb_new(n_cameras);
+	
+	group.sync([&mutex, &new_frames, &rgb_new](const ftl::rgbd::FrameSet &frames) {
+		mutex.lock();
+		bool good = true;
+		for (size_t i = 0; i < frames.channel1.size(); i++) {
+			if (frames.channel1[i].empty()) good = false;
+			if (frames.channel2[i].empty()) good = false;
+			if (frames.channel1[i].channels() != 3) good = false; // ASSERT
+			if (frames.channel2[i].channels() != 3) good = false;
+			if (!good) break;
+			
+			frames.channel1[i].copyTo(rgb_new[2 * i]);
+			frames.channel2[i].copyTo(rgb_new[2 * i + 1]);
+		}
+
+		new_frames = good;
+		mutex.unlock();
+		return true;
+	});
+	
+	int iter = 0;
+	Mat show;
+
+	vector<int> visible;
+	vector<vector<Point2d>> points(n_cameras);
+
+	while(calib.getMinVisibility() < n_views) {
+		cv::waitKey(10);
+		while (!new_frames) {
+			for (auto src : sources) { src->grab(30); }
+			cv::waitKey(10);
+		}
+
+		mutex.lock();
+		rgb.swap(rgb_new);
+		new_frames = false;
+		mutex.unlock();
+
+		visible.clear();
+		int n_found = findCorrespondingPoints(rgb, points, visible);
+
+		if (n_found >= min_visible) {
+			calib.addPoints(points, visible);
+			
+			if (save_input) {
+				for (size_t i = 0; i < n_cameras; i++) {
+					cv::imwrite(path + std::to_string(i) + "_" + std::to_string(iter) + ".jpg", rgb[i]);
+				}
+			}
+			iter++;
+		}
+
+		for (size_t i = 0; i < n_cameras; i++) {
+			if (visible[i]) {
+				cv::drawMarker(	rgb[i], points[i][0],
+								Scalar(42, 255, 42), cv::MARKER_TILTED_CROSS, 25, 2);
+				cv::drawMarker(	rgb[i], points[i][1],
+								Scalar(42, 42, 255), cv::MARKER_TILTED_CROSS, 25, 2);
+			}
+			cv::putText(rgb[i],
+						"Camera " + std::to_string(i),
+						Point2i(10, 30),
+						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
+			
+			cv::putText(rgb[i],
+						std::to_string(std::max(0, (int) (n_views - calib.getViewsCount(i)))),
+						Point2i(10, rgb[i].rows-10),
+						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
+
+		}
+
+		stack(rgb, show);
+		cv::namedWindow("Cameras", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
+		cv::imshow("Cameras", show);
+	}
+	cv::destroyWindow("Cameras");
+
+	vector<string> uri;
+	for (size_t i = 0; i < n_sources; i++) {
+		uri.push_back(sources[i]->getURI());
+	}
+
+	if (save_input) {
+		cv::FileStorage fs(path + filename, cv::FileStorage::WRITE);
+		fs << "uri" << uri;
+		calib.saveInput(fs);
+		fs.release();
+	}
+	vector<Mat> m1, m2;
+	calibrate(calib, uri, params, m1, m2);
+}
+
+int main(int argc, char **argv) {
+	auto options = ftl::config::read_options(&argv, &argc);
+	auto root = ftl::configure(argc, argv, "registration_default");
+	
+	// run calibration from saved input?
+	const bool load_input = root->value<bool>("load_input", false);
+	// should calibration input be saved
+	const bool save_input = root->value<bool>("save_input", false);
+	// should extrinsic calibration be saved (only used with load_input)
+	const bool save_extrinsic = root->value<bool>("save_extrinsic", true);
+	// should intrinsic calibration be saved
+	const bool save_intrinsic = root->value<bool>("save_intrinsic", false);
+	const bool optimize_intrinsic = root->value<bool>("optimize_intrinsic", false);
+	// directory where calibration data and images are saved, if save_input enabled
+	const string calibration_data_dir = root->value<string>("calibration_data_dir", "./");
+	// file to save calibration input (2d points and visibility)
+	const string calibration_data_file = root->value<string>("calibration_data_file", "data.yml");
+	// in how many cameras should the pattern be visible
+	const int min_visible = root->value<int>("min_visible", 3);
+	// minimum number of times the pattern must be seen per camera
+	const int n_views = root->value<int>("n_views", 500);
+	// registration file path
+	const string registration_file = root->value<string>("registration_file", FTL_LOCAL_CONFIG_ROOT "/registration.json");
+	// directory where extrinsic calibration files are saved
+	const string output_directory = root->value<string>("output_directory", "./");
+
+	CalibrationParams params;
+	params.save_extrinsic = save_extrinsic;
+	params.save_intrinsic = save_intrinsic;
+	params.optimize_intrinsic = optimize_intrinsic;
+	params.output_path = output_directory;
+	params.registration_file = registration_file;
+
+	LOG(INFO)	<< "\n"
+				<< "\nIMPORTANT: Remember to set \"use_intrinsics\" to false for nodes!"
+				<< "\n"
+				<< "\n                save_input: " << (int) save_input
+				<< "\n                load_input: " << (int) load_input
+				<< "\n            save_extrinsic: " << (int) save_extrinsic
+				<< "\n            save_intrinsic: " << (int) save_intrinsic
+				<< "\n        optimize_intrinsic: " << (int) optimize_intrinsic
+				<< "\n      calibration_data_dir: " << calibration_data_dir
+				<< "\n     calibration_data_file: " << calibration_data_file
+				<< "\n               min_visible: " << min_visible
+				<< "\n                   n_views: " << n_views
+				<< "\n         registration_file: " << registration_file
+				<< "\n          output_directory: " << output_directory
+				<< "\n";
+
+	if (load_input) {
+		calibrateFromPath(calibration_data_dir, calibration_data_file, params, true);
+	}
+	else {
+		calibrateCameras(root, n_views, min_visible, calibration_data_dir, calibration_data_file, save_input, params);
+	}
+
+	return 0;
+}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/multicalibrate.cpp b/applications/calibration-multi/src/multicalibrate.cpp
new file mode 100644
index 000000000..1840e6827
--- /dev/null
+++ b/applications/calibration-multi/src/multicalibrate.cpp
@@ -0,0 +1,777 @@
+#include "multicalibrate.hpp"
+
+#include <opencv2/core.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include <cvsba/cvsba.h>
+#include <loguru.hpp>
+
+#include <cmath>
+#include <map>
+
+using cv::Mat;
+using cv::Size;
+using cv::Point2d;
+using cv::Point3d;
+using cv::Vec4d;
+using cv::Scalar;
+
+using std::string;
+using std::vector;
+using std::map;
+using std::pair;
+using std::make_pair;
+
+double CalibrationTarget::estimateScale(vector<Point3d> points) {
+	
+	// 1. calculate statistics 
+	// 2. reject possible outliers 
+	// 3. calculate scale factor
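+	// the running mean (m) and the sum of squared differences (S) are
+	// computed with Welford's online algorithm; stddev = sqrt(S/n)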
+
+	double f = 0.0;
+	double S = 0.0;
+	double m = 0.0;
+	
+	vector<double> d(points.size() / 2, 0.0);
+
+	for (size_t i = 0; i < points.size(); i += 2) {
+		const Point3d &p1 = points[i];
+		const Point3d &p2 = points[i + 1];
+
+		Point3d p = p1 - p2;
+
+		double x = sqrt(p.x * p.x + p.y * p.y + p.z * p.z);
+		double prev_mean = m;
+		d[i/2] = x;
+
+		f = f + 1.0;
+		m = m + (x - m) / f;
+		S = S + (x - m) * (x - prev_mean);
+
+	}
+
+	double stddev = sqrt(S / f);
+	f = 0.0;
+
+	int outliers = 0;
+	double scale = 0.0;
+
+	for (double l : d) {
+		// TODO:	* Parameterize how large deviation allowed
+		//			* Validate this actually improves quality
+
+		if (std::abs(l - m) > 3.0 * stddev) {
+			outliers++;
+		}
+		else {
+			f += 1.0;
+			scale += 1.0 / l;
+		}
+		DCHECK(scale != INFINITY);
+	}
+
+	if (outliers != 0) {
+		LOG(WARNING) << "Outliers (large std. deviation in scale): " << outliers;
+	}
+
+	LOG(INFO) << "calibration target std. dev. " <<  stddev << " (" << (int) f << " samples), scale: " << scale * calibration_bar_length_ / f;
+
+	return scale * calibration_bar_length_ / f;
+
+	// TODO:	LM-optimization for scale.
+}
+
+MultiCameraCalibrationNew::MultiCameraCalibrationNew(
+			size_t n_cameras, size_t reference_camera, CalibrationTarget target, int fix_intrinsics) :
+		
+	target_(target),
+	visibility_graph_(n_cameras),
+	is_calibrated_(false),
+	n_cameras_(n_cameras),
+	reference_camera_(reference_camera),
+	min_visible_points_(25),
+	fix_intrinsics_(fix_intrinsics == 1 ? 5 : 0),
+
+	K_(n_cameras),
+	dist_coeffs_(n_cameras),
+	R_(n_cameras),
+	t_(n_cameras),
+
+	points3d_optimized_(n_cameras),
+	points3d_(n_cameras),
+	points2d_(n_cameras),
+	visible_(n_cameras),
+
+	fm_method_(cv::FM_8POINT), // RANSAC/LMEDS results need validation (currently they do not work)
+	fm_ransac_threshold_(0.95),
+	fm_confidence_(0.90)
+{
+	for (auto &K : K_) { K = Mat::eye(Size(3, 3), CV_64FC1); }
+	for (auto &d : dist_coeffs_) { d = Mat(Size(5, 1), CV_64FC1, Scalar(0.0)); }
+}
+
+Mat MultiCameraCalibrationNew::getCameraMat(size_t idx) {
+	DCHECK(idx < n_cameras_);
+	Mat K;
+	K_[idx].copyTo(K);
+	return K;
+}
+
+Mat MultiCameraCalibrationNew::getDistCoeffs(size_t idx) {
+	DCHECK(idx < n_cameras_);
+	Mat D;
+	dist_coeffs_[idx].copyTo(D);
+	return D;
+}
+
+void MultiCameraCalibrationNew::setCameraParameters(size_t idx, const Mat &K, const Mat &distCoeffs) {
+	DCHECK(idx < n_cameras_);
+	DCHECK(K.size() == Size(3, 3));
+	DCHECK(distCoeffs.size() == Size(5, 1));
+	K.convertTo(K_[idx], CV_64FC1);
+	distCoeffs.convertTo(dist_coeffs_[idx], CV_64FC1);
+}
+
+void MultiCameraCalibrationNew::setCameraParameters(size_t idx, const Mat &K) {
+	DCHECK(idx < n_cameras_);
+	setCameraParameters(idx, K, dist_coeffs_[idx]);
+}
+
+void MultiCameraCalibrationNew::addPoints(vector<vector<Point2d>> points, vector<int> visible) {
+	DCHECK(points.size() == visible.size());
+	DCHECK(visible.size() == n_cameras_);
+
+	for (size_t i = 0; i < n_cameras_; i++) {
+		visible_[i].insert(visible_[i].end(), points[i].size(), visible[i]);
+		points2d_[i].insert(points2d_[i].end(), points[i].begin(), points[i].end());
+	}
+	visibility_graph_.update(visible);
+}
+
+void MultiCameraCalibrationNew::reset() {
+	is_calibrated_ = false;
+	inlier_ = vector(n_cameras_, vector(points2d_[0].size(), 0));
+	points3d_ = vector(n_cameras_, vector(points2d_[0].size(), Point3d()));
+	points3d_optimized_ = vector(points2d_[0].size(), Point3d());
+	R_ = vector<Mat>(n_cameras_, Mat::eye(Size(3, 3), CV_64FC1));
+	t_ = vector<Mat>(n_cameras_, Mat(Size(1, 3), CV_64FC1, Scalar(0.0)));
+}
+
+void MultiCameraCalibrationNew::saveInput(const string &filename) {
+	cv::FileStorage fs(filename, cv::FileStorage::WRITE);
+	saveInput(fs);
+	fs.release();
+}
+
+void MultiCameraCalibrationNew::saveInput(cv::FileStorage &fs) {
+	fs << "K" << K_;
+	fs << "points2d" << points2d_;
+	fs << "visible" << visible_;
+}
+
+void MultiCameraCalibrationNew::loadInput(const std::string &filename, const vector<size_t> &cameras_in) {
+	points2d_.clear();
+	points3d_.clear();
+	points3d_optimized_.clear();
+	visible_.clear();
+	inlier_.clear();
+
+	cv::FileStorage fs(filename, cv::FileStorage::READ);
+	vector<Mat> K;
+	vector<vector<Point2d>> points2d;
+	vector<vector<int>> visible;
+	fs["K"] >> K;
+	fs["points2d"] >> points2d;
+	fs["visible"] >> visible;
+	fs.release();
+	
+	vector<size_t> cameras;
+	if (cameras_in.size() == 0) {
+		cameras.resize(K.size());
+		size_t i = 0;
+		for (auto &c : cameras) { c = i++; }
+	} 
+	else {
+		cameras.reserve(cameras_in.size());
+		for (auto &c : cameras_in) { cameras.push_back(c); }
+	}
+	
+	n_cameras_ = cameras.size();
+
+	points2d_.resize(n_cameras_);
+	points3d_.resize(n_cameras_);
+	visible_.resize(n_cameras_);
+
+	for (auto const &c : cameras) {
+		K_.push_back(K[c]);
+	}
+	for (size_t c = 0; c < n_cameras_; c++) {
+		points2d_[c].reserve(visible[0].size());
+		points3d_[c].reserve(visible[0].size());
+		visible_[c].reserve(visible[0].size());
+		points3d_optimized_.reserve(visible[0].size());
+	}
+
+	visibility_graph_ = Visibility(n_cameras_);
+	dist_coeffs_.resize(n_cameras_);
+	for (auto &d : dist_coeffs_ ) { d = Mat(Size(5, 1), CV_64FC1, Scalar(0.0)); }
+
+	vector<vector<Point2d>> points2d_add(n_cameras_, vector<Point2d>());
+	vector<int> visible_add(n_cameras_);
+	for (size_t i = 0; i < visible[0].size(); i += target_.n_points) {
+		int count = 0;
+		for (size_t c = 0; c < n_cameras_; c++) {
+			count += visible[cameras[c]][i];
+			points2d_add[c].clear();
+			points2d_add[c].insert(
+								points2d_add[c].begin(),
+								points2d[cameras[c]].begin() + i,
+								points2d[cameras[c]].begin() + i + target_.n_points);
+			visible_add[c] = visible[cameras[c]][i];
+		}
+		if (count >= 2) {
+			addPoints(points2d_add, visible_add);
+		}
+	}
+	reset();
+	
+	DCHECK(points2d_.size() == n_cameras_);
+	DCHECK(points2d_.size() == visible_.size());
+	size_t len = visible_[0].size();
+	for (size_t i = 0; i < n_cameras_; i++) {
+		DCHECK(visible_[i].size() == len);
+		DCHECK(points2d_[i].size() == visible_[i].size());
+	}
+}
+
+size_t MultiCameraCalibrationNew::getViewsCount() {
+	return points2d_[0].size() / target_.n_points;
+}
+
+size_t MultiCameraCalibrationNew::getOptimalReferenceCamera() {
+	return (size_t) visibility_graph_.getOptimalCamera();
+}
+
+bool MultiCameraCalibrationNew::isVisible(size_t camera, size_t idx) {
+	return visible_[camera][idx] == 1;
+}
+
+bool MultiCameraCalibrationNew::isValid(size_t camera, size_t idx) {
+	return inlier_[camera][idx] >= 0;
+}
+
+bool MultiCameraCalibrationNew::isValid(size_t idx) {
+	for (auto camera : inlier_) {
+		if (camera[idx] > 0) return true;
+	}
+	return false;
+}
+
+vector<Point2d> MultiCameraCalibrationNew::getPoints(size_t camera, size_t idx) {
+	return vector<Point2d> (points2d_[camera].begin() + idx * target_.n_points,
+							points2d_[camera].begin() + (idx + 1) * target_.n_points);
+}
+
+
+void MultiCameraCalibrationNew::updatePoints3D(size_t camera, Point3d new_point,
+		size_t idx, const Mat &R, const Mat &t) {
+	
+	int &f = inlier_[camera][idx];
+	Point3d &point = points3d_[camera][idx];
+	new_point = transformPoint(new_point, R, t);
+
+	if (f == -1) return;
+
+	if (f > 0) {
+		// TODO:	remove parameter (10.0 cm - 1.0m); over 0.25m difference
+		//			would most likely suggest very bad triangulation (sync? wrong match?)
+		// 			instead store all triangulations and handle outliers
+		//			(perhaps inverse variance weighted mean?)
+		
+		if (euclideanDistance(point, new_point) > 1.0) {
+			LOG(ERROR) << "bad value (skipping) " << "(" << point << " vs " << new_point << ")";
+			f = -1;
+		}
+		else {
+			point = (point * f + new_point) / (double) (f + 1);
+			f = f + 1;
+		}
+	}
+	else {
+		point = new_point;
+		f = 1;
+	}
+}
+
+void MultiCameraCalibrationNew::updatePoints3D(size_t camera, vector<Point3d> points,
+		vector<size_t> idx, const Mat &R, const Mat &t) {
+	
+	for (size_t i = 0; i < idx.size(); i++) {
+		updatePoints3D(camera, points[i], idx[i], R, t);
+	}
+}
+
+void MultiCameraCalibrationNew::getVisiblePoints(
+		vector<size_t> cameras, vector<vector<Point2d>> &points, vector<size_t> &idx) {
+	
+	size_t n_points_total = points2d_[0].size();
+	DCHECK(cameras.size() <= n_cameras_);
+	DCHECK(n_points_total % target_.n_points == 0);
+	
+	idx.clear();
+	idx.reserve(n_points_total);
+	points.clear();
+	points.resize(cameras.size(), {});
+	
+	for (size_t i = 0; i < n_points_total; i += target_.n_points) {
+		bool visible_all = true;
+
+		for (auto c : cameras) {
+			for (size_t j = 0; j < target_.n_points; j++) {
+				visible_all &= isVisible(c, i + j);
+			}
+		}
+		
+		if (!visible_all) { continue; }
+
+		for (size_t j = 0; j < target_.n_points; j++) {
+			idx.push_back(i + j);
+		}
+
+		for (size_t c = 0; c < cameras.size(); c++) {
+			points[c].insert(points[c].end(),
+							 points2d_[cameras[c]].begin() + i,
+							 points2d_[cameras[c]].begin() + i + target_.n_points
+			);
+		}
+	}
+
+	for (auto p : points) {	DCHECK(idx.size() == p.size()); }
+}
+
+double MultiCameraCalibrationNew::calibratePair(size_t camera_from, size_t camera_to, Mat &rmat, Mat &tvec) {
+	
+	vector<size_t> idx;
+	vector<Point2d> points1, points2;
+	{
+		vector<vector<Point2d>> points2d;
+		getVisiblePoints({camera_from, camera_to}, points2d, idx);
+
+		points1 = points2d[0];
+		points2 = points2d[1];
+	}
+	DCHECK(points1.size() % target_.n_points == 0);
+	DCHECK(points1.size() == points2.size());
+
+	// cameras possibly lack line of sight?
+	DCHECK(points1.size() > 8);
+
+	Mat K1 = K_[camera_from];
+	Mat K2 = K_[camera_to];
+
+	vector<uchar> inliers;
+	Mat F, E;
+	F = cv::findFundamentalMat(points1, points2, fm_method_, fm_ransac_threshold_, fm_confidence_, inliers);
+	E = K2.t() * F * K1;
+
+	// Only include inliers
+	if (fm_method_ == cv::FM_LMEDS || fm_method_ == cv::FM_RANSAC) {
+		vector<Point2d> inliers1, inliers2;
+		vector<size_t> inliers_idx;
+
+		inliers1.reserve(points1.size());
+		inliers2.reserve(points1.size());
+		inliers_idx.reserve(points1.size());
+
+		for (size_t i = 0; i < inliers.size(); i += target_.n_points) {
+			bool inlier = true;
+			
+			for (size_t j = 0; j < target_.n_points; j++) {
+				inlier &= inliers[i+j];
+			}
+
+			if (inlier) {
+				inliers1.insert(inliers1.end(), points1.begin() + i, points1.begin() + i + target_.n_points);
+				inliers2.insert(inliers2.end(), points2.begin() + i, points2.begin() + i + target_.n_points);
+				inliers_idx.insert(inliers_idx.end(), idx.begin() + i, idx.begin() + i + target_.n_points);
+			}
+		}
+		
+		LOG(INFO) << "Total points: " << points1.size() << ", inliers: " << inliers1.size();
+		double ratio_good_points = (double) inliers1.size() / (double) points1.size();
+		if (ratio_good_points < 0.66) {
+			// TODO: ... 
+			LOG(WARNING) << "Over 1/3 of points rejected!";
+			if (ratio_good_points < 0.33) { LOG(FATAL) << "Over 2/3 of points rejected!"; }
+		}
+		
+		DCHECK(inliers1.size() == inliers_idx.size());
+		DCHECK(inliers2.size() == inliers_idx.size());
+
+		std::swap(inliers1, points1);
+		std::swap(inliers2, points2);
+		std::swap(inliers_idx, idx);
+	}
+	
+	// Estimate initial rotation matrix and translation vector and triangulate
+	// points (in camera 1 coordinate system).
+
+	Mat R1, R2, t1, t2;
+	R1 = Mat::eye(Size(3, 3), CV_64FC1);
+	t1 = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
+
+	vector<Point3d> points3d;
+	// Recover pose and convert triangulated points from homogeneous coordinates
+	{
+		Mat points3dh;
+		recoverPose(E, points1, points2, K1, K2, R2, t2, 1000.0, points3dh);
+		points3d.reserve(points3dh.cols);
+
+		for (int col = 0; col < points3dh.cols; col++) {
+			Point3d p = Point3d(points3dh.at<double>(0, col),
+								points3dh.at<double>(1, col),
+								points3dh.at<double>(2, col))
+								/ points3dh.at<double>(3, col);
+			points3d.push_back(p);
+		}
+	}
+	DCHECK(points3d.size() == points1.size());
+
+	// Estimate and apply scale factor
+	{
+		double scale = target_.estimateScale(points3d);
+		for (auto &p : points3d) { p = p * scale; }
+		t1 = t1 * scale;
+		t2 = t2 * scale;
+	}
+
+	// Reprojection error before BA
+	{
+		// SBA should report squared mean error
+		const double err1 = reprojectionError(points3d, points1, K1, R1, t1);
+		const double err2 = reprojectionError(points3d, points2, K2, R2, t2);
+		
+		if (std::abs(err1 - err2) > 2.0) {
+			LOG(INFO) << "Initial reprojection error (camera " << camera_from << "): " << err1;
+			LOG(INFO) << "Initial reprojection error (camera " << camera_to << "): " << err2;
+		}
+		LOG(INFO)	<< "Initial reprojection error (" << camera_from << ", " << camera_to << "): "
+					<< sqrt(err1 * err1 + err2 * err2);
+		
+	}
+	
+	// Bundle Adjustment
+	// vector<Point3d> points3d_triangulated;
+	// points3d_triangulated.insert(points3d_triangulated.begin(), points3d.begin(), points3d.end());
+	
+	double err;
+	cvsba::Sba sba;
+	{
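+		// cvsba Params: (type, max iterations, min error, number of fixed
+		// intrinsic params, number of fixed distortion params, verbose).
+		// MOTION refines camera poses only; intrinsics stay fixed unless
+		// optimize_intrinsic set fix_intrinsics_ to 0.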
+		sba.setParams(cvsba::Sba::Params(cvsba::Sba::TYPE::MOTION, 200, 1.0e-30, fix_intrinsics_, 5, false));
+
+		Mat rvec1, rvec2;
+		cv::Rodrigues(R1, rvec1);
+		cv::Rodrigues(R2, rvec2);
+
+		auto points2d = vector<vector<Point2d>> { points1, points2 };
+		auto K = vector<Mat> { K1, K2 };
+		auto r = vector<Mat> { rvec1, rvec2 };
+		auto t = vector<Mat> { t1, t2 };
+		auto dcoeffs = vector<Mat> { dist_coeffs_[camera_from], dist_coeffs_[camera_to] };
+		
+		sba.run(points3d,
+				points2d,
+				vector<vector<int>>(2, vector<int>(points1.size(), 1)),
+				K, r, t, dcoeffs
+		);
+		
+		cv::Rodrigues(r[0], R1);
+		cv::Rodrigues(r[1], R2);
+		t1 = t[0];
+		t2 = t[1];
+		err = sba.getFinalReprjError();
+
+		LOG(INFO) << "SBA reprojection error before BA " << sba.getInitialReprjError();
+		LOG(INFO) << "SBA reprojection error after BA " << err;
+	}
+
+	calculateTransform(R2, t2, R1, t1, rmat, tvec);
+	
+	// Store and average 3D points for both cameras (skip garbage)
+	if (err < 10.0) {
+		updatePoints3D(camera_from, points3d, idx, R1, t1);
+		updatePoints3D(camera_to, points3d, idx, R2, t2);
+	}
+	else {
+		LOG(ERROR)	<< "Large RMS error ("
+					<< reprojectionError(points3d, points2, K2, rmat, tvec)
+					<< "), not updating points!";
+	}
+
+	// LOG(INFO) << "Final error: " << reprojectionError(points3d, points2, K2, rmat, tvec);
+	//if (reprojectionError(points3d, points2, K2, rmat, tvec) > 10.0) {
+		// TODO: should ignore results
+		// LOG(ERROR) << "pairwise calibration failed! RMS: " << reprojectionError(points3d, points2, K2, rmat, tvec);
+	//};
+
+	return err;
+}
+
+Point3d MultiCameraCalibrationNew::getPoint3D(size_t camera, size_t idx) {
+	return points3d_[camera][idx];
+}
+
+void MultiCameraCalibrationNew::calculateMissingPoints3D() {
+	points3d_optimized_.clear();
+	points3d_optimized_.resize(points3d_[reference_camera_].size());
+
+	for (size_t i = 0; i < points3d_optimized_.size(); i++) {
+		if (inlier_[reference_camera_][i] > 0) {
+			points3d_optimized_[i] = points3d_[reference_camera_][i];
+			continue;
+		}
+
+		if (!isValid(i)) continue;
+
+		double f = 0.0;
+		Point3d point(0.0, 0.0, 0.0);
+		for (size_t c = 0; c < n_cameras_; c++) {
+			if (inlier_[c][i] <= 0) { continue; }
+			point += transformPoint(getPoint3D(c, i), R_[c], t_[c]);
+			f += 1.0;
+		}
+
+		DCHECK(f != 0.0);
+
+		points3d_optimized_[i] = point / f;
+	}
+}
+
+double MultiCameraCalibrationNew::getReprojectionError(size_t c_from, size_t c_to, const Mat &K, const Mat &R, const Mat &t) {
+
+	vector<Point2d> points2d;
+	vector<Point3d> points3d;
+
+	for (size_t i = 0; i < points2d_[c_from].size(); i++) {
+		if (!isValid(i) || !isVisible(c_from, i) || !isVisible(c_to, i)) continue;
+		points2d.push_back(points2d_[c_from][i]);
+		points3d.push_back(points3d_[c_to][i]);
+	}
+
+	return reprojectionError(points3d, points2d, K, R, t);
+}
+
+double MultiCameraCalibrationNew::getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &t) {
+	
+	vector<Point2d> points2d;
+	vector<Point3d> points3d;
+
+	for (size_t i = 0; i < points2d_[c_from].size(); i++) {
+		if (!isValid(i) || !isVisible(c_from, i)) continue;
+		points2d.push_back(points2d_[c_from][i]);
+		points3d.push_back(points3d_optimized_[i]);
+	}
+
+	return reprojectionError(points3d, points2d, K, R, t);
+}
+
+
+double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
+	if (reference_camera != -1) {
+		DCHECK(reference_camera >= 0 && reference_camera < n_cameras_);
+		reference_camera_ = reference_camera; 
+	}
+
+	reset(); // remove all old calibration results
+	map<pair<size_t, size_t>, pair<Mat, Mat>> transformations; 
+	
+	// All cameras should be calibrated pairwise; otherwise all possible 3D
+	// points are not necessarily triangulated
+
+	auto paths = visibility_graph_.findShortestPaths(reference_camera_);
+	
+	for (size_t c1 = 0; c1 < n_cameras_; c1++) {
+	for (size_t c2 = c1; c2 < n_cameras_; c2++) {
+		if (c1 == c2) {
+			transformations[make_pair(c1, c2)] = 
+				make_pair(Mat::eye(Size(3, 3), CV_64FC1),
+				Mat(Size(1, 3), CV_64FC1, Scalar(0.0))
+			);
+			continue;
+		}
+		LOG(INFO) << "Running pairwise calibration for cameras " << c1 << " and " << c2;
+		size_t n_visible = getVisiblePointsCount({c1, c2});
+		
+		if (n_visible < min_visible_points_) continue;
+		if (transformations.find(make_pair(c2, c1)) != transformations.end()) {
+			continue;
+		}
+		Mat R, t, R_i, t_i;
+
+		if (calibratePair(c1, c2, R, t) > 10.0) {
+			LOG(ERROR)	<< "Pairwise calibration failed, skipping cameras "
+						<< c1 << " and " << c2;
+			continue;
+		}
+
+		calculateInverse(R, t, R_i, t_i);
+
+		transformations[make_pair(c2, c1)] = make_pair(R, t);
+		transformations[make_pair(c1, c2)] = make_pair(R_i, t_i);
+	}}
+
+	for (size_t c = 0; c < paths.size(); c++) {
+		Mat R_chain = Mat::eye(Size(3, 3), CV_64FC1);
+		Mat t_chain = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
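+		// compose transforms along the path: applying x' = R x + t and then
+		// x'' = R' x' + t' gives x'' = (R' R) x + (R' t + t')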
+		LOG(INFO) << "Chain for camera " << c;
+		for (auto e: paths[c]) {
+			CHECK(transformations.find(e) != transformations.end()) << "chain not calculated; pairwise calibration possibly failed earlier?";
+			LOG(INFO) << e.first << " -> " << e.second;
+			Mat R = transformations[e].first;
+			Mat t = transformations[e].second;
+			R_chain = R * R_chain;
+			t_chain = t + R * t_chain;
+		}
+
+		R_[c] = R_chain;
+		t_[c] = t_chain;
+		/*R_[c] = transformations[make_pair(reference_camera_, c)].first;
+		t_[c] = transformations[make_pair(reference_camera_, c)].second;
+		DCHECK(R_[c].size() == Size(3, 3));
+		DCHECK(t_[c].size() == Size(1, 3));*/
+	}
+	
+	calculateMissingPoints3D();
+	
+	for (size_t c_from = 0; c_from < n_cameras_; c_from++) {
+		if (c_from == reference_camera_) continue;
+		Mat R, t;
+		calculateInverse(R_[c_from], t_[c_from], R, t);
+		LOG(INFO)	<< "Error before BA, cameras " << reference_camera_ << " and " << c_from << ": "
+					<< getReprojectionErrorOptimized(c_from, K_[c_from], R, t);
+	
+	}
+
+	double err;
+	cvsba::Sba sba;
+	{
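+		// MOTIONSTRUCTURE refines both the camera poses and the 3D points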
+		sba.setParams(cvsba::Sba::Params(cvsba::Sba::TYPE::MOTIONSTRUCTURE, 200, 1.0e-24, fix_intrinsics_, 5, false));
+
+		vector<Mat> rvecs(R_.size());
+		vector<vector<int>> visible(R_.size());
+		vector<Point3d> points3d;
+		vector<vector<Point2d>> points2d(R_.size());
+		vector<size_t> idx;
+		idx.reserve(points3d_optimized_.size());
+
+		for (size_t i = 0; i < points3d_optimized_.size(); i++) {
+			
+			auto p = points3d_optimized_[i];
+			DCHECK(std::isfinite(p.x) && std::isfinite(p.y) && std::isfinite(p.z));
+
+			int count = 0;
+			for (size_t c = 0; c < n_cameras_; c++) {
+				if (isVisible(c, i) && isValid(c, i)) { count++; }
+			}
+			
+			if (count < 2) continue;
+
+			points3d.push_back(p);
+			idx.push_back(i);
+
+			for (size_t c = 0; c < n_cameras_; c++) {
+				bool good = isVisible(c, i) && isValid(c, i);
+				visible[c].push_back(good ? 1 : 0);
+				points2d[c].push_back(points2d_[c][i]);
+			}
+		}
+
+		for (size_t i = 0; i < rvecs.size(); i++) {
+			calculateInverse(R_[i], t_[i], R_[i], t_[i]);
+			cv::Rodrigues(R_[i], rvecs[i]);
+		}
+
+		DCHECK(points2d.size() == n_cameras_);
+		DCHECK(visible.size() == n_cameras_);
+		for (size_t c = 0; c < n_cameras_; c++) {
+			DCHECK(points3d.size() == points2d[c].size());
+			DCHECK(points3d.size() == visible[c].size());
+		}
+
+		LOG(INFO) << "number of points used: " << points3d.size();
+		sba.run(points3d, points2d, visible,
+				K_,	rvecs, t_, dist_coeffs_
+		);
+		
+		for (size_t i = 0; i < rvecs.size(); i++) {
+			cv::Rodrigues(rvecs[i], R_[i]);
+			calculateInverse(R_[i], t_[i], R_[i], t_[i]);
+		}
+
+		// save optimized points
+		{
+			// resize to the original point count; idx holds indices into the
+			// full array, not into the subset passed to SBA
+			size_t l = points3d_optimized_.size();
+			points3d_optimized_.clear();
+			points3d_optimized_.resize(l, Point3d(NAN, NAN, NAN));
+
+			for (size_t i = 0; i < points3d.size(); i++) {
+				points3d_optimized_[idx[i]] = points3d[i];
+			}
+		}
+
+		err = sba.getFinalReprjError();
+		LOG(INFO) << "SBA reprojection error before final BA " << sba.getInitialReprjError();
+		LOG(INFO) << "SBA reprojection error after final BA " << err;
+	}
+
+	for (size_t c_from = 0; c_from < n_cameras_; c_from++) {
+		if (c_from == reference_camera_) continue;
+		Mat R, t;
+		calculateInverse(R_[c_from], t_[c_from], R, t);
+		LOG(INFO)	<< "Error (RMS) after BA, cameras " << reference_camera_ << " and " << c_from << ": "
+					<< getReprojectionErrorOptimized(c_from, K_[c_from], R, t);
+	
+	}
+
+	is_calibrated_ = true;
+	return err;
+}
+
+void MultiCameraCalibrationNew::projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points) {
+	// TODO: not implemented
+}
+
+void MultiCameraCalibrationNew::projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points) {
+	// TODO:	indexing does not match input (points may be skipped in loadInput())
+
+	points.clear();
+	size_t i = target_.n_points * idx;
+	
+	if (!isValid(i)) return;
+
+	Point3d p1(points3d_optimized_[i]);
+	Point3d p2(points3d_optimized_[i + 1]);
+
+	if (!std::isfinite(p1.x) || !std::isfinite(p2.x)) {
+		// DEBUG: should not happen
+		LOG(ERROR) << "Bad point! (no valid triangulation)";
+		return; 
+	}
+	
+	Mat R, tvec, rvec;
+	calculateTransform(R_[reference_camera_], t_[reference_camera_], R_[camera_dst], t_[camera_dst], R, tvec);
+	
+	cv::Rodrigues(R, rvec);
+	cv::projectPoints(	vector<Point3d> { p1, p2 },
+						rvec, tvec, K_[camera_dst], dist_coeffs_[camera_dst], points);
+}
+
+void MultiCameraCalibrationNew::getCalibration(vector<Mat> &R, vector<Mat> &t) {
+	DCHECK(is_calibrated_);
+	R.resize(n_cameras_);
+	t.resize(n_cameras_);
+
+	for (size_t i = 0; i < n_cameras_; i++) {
+		R_[i].copyTo(R[i]);
+		t_[i].copyTo(t[i]);
+	}
+}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/multicalibrate.hpp b/applications/calibration-multi/src/multicalibrate.hpp
new file mode 100644
index 000000000..e739bc0d9
--- /dev/null
+++ b/applications/calibration-multi/src/multicalibrate.hpp
@@ -0,0 +1,150 @@
+#pragma once
+
+#include <opencv2/core.hpp>
+
+#include "visibility.hpp"
+#include "util.hpp"
+
+using cv::Mat;
+using cv::Size;
+using cv::Point2d;
+using cv::Point3d;
+using cv::Vec4d;
+using cv::Scalar;
+
+using std::vector;
+using std::pair;
+
+class CalibrationTarget {
+public:
+	CalibrationTarget(double length) :
+		n_points(2),
+		calibration_bar_length_(length)
+	{}
+	/* @brief	Estimate scale factor.
+	 * @param	3D points (can pass n views)
+	 */
+	double estimateScale(vector<Point3d> points3d);
+	size_t n_points;
+
+private:
+	double calibration_bar_length_;
+};
+
+class MultiCameraCalibrationNew {
+public:
+	MultiCameraCalibrationNew(	size_t n_cameras, size_t reference_camera,
+								CalibrationTarget target, int fix_intrinsics=1);
+	
+	void setCameraParameters(size_t idx, const Mat &K, const Mat &distCoeffs);
+	void setCameraParameters(size_t idx, const Mat &K);
+
+	void addPoints(vector<vector<Point2d>> points2d, vector<int> visibility);
+
+	size_t getViewsCount();
+	size_t getCamerasCount() { return n_cameras_; }
+	size_t getOptimalReferenceCamera();
+
+	size_t getMinVisibility() { return visibility_graph_.getMinVisibility(); }
+	size_t getViewsCount(size_t camera) { return visibility_graph_.getViewsCount(camera); }
+
+	void setFixIntrinsic(int value) { fix_intrinsics_ = (value == 1 ? 5 : 0); }
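+	// (cvsba interprets the value as the number of intrinsic parameters to
+	// keep fixed: 5 fixes all of them, 0 optimizes all of them)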
+
+	void loadInput(const std::string &filename, const vector<size_t> &cameras = {});
+
+	void saveInput(cv::FileStorage &fs);
+	void saveInput(const std::string &filename);
+
+	Mat getCameraMat(size_t idx);
+	Mat getDistCoeffs(size_t idx);
+
+	double calibrateAll(int reference_camera = -1);
+	double getReprojectionError();
+	void getCalibration(vector<Mat> &R, vector<Mat> &t);
+
+	void projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points);
+	void projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points);
+
+protected:
+	bool isVisible(size_t camera, size_t idx);
+	bool isValid(size_t camera, size_t idx);
+	bool isValid(size_t idx);
+
+	Point3d getPoint3D(size_t camera, size_t i);
+
+	vector<Point2d> getPoints(size_t camera, size_t idx);
+	vector<vector<Point2d>> getAllPoints(size_t camera, vector<size_t> idx);
+
+	void getVisiblePoints(	vector<size_t> cameras,
+							vector<vector<Point2d>> &points,
+							vector<size_t> &idx);
+
+	size_t getVisiblePointsCount(vector<size_t> cameras) {
+		// TODO: for pairs can use visibility graph adjacency matrix
+		vector<vector<Point2d>> points2d;
+		vector<size_t> idx;
+		getVisiblePoints(cameras, points2d, idx);
+		return idx.size();
+	}
+
+	size_t getTotalPointsCount() {
+		return points2d_[0].size();
+	}
+
+	vector<Point3d> getPoints3D(size_t idx);
+
+	/* @brief	Find points which are visible in all given cameras. Returns
+	 * 			the corresponding indices in the idx vector.
+	 */
+	void getVisiblePoints3D(vector<size_t> cameras,
+							vector<vector<Point3d>> &points,
+							vector<size_t> &idx);
+
+	/* @brief	Update 3D points with new values. If there is no earlier data,
+	 *			the new value is used as is; otherwise a running average is kept.
+	 */
+	void updatePoints3D(size_t camera, Point3d new_point, size_t idx, const Mat &R, const Mat &t);
+	void updatePoints3D(size_t camera, vector<Point3d> new_points, vector<size_t> idx, const Mat &R, const Mat &t);
+
+	/* @brief	Calculate 3D points that are not visible in the reference camera
+	 *			using the transformations of the cameras that do see them.
+	 */
+	void calculateMissingPoints3D();
+
+	void getTransformation(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
+	double calibratePair(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
+
+	/* @brief	Calculate reprojection error of visible points (triangulation) */
+	double getReprojectionError(size_t c_from, size_t c_to, const Mat &K, const Mat &R, const Mat &T);
+
+	/* @brief	Calculate reprojection error of visible points (optimized/averaged points) */
+	double getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &T);
+
+	/* @brief	Remove old calibration data calculated by calibrateAll */
+	void reset();
+
+private:
+	CalibrationTarget target_;
+	Visibility visibility_graph_; 
+
+	bool is_calibrated_;
+	size_t n_cameras_;
+	size_t reference_camera_;
+	size_t min_visible_points_;
+	int fix_intrinsics_;
+
+	vector<Mat> K_;
+	vector<Mat> dist_coeffs_;
+	vector<Mat> R_;
+	vector<Mat> t_;
+
+	vector<Point3d> points3d_optimized_;
+	vector<vector<Point3d>> points3d_;
+	vector<vector<Point2d>> points2d_;
+	vector<vector<int>> visible_;
+	vector<vector<int>> inlier_; // number of averaged triangulations per point; -1 marks a rejected point
+
+	int fm_method_;
+	double fm_ransac_threshold_;
+	double fm_confidence_;
+};
diff --git a/applications/calibration-multi/src/util.cpp b/applications/calibration-multi/src/util.cpp
new file mode 100644
index 000000000..0019516e4
--- /dev/null
+++ b/applications/calibration-multi/src/util.cpp
@@ -0,0 +1,174 @@
+#include "util.hpp"
+
+#include <loguru.hpp>
+
+#include <opencv2/core.hpp>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/aruco.hpp>
+
+using std::vector;
+
+using cv::Mat;
+using cv::Point2i;
+using cv::Point2d;
+using cv::Point3d;
+using cv::Size;
+using cv::Scalar;
+
+/* @brief	Visualize epipolar lines for given points in the other image.
+ * @param	Points in image
+ * @param	Corresponding image where to draw the lines
+ * @param	Fundamental matrix
+ * @param	Line color
+ * @param	Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
+ */
+void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image) {
+	Mat lines;
+	cv::computeCorrespondEpilines(points, image, F, lines);
+
+	for (int i = 0; i < lines.rows; i++) {
+		cv::Vec3f l = lines.at<cv::Vec3f>(i);
+		float a = l[0];
+		float b = l[1];
+		float c = l[2];
+		float x0, y0, x1, y1;
+		x0 = 0;
+		y0 = (-c -a * x0) / b;
+		x1 = img.cols;
+		y1 = (-c -a * x1) / b;
+		cv::line(img, cv::Point(x0, y0), cv::Point(x1,y1), color, 1);
+	}
+}
+
+/* @brief	Find calibration points: ArUco markers, two per image.
+ *			The visible parameter is both input and output.
+ */
+int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
+							vector<int> &visible) {
+	using namespace cv;
+	int count = 0;
+
+	visible.resize(imgs.size(), 1);
+
+	points.clear();
+	points.resize(imgs.size(), vector<Point2d>(2, Point2d(0.0, 0.0)));
+
+	auto dictionary = aruco::getPredefinedDictionary(aruco::DICT_5X5_50);
+	vector<vector<Point2f>> corners;
+	vector<int> ids;
+	
+	for (size_t i = 0; i < imgs.size(); i++) {
+		if (visible[i] == 0) continue;
+
+		aruco::detectMarkers(imgs[i], dictionary, corners, ids);
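+		// expect exactly the two markers of the calibration bar (ids 0 and 1);
+		// a marker's center is the mean of its four corners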
+		if (corners.size() == 2) {
+			Point2d center0((corners[0][0] + corners[0][1] + corners[0][2] + corners[0][3]) / 4.0);
+			Point2d center1((corners[1][0] + corners[1][1] + corners[1][2] + corners[1][3]) / 4.0);
+			if (ids[0] != 0) { std::swap(center0, center1); }
+
+			points[i][0] = center0; points[i][1] = center1;
+			visible[i] = 1;
+
+			count++;
+		}
+		else {
+			visible[i] = 0;
+		}
+	}
+
+	return count;
+}
+
+/* @brief	Find ArUco marker centers.
+ * @param	(input) image
+ * @param	(output) found centers
+ * @param	(output) marker IDs
+ */
+void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict) {
+	using namespace cv;
+
+	points.clear();
+
+	auto dictionary = aruco::getPredefinedDictionary(dict);
+	vector<vector<Point2f>> corners;
+
+	aruco::detectMarkers(img, dictionary, corners, ids);
+	for (size_t j = 0; j < corners.size(); j++) {
+		Point2f center((corners[j][0] + corners[j][1] + corners[j][2] + corners[j][3]) / 4.0);
+		points.push_back(center);
+	}
+}
+
+/* OpenCV's recoverPose() expects both cameras to have identical intrinsic
+ * parameters.
+ */
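+/* This wrapper normalizes both point sets into normalized image coordinates
+ * (x' = (x - cx) / fx, y' = (y - cy) / fy), after which the identity matrix
+ * can be passed as the common camera matrix.
+ */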
+int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
+				Mat &_cameraMatrix1, Mat &_cameraMatrix2,
+				Mat &_R, Mat &_t, double distanceThresh,
+				Mat &triangulatedPoints) {
+
+	Mat points1, points2, cameraMatrix1, cameraMatrix2, cameraMatrix;
+	
+	Mat(_points1.size(), 2, CV_64FC1, _points1.data()).convertTo(points1, CV_64F);
+	Mat(_points2.size(), 2, CV_64FC1, _points2.data()).convertTo(points2, CV_64F);
+	_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
+	_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
+	cameraMatrix = Mat::eye(Size(3, 3), CV_64FC1);
+
+	double fx1 = cameraMatrix1.at<double>(0,0);
+	double fy1 = cameraMatrix1.at<double>(1,1);
+	double cx1 = cameraMatrix1.at<double>(0,2);
+	double cy1 = cameraMatrix1.at<double>(1,2);
+
+	double fx2 = cameraMatrix2.at<double>(0,0);
+	double fy2 = cameraMatrix2.at<double>(1,1);
+	double cx2 = cameraMatrix2.at<double>(0,2);
+	double cy2 = cameraMatrix2.at<double>(1,2);
+
+	points1.col(0) = (points1.col(0) - cx1) / fx1;
+	points1.col(1) = (points1.col(1) - cy1) / fy1;
+
+	points2.col(0) = (points2.col(0) - cx2) / fx2;
+	points2.col(1) = (points2.col(1) - cy2) / fy2;
+
+	// TODO mask
+	// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
+	// modules/calib3d/src/five-point.cpp (461)
+	//
+	// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
+
+	return cv::recoverPose(E, points1, points2, cameraMatrix, _R, _t, distanceThresh, cv::noArray(), triangulatedPoints);
+}
+
+/* @brief	Calculate RMS reprojection error
+ * @param	3D points
+ * @param	Expected 2D points
+ * @param	Camera matrix
+ * @param	Rotation matrix/vector
+ * @param	Translation vector
+ */
+double reprojectionError(	const vector<Point3d> &points3d, const vector<Point2d> &points2d,
+							const Mat &K, const Mat &rvec, const Mat &tvec) {
+	
+	DCHECK(points3d.size() == points2d.size());
+	
+	Mat _rvec;
+	if (rvec.size() == Size(3, 3)) { cv::Rodrigues(rvec, _rvec); }
+	else { _rvec = rvec; }
+
+	DCHECK(_rvec.size() == Size(1, 3) || _rvec.size() == Size(3, 1));
+
+	vector<Point2d> points_reprojected;
+	cv::projectPoints(points3d, _rvec, tvec, K, cv::noArray(), points_reprojected);
+	
+	int n_points = points2d.size();
+	double err = 0.0;
+
+	for (int i = 0; i < n_points; i++) {
+		Point2d a = points2d[i] - points_reprojected[i];
+		err += a.x * a.x + a.y * a.y;
+	}
+
+	return sqrt(err / n_points);
+}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/util.hpp b/applications/calibration-multi/src/util.hpp
new file mode 100644
index 000000000..7c2b5702f
--- /dev/null
+++ b/applications/calibration-multi/src/util.hpp
@@ -0,0 +1,110 @@
+#pragma once
+
+#include <loguru.hpp>
+
+#include <opencv2/core.hpp>
+#include <opencv2/aruco.hpp>
+
+using std::vector;
+
+using cv::Mat;
+using cv::Point2i;
+using cv::Point2d;
+using cv::Point3d;
+using cv::Size;
+using cv::Scalar;
+
+/* @brief	Visualize epipolar lines for given points in the other image.
+ * @param	Points in image
+ * @param	Corresponding image where to draw the lines
+ * @param	Fundamental matrix
+ * @param	Line color
+ * @param	Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
+ */
+void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image=1);
+
+
+/* @brief	Find calibration points: ArUco markers, two per image.
+ */
+int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
+							vector<int> &visible);
+
+/* @brief	Find ArUco marker centers.
+ * @param	(input) image
+ * @param	(output) found centers
+ * @param	(output) marker IDs
+ */
+void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict=cv::aruco::DICT_4X4_50);
+
+/* OpenCV's recoverPose() expects both cameras to have identical intrinsic
+ * parameters.
+ * 
+ * https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
+ */
+int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
+				Mat &_cameraMatrix1, Mat &_cameraMatrix2,
+				Mat &_R, Mat &_t, double distanceThresh,
+				Mat &triangulatedPoints);
+
+/* @brief	Calculate RMS reprojection error
+ * @param	3D points
+ * @param	Expected 2D points
+ * @param	Camera matrix
+ * @param	Rotation matrix/vector
+ * @param	Translation vector
+ */
+double reprojectionError(	const vector<Point3d> &points3d, const vector<Point2d> &points2d,
+							const Mat &K, const Mat &rvec, const Mat &tvec);
+
+inline double euclideanDistance(Point3d a, Point3d b) {
+	Point3d c = a - b;
+	return sqrt(c.x*c.x + c.y*c.y + c.z*c.z);
+}
+
+inline Point3d transformPoint(Point3d p, Mat R, Mat t) {
+	DCHECK(R.size() == Size(3, 3));
+	DCHECK(t.size() == Size(1, 3));
+	return Point3d(Mat(R * Mat(p) + t));
+}
+
+inline Point3d inverseTransformPoint(Point3d p, Mat R, Mat t) {
+	DCHECK(R.size() == Size(3, 3));
+	DCHECK(t.size() == Size(1, 3));
+	return Point3d(Mat(R.t() * (Mat(p) - t)));
+}
+
+inline Mat getMat4x4(const Mat &R, const Mat &t) {
+	DCHECK(R.size() == Size(3, 3));
+	DCHECK(t.size() == Size(1, 3));
+	Mat M = Mat::eye(Size(4, 4), CV_64FC1);
+	R.copyTo(M(cv::Rect(0, 0, 3, 3)));
+	t.copyTo(M(cv::Rect(3, 0, 1, 3)));
+	return M;
+}
+
+inline void getRT(const Mat RT, Mat &R, Mat &t) {
+	R = RT(cv::Rect(0, 0, 3, 3));
+	t = RT(cv::Rect(3, 0, 1, 3));
+}
+
+// Calculate the transform from (R1, t1) to (R2, t2), where both (R1, t1)
+// and (R2, t2) map into the same (target) coordinate system.
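+//
+// With M_i = [R_i, t_i; 0, 1], the result is M = M2^-1 * M1, i.e. for a
+// point x: x_2 = M * x_1, where x_i denotes coordinates in frame i.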
+
+inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2, Mat &R, Mat &tvec, Mat &M) {
+	Mat M_src = getMat4x4(R1, T1);
+	Mat M_dst = getMat4x4(R2, T2);
+	M = M_dst.inv() * M_src;	
+	R = M(cv::Rect(0, 0, 3, 3));
+	tvec = M(cv::Rect(3, 0, 1, 3));
+}
+
+inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2, Mat &R, Mat &tvec) {
+	Mat M;
+	calculateTransform(R1, T1, R2, T2, R, tvec, M);
+}
+
+inline void calculateInverse(const Mat &R2, const Mat &T2, Mat &R, Mat &T) {
+	Mat R1 = Mat::eye(Size(3, 3), CV_64FC1);
+	Mat T1(Size(1, 3), CV_64FC1, Scalar(0.0));
+	calculateTransform(R1, T1, R2, T2, R, T);
+}
\ No newline at end of file
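
The 4x4 helpers in util.hpp compose as ordinary homogeneous transforms: getMat4x4() packs R into the top-left 3x3 block and t into the last column, so calculateTransform() is just M_dst^-1 * M_src. Below is a minimal sketch of that composition using only the helpers declared above; the poses R_a/t_a, R_b/t_b and the test point are illustrative values, not part of this patch:

	// illustrative poses of cameras A and B, both mapping into the same
	// target coordinate system
	cv::Mat R_a = (cv::Mat_<double>(3, 3) << 1, 0, 0,  0, 1, 0,  0, 0, 1);
	cv::Mat t_a = (cv::Mat_<double>(3, 1) << 0.5, 0.0, 0.0);
	cv::Mat R_b = (cv::Mat_<double>(3, 3) << 0, -1, 0,  1, 0, 0,  0, 0, 1);
	cv::Mat t_b = (cv::Mat_<double>(3, 1) << 0.0, 0.2, 0.0);

	// transform taking points from A's frame to B's frame; equivalent to
	// calculateTransform(R_a, t_a, R_b, t_b, R_ab, t_ab, M_ab)
	cv::Mat M_ab = getMat4x4(R_b, t_b).inv() * getMat4x4(R_a, t_a);
	cv::Mat R_ab, t_ab;
	getRT(M_ab, R_ab, t_ab);

	// consistency check: going A -> target -> B step by step should agree
	// with the composed (R_ab, t_ab)
	cv::Point3d p(1.0, 2.0, 3.0);
	cv::Point3d q1 = transformPoint(p, R_ab, t_ab);
	cv::Point3d q2 = inverseTransformPoint(transformPoint(p, R_a, t_a), R_b, t_b);
	DCHECK(euclideanDistance(q1, q2) < 1e-6);
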
diff --git a/applications/calibration-multi/src/visibility.cpp b/applications/calibration-multi/src/visibility.cpp
new file mode 100644
index 000000000..bd001f603
--- /dev/null
+++ b/applications/calibration-multi/src/visibility.cpp
@@ -0,0 +1,155 @@
+#include <algorithm>
+#include <climits>
+#include <numeric>
+#include <queue>
+
+#include <loguru.hpp>
+
+#include "visibility.hpp"
+
+using cv::Mat;
+using cv::Scalar;
+using cv::Size;
+using std::vector;
+using std::pair;
+using std::make_pair;
+
+Visibility::Visibility(int n_cameras) : n_cameras_(n_cameras) {
+	visibility_ = Mat(Size(n_cameras, n_cameras), CV_32SC1, Scalar(0));
+	count_ = vector(n_cameras, 0);
+}
+
+void Visibility::update(vector<int> &visible) {
+	DCHECK(visible.size() == (size_t) n_cameras_);
+
+	for (int i = 0; i < n_cameras_; i++) {
+		if (visible[i] == 0) continue;
+		count_[i]++;
+
+		for (int j = 0; j < n_cameras_; j++) {
+			if (i == j) continue;
+			if (visible[j] == 1) visibility_.at<int>(i, j)++;
+		}
+	}
+}
+
+int Visibility::getOptimalCamera() {
+	// most visible on average
+	int best_i = 0;
+	double best_score = -INFINITY;
+	for (int i = 0; i < visibility_.rows; i++) {
+		double score = 0.0;
+		for (int x = 0; x < visibility_.cols; x++) {
+			score += visibility_.at<int>(i, x);
+		}
+		score = score / (double) visibility_.cols;
+		if (score > best_score) {
+			best_i = i;
+			best_score = score;
+		}
+	}
+	
+	return best_i;
+}
+
+int Visibility::getMinVisibility() {
+	// lowest number of frames any camera has been visible in
+	int min_count = INT_MAX;
+
+	for (int i = 0; i < n_cameras_; i++) {
+		if (count_[i] < min_count) {
+			min_count = count_[i];
+		}
+	}
+
+	return min_count;
+}
+
+int Visibility::getViewsCount(int camera) {
+	return count_[camera];
+}
+
+vector<vector<pair<int, int>>> Visibility::findShortestPaths(int reference) {
+	DCHECK(reference < n_cameras_);
+
+	vector<vector<pair<int, int>>> res(n_cameras_);
+	for (int i = 0; i < n_cameras_; i++) {
+		res[i] = findShortestPath(i, reference);
+	}
+	
+	return res;
+}
+
+vector<pair<int, int>> Visibility::findShortestPath(int from, int to) {
+	if (from == to) return vector<pair<int, int>>();
+
+	vector<bool> visited(n_cameras_, false);
+	vector<double> distances(n_cameras_, INFINITY);
+	vector<int> previous(n_cameras_, -1);
+	
+	distances[from] = 0.0;
+
+	auto cmp = [](pair<int, double> u, pair<int, double> v) { return u.second > v.second; };
+	std::priority_queue<pair<int, double>, vector<pair<int, double>>, decltype(cmp)> pq(cmp);
+
+	pq.push(make_pair(from, distances[from]));
+
+	while(!pq.empty()) {
+		pair<int, double> current = pq.top();
+		pq.pop();
+
+		int current_id = current.first;
+		if (visited[current_id]) continue; // stale queue entry, node already settled
+		visited[current_id] = true;
+
+		double current_distance = distances[current_id];
+
+		for (int i = 0; i < n_cameras_; i++) {
+			int count = visibility_.at<int>(current_id, i);
+			if (count == 0) continue; // not connected
+
+			double distance = 1.0 / (double) count;
+			double new_distance = current_distance + distance;
+			
+			if (distances[i] > new_distance) {
+				distances[i] = new_distance;
+				previous[i] = current_id;
+
+				pq.push(make_pair(i, distances[i]));
+			}
+		}
+	}
+
+	vector<pair<int, int>> res;
+	if (previous[to] == -1) {
+		LOG(ERROR) << "no path found from camera " << from << " to camera " << to;
+		return res;
+	}
+
+	int prev = previous[to];
+	int current = to;
+
+	do {
+		res.push_back(make_pair(current, prev));
+		current = prev;
+		prev = previous[prev];
+	}
+	while(prev != -1);
+
+	std::reverse(res.begin(), res.end());
+	return res;
+}
+
+vector<int> Visibility::getClosestCameras(int c) {
+
+	// initialize original index locations
+	vector<int> idx(n_cameras_);
+	std::iota(idx.begin(), idx.end(), 0);
+	int* views = visibility_.ptr<int>(c);
+	
+	// sort indices so that cameras with the most shared views come first
+	std::sort(idx.begin(), idx.end(),
+		[views](size_t i1, size_t i2) {return views[i1] > views[i2];});
+
+	return idx;
+}
\ No newline at end of file
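
A small usage sketch of the weighting implemented above; the counts are made up for illustration. Since an edge costs 1/count, a pair of cameras with many shared observations forms a short edge, and a well-observed two-hop route can beat a poorly observed direct edge:

	Visibility vis(3);
	std::vector<int> v01 = {1, 1, 0};	// cameras 0 and 1 see the pattern
	std::vector<int> v12 = {0, 1, 1};	// cameras 1 and 2 see the pattern
	std::vector<int> v02 = {1, 0, 1};	// cameras 0 and 2 see the pattern

	for (int i = 0; i < 100; i++) { vis.update(v01); vis.update(v12); }
	for (int i = 0; i < 5; i++) { vis.update(v02); }

	// direct edge 0-2 costs 1/5 = 0.2, while the route through camera 1
	// costs 1/100 + 1/100 = 0.02, so the path from camera 0 to reference
	// camera 2 goes via camera 1
	auto paths = vis.findShortestPaths(2);
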
diff --git a/applications/calibration-multi/src/visibility.hpp b/applications/calibration-multi/src/visibility.hpp
new file mode 100644
index 000000000..dcf7e71dd
--- /dev/null
+++ b/applications/calibration-multi/src/visibility.hpp
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <opencv2/core.hpp>
+
+using cv::Mat;
+using std::vector;
+using std::pair;
+
+class Visibility {
+public:
+	Visibility(int n_cameras);
+
+	/* @brief	Update visibility graph.
+	 * @param	Which cameras see the feature(s) in this iteration
+	 */
+	void update(vector<int> &visible);
+
+	/* @brief	For all cameras, find the shortest (optimal) path to the
+	 * 			reference camera
+	 * @param	Id of reference camera
+	 * 
+	 * Calculates shortest paths in a weighted graph using Dijkstra's
+	 * algorithm. Edge weights are the inverse of the number of shared views
+	 * between cameras (nodes).
+	 * @todo	Add a constant weight for each edge (prefer fewer edges)
+	 */
+	vector<vector<pair<int, int>>> findShortestPaths(int reference);
+
+	vector<int> getClosestCameras(int c);
+	int getOptimalCamera();
+	int getMinVisibility();
+	int getViewsCount(int camera);
+
+protected:
+	/* @brief	Find shortest path between nodes
+	 * @param	Source node id
+	 * @param	Destination node id
+	 */
+	vector<pair<int, int>> findShortestPath(int from, int to);
+
+private:
+	int n_cameras_;		// @brief number of cameras
+	Mat visibility_;	// @brief adjacency matrix of shared view counts
+	vector<int> count_;	// @brief per-camera count of frames with visible points
+};
diff --git a/applications/calibration/src/lens.cpp b/applications/calibration/src/lens.cpp
index 924787a74..5d47043fd 100644
--- a/applications/calibration/src/lens.cpp
+++ b/applications/calibration/src/lens.cpp
@@ -51,7 +51,8 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 	LOG(INFO) << " aperture_height: " << aperture_height;
 	LOG(INFO) << "-----------------------------------";
 
-	int calibrate_flags = cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_ASPECT_RATIO;
+	int calibrate_flags =	cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_ASPECT_RATIO |
+							cv::CALIB_FIX_PRINCIPAL_POINT;
 	// PARAMETERS
 
 	vector<cv::VideoCapture> cameras;
diff --git a/applications/calibration/src/stereo.cpp b/applications/calibration/src/stereo.cpp
index c009d6dd0..0d2e16369 100644
--- a/applications/calibration/src/stereo.cpp
+++ b/applications/calibration/src/stereo.cpp
@@ -68,7 +68,7 @@ void ftl::calibration::stereo(map<string, string> &opt) {
 
 	int stereocalibrate_flags =
 		cv::CALIB_FIX_INTRINSIC | cv::CALIB_FIX_PRINCIPAL_POINT | cv::CALIB_FIX_ASPECT_RATIO |
-		cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_SAME_FOCAL_LENGTH | cv::CALIB_RATIONAL_MODEL |
+		cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_SAME_FOCAL_LENGTH | 
 		cv::CALIB_FIX_K3 | cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5;
 
 	vector<cv::VideoCapture> cameras { cv::VideoCapture(0), cv::VideoCapture(1) };
@@ -152,11 +152,11 @@ void ftl::calibration::stereo(map<string, string> &opt) {
 		vector<Vec3f> points_ref;
 		calib.objectPoints(points_ref);
 		
+		/* disabled: doesn't seem very helpful (the RMS error is almost always low enough)
 		// calculate reprojection error with single pair of images
 		// reject it if RMS reprojection error too high
 		int flags = stereocalibrate_flags;
-
-		// TODO move to "findPoints"-thread
+		
 		double rms_iter = stereoCalibrate(
 					vector<vector<Vec3f>> { points_ref }, 
 					vector<vector<Vec2f>> { new_points[0] },
@@ -170,7 +170,7 @@ void ftl::calibration::stereo(map<string, string> &opt) {
 		if (rms_iter > max_error) {
 			LOG(WARNING) << "RMS reprojection error too high, maximum allowed error: " << max_error;
 			continue;
-		}
+		}*/
 		
 		if (use_grid) {
 			// store results in result grid
@@ -225,14 +225,11 @@ void ftl::calibration::stereo(map<string, string> &opt) {
 	Mat R1, R2, P1, P2, Q;
 	cv::Rect validRoi[2];
 
-	// calculate extrinsic parameters
-	// NOTE: 	Other code assumes CALIB_ZERO_DISPARITY is used (for Cy == Cx). 
-	//			Depth map map calculation disparityToDepth() could be incorrect otherwise.
 	stereoRectify(
 		camera_matrices[0], dist_coeffs[0],
 		camera_matrices[1], dist_coeffs[1],
 		image_size, R, T, R1, R2, P1, P2, Q,
-		cv::CALIB_ZERO_DISPARITY, alpha, image_size,
+		0, alpha, image_size,
 		&validRoi[0], &validRoi[1]
 	);
 
@@ -257,21 +254,33 @@ void ftl::calibration::stereo(map<string, string> &opt) {
 			camera.grab();
 			camera.retrieve(in[i]);
 	
+			auto p = cv::Point2i(camera_matrices[i].at<double>(0, 2), camera_matrices[i].at<double>(1, 2)); // unrectified principal point (cx, cy)
+			cv::drawMarker(in[i], p, cv::Scalar(51, 204, 51), cv::MARKER_CROSS, 40, 1);
+			cv::drawMarker(in[i], p, cv::Scalar(51, 204, 51), cv::MARKER_SQUARE, 25);
+
 			cv::remap(in[i], out[i], map1[i], map2[i], cv::INTER_CUBIC);
-			// cv::cvtColor(out[i], out_gray[i], cv::COLOR_BGR2GRAY);
 
 			// draw lines
 			for (int r = 50; r < image_size.height; r = r+50) {
 				cv::line(out[i], cv::Point(0, r), cv::Point(image_size.width-1, r), cv::Scalar(0,0,255), 1);
 			}
 
+			if (i == 0) { // left camera
+				auto p_r = cv::Point2i(-Q.at<double>(0, 3), -Q.at<double>(1, 3)); // rectified principal point, read from Q
+				cv::drawMarker(out[i], p_r, cv::Scalar(0, 0, 204), cv::MARKER_CROSS, 30);
+				cv::drawMarker(out[i], p_r, cv::Scalar(0, 0, 204), cv::MARKER_SQUARE);
+			}
+			
+			cv::imshow("Camera " + std::to_string(i) + " (unrectified)", in[i]);
 			cv::imshow("Camera " + std::to_string(i) + " (rectified)", out[i]);
 		}
-
+		
 		/* not useful
 		cv::absdiff(out_gray[0], out_gray[1], diff);
 		cv::applyColorMap(diff, diff_color, cv::COLORMAP_JET);
 		cv::imshow("Difference", diff_color);
 		*/
 	}
+
+	cv::destroyAllWindows();
 }
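
The markers drawn above compare two principal points: the one in the camera matrix and the rectified one encoded in Q. Per OpenCV's stereoRectify() documentation, Q has the layout sketched below, which is why -Q(0,3) and -Q(1,3) recover the rectified principal point; the variable names here are illustrative:

	//     [ 1   0    0     -cx1           ]
	// Q = [ 0   1    0     -cy            ]
	//     [ 0   0    0      f             ]
	//     [ 0   0   -1/Tx  (cx1 - cx2)/Tx ]
	double cx = -Q.at<double>(0, 3);	// rectified principal point, x
	double cy = -Q.at<double>(1, 3);	// rectified principal point, y
	double f  =  Q.at<double>(2, 3);	// rectified focal length
	double Tx = -1.0 / Q.at<double>(3, 2);	// baseline (sign depends on convention)
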
diff --git a/applications/reconstruct/src/voxel_scene.cpp b/applications/reconstruct/src/voxel_scene.cpp
index 5ec9204e9..5d392d4f7 100644
--- a/applications/reconstruct/src/voxel_scene.cpp
+++ b/applications/reconstruct/src/voxel_scene.cpp
@@ -98,6 +98,7 @@ void SceneRep::addSource(ftl::rgbd::Source *src) {
 	auto &cam = cameras_.emplace_back();
 	cam.source = src;
 	cam.params.m_imageWidth = 0;
+	src->setChannel(ftl::rgbd::kChanDepth);
 }
 
 extern "C" void updateCUDACameraConstant(ftl::voxhash::DepthCameraCUDA *data, int count);
diff --git a/applications/registration/src/aruco.cpp b/applications/registration/src/aruco.cpp
index 8ff848546..99d3552d9 100644
--- a/applications/registration/src/aruco.cpp
+++ b/applications/registration/src/aruco.cpp
@@ -35,6 +35,8 @@ void ftl::registration::aruco(ftl::Configurable *root) {
 
 	if (sources.size() == 0) return;
 
+	for (auto *src : sources) src->setChannel(ftl::rgbd::kChanDepth);
+
 	cv::namedWindow("Target", cv::WINDOW_KEEPRATIO);
 	cv::namedWindow("Source", cv::WINDOW_KEEPRATIO);
 
diff --git a/components/rgbd-sources/src/cuda_algorithms.hpp b/components/rgbd-sources/src/cuda_algorithms.hpp
index 7c7d510ff..0aa7399c0 100644
--- a/components/rgbd-sources/src/cuda_algorithms.hpp
+++ b/components/rgbd-sources/src/cuda_algorithms.hpp
@@ -11,6 +11,9 @@
 namespace ftl {
 namespace cuda {
 
+	void disparity_to_depth(const cv::cuda::GpuMat &disparity, cv::cuda::GpuMat &depth,
+				const ftl::rgbd::Camera &c, cv::cuda::Stream &stream);
+
 	/**
 	 * Disparity consistency algorithm.
 	 */
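
The disparity_to_depth() declared above moves the conversion to CUDA; the underlying formula, as used by the CPU code this patch removes from middlebury_source.cpp, is depth = baseline * fx / (disparity + doffs) with a 1/1000 scale (presumably millimetres to metres). A minimal CPU-side sketch of the same conversion, assuming the ftl::rgbd::Camera fields used elsewhere in this patch; this is not the CUDA implementation itself:

	static void disparityToDepthCPU(const cv::Mat &disparity, cv::Mat &depth,
									const ftl::rgbd::Camera &c) {
		depth.create(disparity.size(), CV_32FC1);
		const double scale = (c.baseline * c.fx) / 1000.0;
		for (int y = 0; y < disparity.rows; y++) {
			const float *d = disparity.ptr<float>(y);
			float *z = depth.ptr<float>(y);
			for (int x = 0; x < disparity.cols; x++) {
				// depth = baseline * fx / (disparity + doffs);
				// invalid (zero-offset) disparities yield inf here
				z[x] = (float)(scale / ((double)d[x] + c.doffs));
			}
		}
	}
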
diff --git a/components/rgbd-sources/src/middlebury_source.cpp b/components/rgbd-sources/src/middlebury_source.cpp
index 6f38f7bdc..d41d4e77b 100644
--- a/components/rgbd-sources/src/middlebury_source.cpp
+++ b/components/rgbd-sources/src/middlebury_source.cpp
@@ -142,43 +142,6 @@
 	ready_ = true;
 }
 
-static void disparityToDepth(const cv::cuda::GpuMat &disparity, cv::cuda::GpuMat &depth,
-							 const ftl::rgbd::Camera &c, cv::cuda::Stream &stream) {
-	double val = c.baseline * c.fx;
-	cv::cuda::add(disparity, c.doffs, depth, cv::noArray(), -1, stream);
-	cv::cuda::divide(val, depth, depth, 1.0f / 1000.0f, -1, stream);
-}
-
-/*static void disparityToDepthTRUE(const cv::Mat &disp, cv::Mat &depth, const ftl::rgbd::Camera &c) {
-	using namespace cv;
-
-	double doffs = 270.821 * 0.3;
-
-	Matx44d Q(
-		1.0,0.0,0.0,c.cx,
-		0.0,1.0,0.0,c.cy,
-		0.0,0.0,0.0,c.fx,
-		0.0,0.0,1.0/c.baseline,0.0);
-
-	for( int y = 0; y < disp.rows; y++ )
-    {
-        const float* sptr = disp.ptr<float>(y);
-        float* dptr = depth.ptr<float>(y);
-
-        for( int x = 0; x < disp.cols; x++)
-        {
-            double d = sptr[x] + doffs;
-            Vec4d homg_pt = Q*Vec4d(x, y, d, 1.0);
-            auto dvec = Vec3d(homg_pt.val);
-            dvec /= homg_pt[3];
-			dptr[x] = dvec[2] / 1000.0;
-
-            //if( fabs(d-minDisparity) <= FLT_EPSILON )
-            //    dptr[x][2] = bigZ;
-        }
-    }
-}*/
-
 void MiddleburySource::_performDisparity() {
 	if (depth_tmp_.empty()) depth_tmp_ = cv::cuda::GpuMat(left_.size(), CV_32FC1);
 	if (disp_tmp_.empty()) disp_tmp_ = cv::cuda::GpuMat(left_.size(), CV_32FC1);
diff --git a/components/rgbd-sources/src/net.cpp b/components/rgbd-sources/src/net.cpp
index 42b2c8fd6..9a2fdf2af 100644
--- a/components/rgbd-sources/src/net.cpp
+++ b/components/rgbd-sources/src/net.cpp
@@ -90,6 +90,11 @@ NetSource::NetSource(ftl::rgbd::Source *host)
 		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/baseline", host_->getConfig()["baseline"].dump());
 	});
 
+	host->on("doffs", [this,host](const ftl::config::Event&) {
+		params_.doffs = host_->value("doffs", params_.doffs);
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/doffs", host_->getConfig()["doffs"].dump());
+	});
+
 	host->on("quality", [this,host](const ftl::config::Event&) {
 		default_quality_ = host->value("quality", 0);
 	});
diff --git a/components/rgbd-sources/src/stereovideo.cpp b/components/rgbd-sources/src/stereovideo.cpp
index cb5e08a3a..aa8f8c376 100644
--- a/components/rgbd-sources/src/stereovideo.cpp
+++ b/components/rgbd-sources/src/stereovideo.cpp
@@ -151,7 +151,7 @@ static void disparityToDepth(const cv::cuda::GpuMat &disparity, cv::cuda::GpuMat
 	cv::cuda::divide(val, disparity, depth, 1.0f / 1000.0f, -1, stream);
 }
 
-bool StereoVideoSource::grab(int n, int b) {
+bool StereoVideoSource::grab(int n, int b) {	
 	const ftl::rgbd::channel_t chan = host_->getChannel();
 
 	if (chan == ftl::rgbd::kChanDepth) {
-- 
GitLab