diff --git a/CMakeLists.txt b/CMakeLists.txt
index c4192b464b45274d3bccf77f5719df45e7b677dd..7a34946d6ecaf0047693d61d116d9701ed454459 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -111,6 +111,7 @@ if (REALSENSE_LIBRARY)
 	endif()
 else()
 	set(REALSENSE_LIBRARY "")
+	add_library(realsense INTERFACE)
 endif()
 
 if (BUILD_GUI)
diff --git a/components/codecs/include/ftl/codecs/transformation.hpp b/components/codecs/include/ftl/codecs/transformation.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..b1ff85374bade6591bb824f7c2c65dbc977b7da2
--- /dev/null
+++ b/components/codecs/include/ftl/codecs/transformation.hpp
@@ -0,0 +1,42 @@
+#pragma once
+#ifndef _FTL_TRANSFORMATION_HPP_
+#define _FTL_TRANSFORMATION_HPP_
+
+#include <opencv2/core/mat.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include "ftl/utility/msgpack.hpp"
+
+namespace ftl {
+namespace codecs {
+struct Transformation {
+	Transformation() = default;
+	explicit Transformation(const int &id, const cv::Vec3d &rvec, const cv::Vec3d &tvec) :
+		id(id), rvec(rvec), tvec(tvec) {}
+
+	int id;
+	cv::Vec3d rvec;
+	cv::Vec3d tvec;
+
+	MSGPACK_DEFINE_ARRAY(id, rvec, tvec);
+
+	cv::Mat rmat() const {
+		cv::Mat R(cv::Size(3, 3), CV_64FC1);
+		cv::Rodrigues(rvec, R);
+		return R;
+	}
+
+	cv::Mat matrix() const {
+		cv::Mat M = cv::Mat::eye(cv::Size(4, 4), CV_64FC1);
+		rmat().copyTo(M(cv::Rect(0, 0, 3, 3)));
+		M.at<double>(0, 3) = tvec[0];
+		M.at<double>(1, 3) = tvec[1];
+		M.at<double>(2, 3) = tvec[2];
+		return M;
+	}
+};
+
+}
+}
+
+#endif
diff --git a/components/common/cpp/include/ftl/utility/msgpack.hpp b/components/common/cpp/include/ftl/utility/msgpack.hpp
index 6d733f1fb520d60e2e021759fa6ea9fbffdbef46..b9ed96637a78697b219cd9b0506c01e6b76da934 100644
--- a/components/common/cpp/include/ftl/utility/msgpack.hpp
+++ b/components/common/cpp/include/ftl/utility/msgpack.hpp
@@ -105,6 +105,48 @@ struct object_with_zone<cv::Rect_<T>> {
 	}
 };
 
+////////////////////////////////////////////////////////////////////////////////
+// cv::Vec
+
+template<typename T, int SIZE>
+struct pack<cv::Vec<T, SIZE>> {
+	template <typename Stream>
+	packer<Stream>& operator()(msgpack::packer<Stream>& o, cv::Vec<T, SIZE> const& v) const {
+
+		o.pack_array(SIZE);
+		for (int i = 0; i < SIZE; i++) { o.pack(v[i]); }
+
+		return o;
+	}
+};
+
+template<typename T, int SIZE>
+struct convert<cv::Vec<T, SIZE>> {
+	msgpack::object const& operator()(msgpack::object const& o, cv::Vec<T, SIZE> &v) const {
+		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
+		if (o.via.array.size != SIZE) { throw msgpack::type_error(); }
+		
+		for (int i = 0; i < SIZE; i++) { v[i] = o.via.array.ptr[i].as<T>(); }
+
+		return o;
+	}
+};
+
+template <typename T, int SIZE>
+struct object_with_zone<cv::Vec<T, SIZE>> {
+	void operator()(msgpack::object::with_zone& o, cv::Vec<T, SIZE> const& v) const {
+		o.type = type::ARRAY;
+		o.via.array.size = SIZE;
+		o.via.array.ptr = static_cast<msgpack::object*>(
+			o.zone.allocate_align(	sizeof(msgpack::object) * o.via.array.size,
+									MSGPACK_ZONE_ALIGNOF(msgpack::object)));
+
+		for (int i = 0; i < SIZE; i++) {
+			o.via.array.ptr[i] = msgpack::object(v[i], o.zone);
+		}
+	}
+};
+
 ////////////////////////////////////////////////////////////////////////////////
 // cv::Mat
 
diff --git a/components/common/cpp/test/msgpack_unit.cpp b/components/common/cpp/test/msgpack_unit.cpp
index 996c10cd2784fa9fe198b0190d8f87184b870620..a321cd36bdbebe16959fdf558b3d3dffd16dc06a 100644
--- a/components/common/cpp/test/msgpack_unit.cpp
+++ b/components/common/cpp/test/msgpack_unit.cpp
@@ -91,4 +91,9 @@ TEST_CASE( "msgpack cv::Mat" ) {
 		auto res = msgpack_unpack<cv::Rect2d>(msgpack_pack(cv::Rect2d(1,2,3,4)));
 		REQUIRE(res == cv::Rect2d(1,2,3,4));
 	}
+	
+	SECTION( "Vec<T, SIZE>" ) {
+		auto res = msgpack_unpack<cv::Vec4d>(msgpack_pack(cv::Vec4d(1,2,3,4)));
+		REQUIRE(res == cv::Vec4d(1,2,3,4));
+	}
 }
diff --git a/components/operators/CMakeLists.txt b/components/operators/CMakeLists.txt
index cd9d674f828ffafcc79d3c590e1792d474ff336f..6d6e796b5decb3272f549e02721ea87262938980 100644
--- a/components/operators/CMakeLists.txt
+++ b/components/operators/CMakeLists.txt
@@ -24,6 +24,7 @@ set(OPERSRC
 	src/clipping.cpp
 	src/depth.cpp
 	src/detectandtrack.cpp
+	src/aruco.cpp
 )
 
 if (LIBSGM_FOUND)
diff --git a/components/operators/include/ftl/operators/detectandtrack.hpp b/components/operators/include/ftl/operators/detectandtrack.hpp
index b769283a48735f89d86d122f5fc2567ca27c47f1..f4d9e47e3889169c7c14c4992c2baeeec0e89d54 100644
--- a/components/operators/include/ftl/operators/detectandtrack.hpp
+++ b/components/operators/include/ftl/operators/detectandtrack.hpp
@@ -1,22 +1,39 @@
-#ifndef _FTL_OPERATORS_CASCADECLASSIFIER_HPP_
-#define _FTL_OPERATORS_CASCADECLASSIFIER_HPP_
+#ifndef _FTL_OPERATORS_TRACKING_HPP_
+#define _FTL_OPERATORS_TRACKING_HPP_
 
 #include "ftl/operators/operator.hpp"
 #include <opencv2/objdetect.hpp>
 #include <opencv2/tracking.hpp>
+#include <opencv2/aruco.hpp>
 
 namespace ftl {
 namespace operators {
 
 /**
- * Object detection and tracking. 
- * 
+ * Object detection and tracking.
+ *
  * cv::CascadeClassifier used in detection
  * https://docs.opencv.org/master/d1/de5/classcv_1_1CascadeClassifier.html
- * 
- * cv::TrackerKCF used in tracking
+ *
+ * cv::TrackerKCF or cv::TrackerCSRT is used for tracking
  * https://docs.opencv.org/master/d2/dff/classcv_1_1TrackerKCF.html
- * 
+ * https://docs.opencv.org/master/d2/da2/classcv_1_1TrackerCSRT.html
+ *
+ * Configurable parameters:
+ * - max_distance: If classifier detects an object closer than max_distance, the
+ *   found object is considered already tracked and new tracker is not created.
+ *   Value in pixels.
+ * - update_bounding_box: When true, tracker is always re-initialized with new
+ *   bounding box when classifier detects already tracked object.
+ * - max_tracked: Maximum number of tracked objects.
+ * - max_fail: Number of tracking failures before object is removed. Value in
+ *   frames.
+ * - detect_n_frames: How often object detection is performed.
+ * - filename: Path to OpenCV CascadeClassifier file
+ * - scale_neighbors, scalef, min_size, max_size: OpenCV CascadeClassifier
+ *   parameters
+ * - tracker_type: KCF or CSRT
+ * - debug: Draw bounding boxes and object IDs in image.
  */
 class DetectAndTrack : public ftl::operators::Operator {
 	public:
@@ -29,18 +46,20 @@ class DetectAndTrack : public ftl::operators::Operator {
 
 	protected:
 	bool init();
-	
+
 	bool detect(const cv::Mat &im);
 	bool track(const cv::Mat &im);
 
 	private:
 
+	int createTracker(const cv::Mat &im, const cv::Rect2d &obj);
+
 	ftl::codecs::Channel channel_in_;
 	ftl::codecs::Channel channel_out_;
 
 	bool debug_;
 	int id_max_;
-	
+
 	struct Object {
 		int id;
 		cv::Rect2d object;
@@ -49,6 +68,11 @@ class DetectAndTrack : public ftl::operators::Operator {
 	};
 	std::vector<Object> tracked_;
 
+	// 0: KCF, 1: CSRT
+	int tracker_type_;
+
+	// update tracked bounding box when detected
+	bool update_bounding_box_;
 	// detection: if detected object is farther than max_distance_, new tracked
 	// object is added
 	double max_distance_;
@@ -72,7 +96,37 @@ class DetectAndTrack : public ftl::operators::Operator {
 	cv::CascadeClassifier classifier_;
 };
 
+/**
+ * AruCo tag detection and tracking.
+ *
+ * Configurable parameters:
+ * - dictionary: ArUco dictionary id. See PREDEFINED_DICTIONARY_NAME in
+ *   opencv2/aruco/dictionary.hpp for possible values.
+ * - marker_size: Marker size (side), in metres.
+ * - estimate_pose: Estimate marker poses. Requires marker_size to be set.
+ * - debug: When enabled, markers and poses are drawn to image.
+ */
+class ArUco : public ftl::operators::Operator {
+	public:
+	explicit ArUco(ftl::Configurable*);
+	~ArUco() {};
+
+	inline Operator::Type type() const override { return Operator::Type::OneToOne; }
+	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
+
+	ftl::codecs::Channel channel_in_;
+	ftl::codecs::Channel channel_out_;
+
+	private:
+	bool debug_;
+	bool estimate_pose_;
+	float marker_size_;
+
+	cv::Ptr<cv::aruco::Dictionary> dictionary_;
+	cv::Ptr<cv::aruco::DetectorParameters> params_;
+};
+
 }
 }
 
-#endif // _FTL_OPERATORS_CASCADECLASSIFIER_HPP_
+#endif // _FTL_OPERATORS_TRACKING_HPP_
diff --git a/components/operators/src/aruco.cpp b/components/operators/src/aruco.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b80067108d00c5d154e207d4f14ceba697deb140
--- /dev/null
+++ b/components/operators/src/aruco.cpp
@@ -0,0 +1,78 @@
+#include "ftl/operators/detectandtrack.hpp"
+#include "ftl/codecs/transformation.hpp"
+
+using ftl::operators::ArUco;
+using ftl::rgbd::Frame;
+
+using ftl::codecs::Channel;
+using ftl::codecs::Transformation;
+
+using cv::Mat;
+using cv::Point2f;
+using cv::Vec3d;
+
+using std::vector;
+
+ArUco::ArUco(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
+	dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
+	params_ = cv::aruco::DetectorParameters::create();
+
+	debug_ = cfg->value("debug", false);
+	estimate_pose_ = cfg->value("estimate_pose", false);
+	auto marker_size = cfg->get<float>("marker_size");
+	if (!marker_size || (*marker_size <= 0.0f)) {
+		marker_size_ = 0.0;
+		estimate_pose_ = false;
+	}
+	else {
+		marker_size_ = *marker_size;
+	}
+
+	channel_in_ = Channel::Colour;
+	channel_out_ = Channel::Transforms;
+}
+
+bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
+	if (!in.hasChannel(channel_in_)) { return false; }
+	in.download(channel_in_);
+
+	Mat im = in.get<Mat>(channel_in_);
+	Mat K = in.getLeftCamera().getCameraMatrix();
+	Mat dist = cv::Mat::zeros(cv::Size(5, 1), CV_64FC1);
+
+	std::vector<std::vector<cv::Point2f>> corners;
+	std::vector<int> ids;
+
+	cv::aruco::detectMarkers(	im, dictionary_,
+								corners, ids, params_, cv::noArray(), K);
+
+	std::vector<Vec3d> rvecs;
+	std::vector<Vec3d> tvecs;
+
+	if (estimate_pose_) {
+		cv::aruco::estimatePoseSingleMarkers(corners, marker_size_, K, dist, rvecs, tvecs);
+	}
+
+	vector<Transformation> result;
+	if (estimate_pose_) {
+		for (size_t i = 0; i < rvecs.size(); i++) {
+			result.push_back(ftl::codecs::Transformation(ids[i], rvecs[i], tvecs[i]));
+		}
+	}
+
+	out.create(channel_out_, result);
+
+	if (debug_) {
+		cv::aruco::drawDetectedMarkers(im, corners, ids);
+		if (estimate_pose_) {
+			for (size_t i = 0; i < rvecs.size(); i++) {
+					cv::aruco::drawAxis(im, K, dist, rvecs[i], tvecs[i], marker_size_);
+			}
+		}
+	}
+
+	// TODO: should be uploaded by operator which requires data on GPU
+	in.upload(channel_in_);
+
+	return true;
+}
\ No newline at end of file
diff --git a/components/operators/src/depth.cpp b/components/operators/src/depth.cpp
index 0a93061ed650800ea9fc6cd69917a689ff27d15a..b9462b8242348d9f632bd22d38fe2f5b853c1c0a 100644
--- a/components/operators/src/depth.cpp
+++ b/components/operators/src/depth.cpp
@@ -49,7 +49,7 @@ static void calc_space_weighted_filter(GpuMat& table_space, int win_size, float
 
 DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
 		ftl::operators::Operator(cfg) {
-	
+
 	scale_ = 16.0;
 	radius_ = cfg->value("radius", 7);
 	iter_ = cfg->value("iter", 2);
@@ -64,7 +64,7 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
 	cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
 		max_disc_ = config()->value("max_discontinuity", 0.1f);
 	});
-	
+
 
 	calc_color_weighted_table(table_color_, sigma_range_, 255);
     calc_space_weighted_filter(table_space_, radius_ * 2 + 1, radius_ + 1.0f);
@@ -72,7 +72,7 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
 
 DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg, const std::tuple<ftl::codecs::Channel> &p) :
 		ftl::operators::Operator(cfg) {
-	
+
 	scale_ = 16.0;
 	radius_ = cfg->value("radius", 7);
 	iter_ = cfg->value("iter", 2);
@@ -87,7 +87,7 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg, const std::tu
 	cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
 		max_disc_ = config()->value("max_discontinuity", 0.1f);
 	});
-	
+
 
 	calc_color_weighted_table(table_color_, sigma_range_, 255);
     calc_space_weighted_filter(table_space_, radius_ * 2 + 1, radius_ + 1.0f);
@@ -134,7 +134,9 @@ void DepthChannel::_createPipeline() {
 							config()->value("height", 720));
 
 	pipe_->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
+	#ifdef HAVE_LIBSGM
 	pipe_->append<ftl::operators::FixstarsSGM>("algorithm");
+	#endif
 	#ifdef HAVE_OPTFLOW
 	pipe_->append<ftl::operators::OpticalFlowTemporalSmoothing>("optflow_filter");
 	#endif
diff --git a/components/operators/src/detectandtrack.cpp b/components/operators/src/detectandtrack.cpp
index 313c0a26e9302dd311fd71acd266bd5be8e8f827..5d7c629d699b7761424785970df99b490cddb3dd 100644
--- a/components/operators/src/detectandtrack.cpp
+++ b/components/operators/src/detectandtrack.cpp
@@ -47,7 +47,7 @@ bool DetectAndTrack::init() {
 	else { min_size_ = {0.0, 0.0}; }
 	if (max_size && max_size->size() == 2) { max_size_ = *max_size; }
 	else { max_size_ = {1.0, 1.0}; }
-	
+
 	min_size_[0] = max(min(1.0, min_size_[0]), 0.0);
 	min_size_[1] = max(min(1.0, min_size_[1]), 0.0);
 	max_size_[0] = max(min(1.0, max_size_[0]), 0.0);
@@ -55,6 +55,19 @@ bool DetectAndTrack::init() {
 	if (min_size_[0] > max_size_[0]) { min_size_[0] = max_size_[0]; }
 	if (min_size_[1] > max_size_[1]) { min_size_[1] = max_size_[1]; }
 
+	update_bounding_box_ = config()->value("update_bounding_box", false);
+	std::string tracker_type = config()->value("tracker_type", std::string("KCF"));
+	if (tracker_type == "CSRT") {
+		tracker_type_ = 1;
+	}
+	else if (tracker_type == "KCF") {
+		tracker_type_ = 0;
+	}
+	else {
+		LOG(WARNING) << "unknown tracker type " << tracker_type << ", using KCF";
+		tracker_type_ = 0;
+	}
+
 	channel_in_ = ftl::codecs::Channel::Colour;
 	channel_out_ = ftl::codecs::Channel::Data;
 	id_max_ = 0;
@@ -82,6 +95,22 @@ static Point2d center(Rect2d obj) {
 	return Point2d(obj.x+obj.width/2.0, obj.y+obj.height/2.0);
 }
 
+int DetectAndTrack::createTracker(const Mat &im, const Rect2d &obj) {
+	cv::Ptr<cv::Tracker> tracker;
+	if (tracker_type_ == 1) {
+		// cv::TrackerCSRT::Params params; /// defaults (???)
+		tracker = cv::TrackerCSRT::create();
+	}
+	else {
+		tracker = cv::TrackerKCF::create();
+	}
+	
+	int id = id_max_++;
+	tracker->init(im, obj);
+	tracked_.push_back({ id, obj, tracker, 0 });
+	return id;
+}
+
 bool DetectAndTrack::detect(const Mat &im) {
 	Size min_size(im.size().width*min_size_[0], im.size().height*min_size_[1]);
 	Size max_size(im.size().width*max_size_[0], im.size().height*max_size_[1]);
@@ -99,18 +128,18 @@ bool DetectAndTrack::detect(const Mat &im) {
 		bool found = false;
 		for (auto &tracker : tracked_) {
 			if (cv::norm(center(tracker.object)-c) < max_distance_) {
-				// update? (bounding box can be quite different)
-				// tracker.object = obj;
+				if (update_bounding_box_) {
+					tracker.object = obj;
+					tracker.tracker->init(im, obj);
+				}
+				
 				found = true;
 				break;
 			}
 		}
 
 		if (!found && (tracked_.size() < max_tracked_)) {
-			cv::Ptr<cv::Tracker> tracker = cv::TrackerCSRT::create();
-			//cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
-			tracker->init(im, obj);
-			tracked_.push_back({ id_max_++, obj, tracker, 0 });
+			createTracker(im, obj);
 		}
 	}
 
@@ -182,7 +211,7 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
 			cv::rectangle(im, tracked.object, cv::Scalar(0, 0, 255), 1);
 		}
 	}
-	in.create(channel_out_, result);
+	out.create(channel_out_, result);
 
 	// TODO: should be uploaded by operator which requires data on GPU
 	in.upload(channel_in_);
diff --git a/components/rgbd-sources/include/ftl/rgbd/camera.hpp b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
index 772a884b0b47b963d3ba8db0dcc8a6532cd83412..54f41579d5418d7d8a107ef46b3cd968461fef76 100644
--- a/components/rgbd-sources/include/ftl/rgbd/camera.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
@@ -43,12 +43,15 @@ struct __align__(16) Camera {
 	__device__ float3 screenToCam(uint ux, uint uy, float depth) const; 
 
 	#ifndef __CUDACC__
+
 	MSGPACK_DEFINE(fx,fy,cx,cy,width,height,minDepth,maxDepth,baseline,doffs);
 
 	/**
 	 * Make a camera struct from a configurable.
 	 */
 	static Camera from(ftl::Configurable*);
+	
+	cv::Mat getCameraMatrix() const;
 	#endif
 };
 
diff --git a/components/rgbd-sources/src/camera.cpp b/components/rgbd-sources/src/camera.cpp
index 90d77f34ba1226405e5cb1c0a5b98459033294d6..96ac0be4cc5f154fc9fd50fef5040f76998d16a0 100644
--- a/components/rgbd-sources/src/camera.cpp
+++ b/components/rgbd-sources/src/camera.cpp
@@ -16,3 +16,12 @@ Camera Camera::from(ftl::Configurable *cfg) {
 	r.baseline = cfg->value("baseline", 0.05f);
 	return r;
 }
+
+cv::Mat Camera::getCameraMatrix() const {
+	cv::Mat K = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
+	K.at<double>(0,0) = fx;
+	K.at<double>(0,2) = -cx;
+	K.at<double>(1,1) = fy;
+	K.at<double>(1,2) = -cy;
+	return K;
+}
diff --git a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
index ae78c70aebf2b71b4dd13925d49f0bad93654009..141070f3917cd476fa09485cc268760e0bc62269 100644
--- a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
@@ -74,7 +74,7 @@ void StereoVideoSource::init(const string &file) {
 		LOG(INFO) << "Using cameras...";
 		lsrc_ = ftl::create<LocalSource>(host_, "feed");
 	}
-	
+
 	color_size_ = cv::Size(lsrc_->width(), lsrc_->height());
 	frames_ = std::vector<Frame>(2);
 
@@ -83,6 +83,7 @@ void StereoVideoSource::init(const string &file) {
 	pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow", Channel::Colour, Channel::Flow);
 	#endif
 	pipeline_input_->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
+	pipeline_input_->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
 	pipeline_input_->append<ftl::operators::ColourChannels>("colour");
 
 	calib_ = ftl::create<Calibrate>(host_, "calibration", cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight()), stream_);
@@ -100,10 +101,10 @@ void StereoVideoSource::init(const string &file) {
 		}
 	}
 	else {
-		fname = fname_config ? *fname_config : 
+		fname = fname_config ? *fname_config :
 								string(FTL_LOCAL_CONFIG_ROOT) + "/"
 								+ std::string("calibration.yml");
-		
+
 		LOG(ERROR) << "No calibration, default path set to " + fname;
 	}
 
@@ -111,7 +112,7 @@ void StereoVideoSource::init(const string &file) {
 	// RPC callbacks to update calibration
 	// Should only be used by calibration app (interface may change)
 	// Tries to follow interface of ftl::Calibrate
-	
+
 	host_->getNet()->bind("set_pose",
 		[this](cv::Mat pose){
 			if (!calib_->setPose(pose)) {
@@ -121,10 +122,10 @@ void StereoVideoSource::init(const string &file) {
 
 			return true;
 	});
-	
+
 	host_->getNet()->bind("set_intrinsics",
 		[this](cv::Size size, cv::Mat K_l, cv::Mat D_l, cv::Mat K_r, cv::Mat D_r) {
-			
+
 			if (!calib_->setIntrinsics(size, {K_l, K_r})) {
 				LOG(ERROR) << "bad intrinsic parameters (bad values)";
 				return false;
@@ -151,12 +152,12 @@ void StereoVideoSource::init(const string &file) {
 			return true;
 	});
 
-	host_->getNet()->bind("save_calibration", 
+	host_->getNet()->bind("save_calibration",
 		[this, fname](){
 			return calib_->saveCalibration(fname);
 	});
 
-	host_->getNet()->bind("set_rectify", 
+	host_->getNet()->bind("set_rectify",
 		[this](bool enable){
 			bool retval = calib_->setRectify(enable);
 			updateParameters();
@@ -168,12 +169,12 @@ void StereoVideoSource::init(const string &file) {
 			cv::Mat(calib_->getCameraDistortionLeft()),
 			cv::Mat(calib_->getCameraDistortionRight()) };
 	});
-	
+
 	////////////////////////////////////////////////////////////////////////////
 
 	// Generate camera parameters from camera matrix
 	updateParameters();
-	
+
 	LOG(INFO) << "StereoVideo source ready...";
 	ready_ = true;
 
@@ -194,7 +195,7 @@ void StereoVideoSource::updateParameters() {
 	setPose(pose);
 
 	cv::Mat K;
-	
+
 	// same for left and right
 	float baseline = static_cast<float>(1.0 / calib_->getQ().at<double>(3,2));
 	float doff = static_cast<float>(-calib_->getQ().at<double>(3,3) * baseline);
@@ -268,7 +269,7 @@ bool StereoVideoSource::retrieve() {
 	frame.setOrigin(&state_);
 
 	cv::cuda::GpuMat dummy;
-	auto &hres = (lsrc_->hasHigherRes()) ? frame.create<cv::cuda::GpuMat>(Channel::ColourHighRes) : dummy;	
+	auto &hres = (lsrc_->hasHigherRes()) ? frame.create<cv::cuda::GpuMat>(Channel::ColourHighRes) : dummy;
 
 	if (lsrc_->isStereo()) {
 		cv::cuda::GpuMat &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
@@ -285,7 +286,7 @@ bool StereoVideoSource::retrieve() {
 
 	pipeline_input_->apply(frame, frame, cv::cuda::StreamAccessor::getStream(stream2_));
 	stream2_.waitForCompletion();
-	
+
 	return true;
 }
 
@@ -297,11 +298,11 @@ void StereoVideoSource::swap() {
 
 bool StereoVideoSource::compute(int n, int b) {
 	auto &frame = frames_[1];
-	
+
 	if (lsrc_->isStereo()) {
 		if (!frame.hasChannel(Channel::Left) ||
 			!frame.hasChannel(Channel::Right)) {
-			
+
 			return false;
 		}
 
@@ -315,7 +316,7 @@ bool StereoVideoSource::compute(int n, int b) {
 	else {
 		if (!frame.hasChannel(Channel::Left)) { return false; }
 	}
-	
+
 	host_->notify(timestamp_, frame);
 	return true;
 }