diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 512a4d19087bd66c5c895f3f4c5595190122132c..5a81ac8cbd6f27bb4388eab11d6d4b1b15875410 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -25,7 +25,7 @@ linux:
   script:
     - mkdir build
     - cd build
-    - cmake .. -DWITH_OPTFLOW=TRUE -DBUILD_CALIBRATION=TRUE
+    - cmake .. -DWITH_OPTFLOW=TRUE -DBUILD_CALIBRATION=TRUE -DCMAKE_BUILD_TYPE=Release
     - make
     - ctest --output-on-failure
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b0aecb25a357a5c978db35a80cb20487adb5d08d..0f866eda014e6b6353824435cbacbc500fe2c683 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,6 +13,7 @@ enable_testing()
 option(WITH_PCL "Use PCL if available" OFF)
 option(WITH_NVPIPE "Use NvPipe for compression if available" ON)
 option(WITH_OPTFLOW "Use NVIDIA Optical Flow if available" OFF)
+option(WITH_OPENVR "Build with OpenVR support" OFF)
 option(WITH_FIXSTARS "Use Fixstars libSGM if available" ON)
 option(BUILD_VISION "Enable the vision component" ON)
 option(BUILD_RECONSTRUCT "Enable the reconstruction component" ON)
@@ -30,6 +31,8 @@ find_package( URIParser REQUIRED )
 find_package( MsgPack REQUIRED )
 find_package( Eigen3 REQUIRED )
 
+# find_package( ffmpeg )
+
 if (WITH_OPTFLOW)
 	# TODO check that cudaoptflow.hpp exists (OpenCV built with correct contrib modules)
 	set(HAVE_OPTFLOW true)
@@ -40,19 +43,20 @@ if (LibArchive_FOUND)
 	set(HAVE_LIBARCHIVE true)
 endif()
 
-## OpenVR API path
-find_library(OPENVR_LIBRARIES
-  NAMES
-    openvr_api
-)
-set(OPENVR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../headers)
-
-if (OPENVR_LIBRARIES)
-	message(STATUS "Found OpenVR: ${OPENVR_LIBRARIES}")
-	set(HAVE_OPENVR true)
+if (WITH_OPENVR)
+	## OpenVR API path
+	find_library(OPENVR_LIBRARIES
+		NAMES
+			openvr_api
+	)
+	set(OPENVR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../headers)
+
+	if (OPENVR_LIBRARIES)
+		message(STATUS "Found OpenVR: ${OPENVR_LIBRARIES}")
+		set(HAVE_OPENVR true)
+	endif()
 endif()
 
-
 if (WITH_FIXSTARS)
 	find_package( LibSGM )
 	if (LibSGM_FOUND)
@@ -179,6 +183,9 @@ find_library(UUID_LIBRARIES NAMES uuid libuuid)
 else()
 endif()
 
+# For ftl2mkv
+check_include_file("libavformat/avformat.h" HAVE_AVFORMAT)
+
 # Check for optional opencv components
 set(CMAKE_REQUIRED_INCLUDES ${OpenCV_INCLUDE_DIRS})
 check_include_file_cxx("opencv2/viz.hpp" HAVE_VIZ)
@@ -220,6 +227,12 @@ add_subdirectory(components/rgbd-sources)
 add_subdirectory(components/control/cpp)
 add_subdirectory(applications/calibration)
 add_subdirectory(applications/groupview)
+add_subdirectory(applications/player)
+add_subdirectory(applications/recorder)
+
+if (HAVE_AVFORMAT)
+	add_subdirectory(applications/ftl2mkv)
+endif()
 
 if (BUILD_RENDERER)
 	add_subdirectory(components/renderers)
diff --git a/applications/calibration-multi/src/main.cpp b/applications/calibration-multi/src/main.cpp
index ea770f69b03981164c2b6c97f95aa8841f1d8ae7..dbea22842d4b0a70546187f48e161def746af207 100644
--- a/applications/calibration-multi/src/main.cpp
+++ b/applications/calibration-multi/src/main.cpp
@@ -37,9 +37,9 @@ using cv::Vec4d;
 
 using ftl::net::Universe;
 using ftl::rgbd::Source;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
-Mat getCameraMatrix(const ftl::rgbd::Camera &parameters) {
+Mat createCameraMatrix(const ftl::rgbd::Camera &parameters) {
 	Mat m = (cv::Mat_<double>(3,3) << parameters.fx, 0.0, -parameters.cx, 0.0, parameters.fy, -parameters.cy, 0.0, 0.0, 1.0);
 	return m;
 }
@@ -173,7 +173,7 @@ void visualizeCalibration(	MultiCameraCalibrationNew &calib, Mat &out,
 		Scalar(64, 255, 64),
 		Scalar(64, 255, 64),
 	};
-
+	
 	vector<int> markers = {cv::MARKER_SQUARE, cv::MARKER_DIAMOND};
 
 	for (size_t c = 0; c < rgb.size(); c++) {
@@ -183,8 +183,12 @@ void visualizeCalibration(	MultiCameraCalibrationNew &calib, Mat &out,
 		for (int r = 50; r < rgb[c].rows; r = r+50) {
 			cv::line(rgb[c], cv::Point(0, r), cv::Point(rgb[c].cols-1, r), cv::Scalar(0,0,255), 1);
 		}
-	}
 
+		cv::putText(rgb[c],
+			"Camera " + std::to_string(c),
+			Point2i(roi[c].x + 10, roi[c].y + 30),
+			cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
+	}
 	stack(rgb, out);
 }
 
@@ -203,7 +207,7 @@ struct CalibrationParams {
 void calibrate(	MultiCameraCalibrationNew &calib, vector<string> &uri_cameras,
 				const CalibrationParams &params, vector<Mat> &map1, vector<Mat> &map2, vector<cv::Rect> &roi)
 {
-	int reference_camera = -1;
+	int reference_camera = params.reference_camera;
 	if (params.reference_camera < 0) {
 		reference_camera = calib.getOptimalReferenceCamera();
 		reference_camera -= (reference_camera & 1);
@@ -233,10 +237,11 @@ void calibrate(	MultiCameraCalibrationNew &calib, vector<string> &uri_cameras,
 		Mat P1, P2, Q;
 		Mat R1, R2;
 		Mat R_c1c2, T_c1c2;
-
+		
 		calculateTransform(R[c], t[c], R[c + 1], t[c + 1], R_c1c2, T_c1c2);
 		cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, params.alpha);
 
+		// calculate extrinsics from rectified parameters
 		Mat _t = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
 		Rt_out[c] = getMat4x4(R[c], t[c]) * getMat4x4(R1, _t).inv();
 		Rt_out[c + 1] = getMat4x4(R[c + 1], t[c + 1]) * getMat4x4(R2, _t).inv();
@@ -247,27 +252,46 @@ void calibrate(	MultiCameraCalibrationNew &calib, vector<string> &uri_cameras,
 			size_t pos2 = uri_cameras[c/2].find("#", pos1);
 			node_name = uri_cameras[c/2].substr(pos1, pos2 - pos1);
 			
-			if (params.save_extrinsic) {
-				// TODO:	only R and T required, rectification performed on vision node,
-				//			consider saving global extrinsic calibration?
-				saveExtrinsics(params.output_path + node_name + "-extrinsic.yml", R_c1c2, T_c1c2, R1, R2, P1, P2, Q);
+			if (params.save_extrinsic)
+			{
+				// TODO:	only R and T required when rectification performed
+				//			on vision node. Consider saving extrinsic global
+				//			for node as well?
+				saveExtrinsics(	params.output_path + node_name + "-extrinsic.yml",
+								R_c1c2, T_c1c2, R1, R2, P1, P2, Q
+				);
 				LOG(INFO) << "Saved: " << params.output_path + node_name + "-extrinsic.yml";
 			}
-			if (params.save_intrinsic) {
+			else
+			{
+				Mat rvec;
+				cv::Rodrigues(R_c1c2, rvec);
+				LOG(INFO) << "From camera " << c << " to " << c + 1;
+				LOG(INFO) << "rotation:    " << rvec.t();
+				LOG(INFO) << "translation: " << T_c1c2.t();
+			}
+
+			if (params.save_intrinsic)
+			{
 				saveIntrinsics(
 					params.output_path + node_name + "-intrinsic.yml",
-					{calib.getCameraMat(c),
-					 calib.getCameraMat(c + 1)},
+					{ calib.getCameraMat(c), calib.getCameraMat(c + 1) },
 					params.size
-
 				);
 				LOG(INFO) << "Saved: " << params.output_path + node_name + "-intrinsic.yml";
 			}
+			else if (params.optimize_intrinsic)
+			{
+				LOG(INFO) << "K1:\n" << K1;
+				LOG(INFO) << "K2:\n" << K2;
+			}
 		}
 
 		// for visualization
 		Size new_size;
 		cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, 1.0, new_size, &roi[c], &roi[c + 1]);
+		//roi[c] = cv::Rect(0, 0, params.size.width, params.size.height);
+		//roi[c+1] = cv::Rect(0, 0, params.size.width, params.size.height);
 		cv::initUndistortRectifyMap(K1, D1, R1, P1, params.size, CV_16SC2, map1[c], map2[c]);
 		cv::initUndistortRectifyMap(K2, D2, R2, P2, params.size, CV_16SC2, map1[c + 1], map2[c + 1]);
 	}
@@ -393,11 +417,11 @@ void runCameraCalibration(	ftl::Configurable* root,
 		CHECK(params.size == Size(camera_l.width, camera_l.height));
 		
 		Mat K;
-		K = getCameraMatrix(camera_r);
+		K = createCameraMatrix(camera_r);
 		LOG(INFO) << "K[" << 2 * i + 1 << "] = \n" << K;
 		calib.setCameraParameters(2 * i + 1, K);
 
-		K = getCameraMatrix(camera_l);
+		K = createCameraMatrix(camera_l);
 		LOG(INFO) << "K[" << 2 * i << "] = \n" << K;
 		calib.setCameraParameters(2 * i, K);
 	}
@@ -509,6 +533,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 
 	// visualize
 	while(ftl::running) {
+		cv::waitKey(10);
 		while (!new_frames) {
 			for (auto src : sources) { src->grab(30); }
 			if (cv::waitKey(50) != -1) { ftl::running = false; }
diff --git a/applications/calibration-multi/src/multicalibrate.cpp b/applications/calibration-multi/src/multicalibrate.cpp
index 9d85d1af7ae938981bb445a852fd85a913c6e26e..1d22c8f0e9ac4e3d6f0dea95ae7adcd30efaf6dc 100644
--- a/applications/calibration-multi/src/multicalibrate.cpp
+++ b/applications/calibration-multi/src/multicalibrate.cpp
@@ -90,8 +90,8 @@ MultiCameraCalibrationNew::MultiCameraCalibrationNew(
 	n_cameras_(n_cameras),
 	reference_camera_(reference_camera),
 	min_visible_points_(50),
-	fix_intrinsics_(fix_intrinsics == 1 ? 5 : 0),
 
+	fix_intrinsics_(fix_intrinsics == 1 ? 0 : 5),
 	resolution_(resolution),
 	K_(n_cameras),
 	dist_coeffs_(n_cameras),
@@ -491,7 +491,7 @@ double MultiCameraCalibrationNew::calibratePair(size_t camera_from, size_t camer
 	// Bundle Adjustment
 	// vector<Point3d> points3d_triangulated;
 	// points3d_triangulated.insert(points3d_triangulated.begin(), points3d.begin(), points3d.end());
-	LOG(INFO) << K1;
+	
 	double err;
 	cvsba::Sba sba;
 	{
diff --git a/applications/ftl2mkv/CMakeLists.txt b/applications/ftl2mkv/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..96ed5b890040e77516aec786ca767f6e8d430117
--- /dev/null
+++ b/applications/ftl2mkv/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(FTL2MKVSRC
+	src/main.cpp
+)
+
+add_executable(ftl2mkv ${FTL2MKVSRC})
+
+target_include_directories(ftl2mkv PRIVATE src)
+
+target_link_libraries(ftl2mkv ftlcommon ftlcodecs ftlrgbd Threads::Threads ${OpenCV_LIBS} avutil avformat avcodec)
+
+
diff --git a/applications/ftl2mkv/src/main.cpp b/applications/ftl2mkv/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b555dcbf7f3494dbd66f87415f51cbb0cec3c49e
--- /dev/null
+++ b/applications/ftl2mkv/src/main.cpp
@@ -0,0 +1,239 @@
+#include <loguru.hpp>
+#include <ftl/configuration.hpp>
+#include <ftl/codecs/reader.hpp>
+#include <ftl/codecs/packet.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/codecs/hevc.hpp>
+
+#include <fstream>
+
+#include <Eigen/Eigen>
+
+extern "C" {
+ #include <libavformat/avformat.h>
+}
+
+using ftl::codecs::codec_t;
+using ftl::codecs::Channel;
+
+static AVStream *add_video_stream(AVFormatContext *oc, const ftl::codecs::Packet &pkt)
+{
+    AVCodecContext *c;
+    AVStream *st;
+
+    st = avformat_new_stream(oc, 0);
+    if (!st) {
+        fprintf(stderr, "Could not alloc stream\n");
+        exit(1);
+    }
+
+	AVCodecID codec_id;
+	switch (pkt.codec) {
+	case codec_t::HEVC : codec_id = AV_CODEC_ID_HEVC; break;
+	}
+
+    //c = st->codec;
+    //c->codec_id = codec_id;
+    //c->codec_type = AVMEDIA_TYPE_VIDEO;
+
+	st->time_base.den = 20;
+	st->time_base.num = 1;
+	//st->id = oc->nb_streams-1;
+	//st->nb_frames = 0;
+	st->codecpar->codec_id = codec_id;
+	st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
+	st->codecpar->width = ftl::codecs::getWidth(pkt.definition);
+	st->codecpar->height = ftl::codecs::getHeight(pkt.definition);
+	st->codecpar->format = AV_PIX_FMT_NV12;
+	st->codecpar->bit_rate = 4000000;
+
+    /* put sample parameters */
+    //c->bit_rate = 4000000;
+    /* resolution must be a multiple of two */
+    //c->width = ftl::codecs::getWidth(pkt.definition);
+   // c->height = ftl::codecs::getHeight(pkt.definition);
+    /* time base: this is the fundamental unit of time (in seconds) in terms
+      of which frame timestamps are represented. for fixed-fps content,
+       timebase should be 1/framerate and timestamp increments should be
+       identically 1. */
+    //c->time_base.den = 20;  // Frame rate
+    //c->time_base.num = 1;
+    //c->gop_size = 10; /* emit one intra frame every twelve frames at most */
+    //c->pix_fmt = AV_PIX_FMT_ABGR;
+
+    // some formats want stream headers to be separate
+    //if(oc->oformat->flags & AVFMT_GLOBALHEADER)
+    //    st->codecpar-> |= CODEC_FLAG_GLOBAL_HEADER;
+
+    return st;
+}
+
+
+int main(int argc, char **argv) {
+    std::string filename;
+
+	auto root = ftl::configure(argc, argv, "player_default");
+
+	std::string outputfile = root->value("out", std::string("output.mkv"));
+	std::vector<std::string> paths = *root->get<std::vector<std::string>>("paths");
+
+	if (paths.size() == 0) {
+		LOG(ERROR) << "Missing input ftl file.";
+		return -1;
+	} else {
+		filename = paths[0];
+	}
+
+	std::ifstream f;
+    f.open(filename);
+    if (!f.is_open()) {
+		LOG(ERROR) << "Could not open file: " << filename;
+		return -1;
+	}
+
+    ftl::codecs::Reader r(f);
+    if (!r.begin()) {
+		LOG(ERROR) << "Bad ftl file format";
+		return -1;
+	}
+
+	AVOutputFormat *fmt;
+	AVFormatContext *oc;
+	AVStream *video_st[10] = {nullptr};
+
+	av_register_all();
+
+	fmt = av_guess_format(NULL, outputfile.c_str(), NULL);
+
+	if (!fmt) {
+        LOG(ERROR) << "Could not find suitable output format";
+        return -1;
+    }
+
+    /* allocate the output media context */
+    oc = avformat_alloc_context();
+    if (!oc) {
+        LOG(ERROR) << "Memory error";
+        return -1;
+    }
+    oc->oformat = fmt;
+	snprintf(oc->filename, sizeof(oc->filename), "%s", outputfile.c_str());
+
+	/* open the output file, if needed */
+    if (!(fmt->flags & AVFMT_NOFILE)) {
+        if (avio_open(&oc->pb, outputfile.c_str(), AVIO_FLAG_WRITE) < 0) {
+            LOG(ERROR) << "Could not open output file: " << outputfile;
+            return -1;
+        }
+    }
+
+    LOG(INFO) << "Converting...";
+
+    int current_stream = root->value("stream", 0);
+    int current_channel = 0;
+
+	//bool stream_added[10] = {false};
+
+	// TODO: In future, find a better way to discover number of streams...
+	// Read entire file to find all streams before reading again to write data
+	bool res = r.read(90000000000000, [&current_stream,&current_channel,&r,&video_st,oc](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+        if (spkt.channel != static_cast<ftl::codecs::Channel>(current_channel)) return;
+        if (spkt.streamID == current_stream || current_stream == 255) {
+
+            if (pkt.codec == codec_t::POSE) {
+                return;
+            }
+
+            if (pkt.codec == codec_t::CALIBRATION) {
+                return;
+            }
+
+			if (spkt.streamID >= 10) return;  // TODO: Allow for more than 10
+
+			if (video_st[spkt.streamID] == nullptr) {
+				video_st[spkt.streamID] = add_video_stream(oc, pkt);
+			}
+		}
+	});
+
+	r.end();
+	f.clear();
+	f.seekg(0);
+	if (!r.begin()) {
+		LOG(ERROR) << "Bad ftl file format";
+		return -1;
+	}
+
+	av_dump_format(oc, 0, "output.mkv", 1);
+	if (avformat_write_header(oc, NULL) < 0) {
+		LOG(ERROR) << "Failed to write stream header";
+	}
+
+	bool seen_key[10] = {false};
+
+    res = r.read(90000000000000, [&current_stream,&current_channel,&r,&video_st,oc,&seen_key](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+        if (spkt.channel != static_cast<ftl::codecs::Channel>(current_channel)) return;
+        if (spkt.streamID == current_stream || current_stream == 255) {
+
+            if (pkt.codec == codec_t::POSE) {
+                return;
+            }
+
+            if (pkt.codec == codec_t::CALIBRATION) {
+                return;
+            }
+
+            //LOG(INFO) << "Reading packet: (" << (int)spkt.streamID << "," << (int)spkt.channel << ") " << (int)pkt.codec << ", " << (int)pkt.definition;
+
+			if (spkt.streamID >= 10) return;  // TODO: Allow for more than 10
+
+			bool keyframe = false;
+			if (pkt.codec == codec_t::HEVC) {
+				if (ftl::codecs::hevc::isIFrame(pkt.data)) {
+					seen_key[spkt.streamID] = true;
+					keyframe = true;
+				}
+			}
+			if (!seen_key[spkt.streamID]) return;
+
+            AVPacket avpkt;
+			av_init_packet(&avpkt);
+			if (keyframe) avpkt.flags |= AV_PKT_FLAG_KEY;
+			avpkt.pts = spkt.timestamp - r.getStartTime();
+			avpkt.dts = avpkt.pts;
+			avpkt.stream_index= video_st[spkt.streamID]->index;
+			avpkt.data= const_cast<uint8_t*>(pkt.data.data());
+			avpkt.size= pkt.data.size();
+			avpkt.duration = 1;
+
+			//LOG(INFO) << "write frame: " << avpkt.pts << "," << avpkt.stream_index << "," << avpkt.size;
+
+			/* write the compressed frame in the media file */
+			auto ret = av_write_frame(oc, &avpkt);
+			if (ret != 0) {
+				LOG(ERROR) << "Error writing frame: " << ret;
+			}
+        }
+    });
+
+	av_write_trailer(oc);
+	//avcodec_close(video_st->codec);
+
+	for (int i=0; i<10; ++i) {
+		if (video_st[i]) av_free(video_st[i]);
+	}
+
+	if (!(fmt->flags & AVFMT_NOFILE)) {
+         /* close the output file */
+        avio_close(oc->pb);
+    }
+
+	av_free(oc);
+
+    if (!res) LOG(INFO) << "Done.";
+
+    r.end();
+
+	ftl::running = false;
+	return 0;
+}
diff --git a/applications/groupview/src/main.cpp b/applications/groupview/src/main.cpp
index 6b32221ef1a13ad05f9e5bb915c1186eca0aa52c..be99d857562bfb48ec120cccccf0737ff2d0b210 100644
--- a/applications/groupview/src/main.cpp
+++ b/applications/groupview/src/main.cpp
@@ -16,7 +16,7 @@ using std::string;
 using std::vector;
 using cv::Size;
 using cv::Mat;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
 // TODO: remove code duplication (function from reconstruction)
 static void from_json(nlohmann::json &json, map<string, Matrix4d> &transformations) {
diff --git a/applications/gui/CMakeLists.txt b/applications/gui/CMakeLists.txt
index fbed0680dde997c0f0a76519ac272fb3e5c1c64f..fe553becc5ef883873f0385383b06ede5f3cc6ab 100644
--- a/applications/gui/CMakeLists.txt
+++ b/applications/gui/CMakeLists.txt
@@ -15,6 +15,10 @@ set(GUISRC
 	src/thumbview.cpp
 )
 
+if (HAVE_OPENVR)
+	list(APPEND GUISRC "src/vr.cpp")
+endif()
+
 add_executable(ftl-gui ${GUISRC})
 
 target_include_directories(ftl-gui PUBLIC
diff --git a/applications/gui/src/camera.cpp b/applications/gui/src/camera.cpp
index 32d78a4a0ff8bbccfed60b13293b1277ab599792..f35b4e539e8216b32106762b69ea7e2b028706eb 100644
--- a/applications/gui/src/camera.cpp
+++ b/applications/gui/src/camera.cpp
@@ -1,14 +1,17 @@
 #include "camera.hpp"
 #include "pose_window.hpp"
 #include "screen.hpp"
-
 #include <nanogui/glutil.h>
 
+#ifdef HAVE_OPENVR
+#include "vr.hpp"
+#endif
+
 using ftl::rgbd::isValidDepth;
 using ftl::gui::GLTexture;
 using ftl::gui::PoseWindow;
-using ftl::rgbd::Channel;
-using ftl::rgbd::Channels;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 
 // TODO(Nick) MOVE
 class StatisticsImage {
@@ -116,13 +119,13 @@ void StatisticsImage::getValidRatio(cv::Mat &out) {
 }
 
 static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
-  Eigen::Affine3d rx =
-      Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
-  Eigen::Affine3d ry =
-      Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
-  Eigen::Affine3d rz =
-      Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
-  return rz * rx * ry;
+	Eigen::Affine3d rx =
+		Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+	Eigen::Affine3d ry =
+		Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+	Eigen::Affine3d rz =
+		Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+	return rz * rx * ry;
 }
 
 ftl::gui::Camera::Camera(ftl::gui::Screen *screen, ftl::rgbd::Source *src) : screen_(screen), src_(src) {
@@ -145,14 +148,17 @@ ftl::gui::Camera::Camera(ftl::gui::Screen *screen, ftl::rgbd::Source *src) : scr
 	posewin_->setTheme(screen->windowtheme);
 	posewin_->setVisible(false);
 
-	src->setCallback([this](int64_t ts, cv::Mat &rgb, cv::Mat &depth) {
+	src->setCallback([this](int64_t ts, cv::Mat &channel1, cv::Mat &channel2) {
 		UNIQUE_LOCK(mutex_, lk);
-		rgb_.create(rgb.size(), rgb.type());
-		depth_.create(depth.size(), depth.type());
-		cv::swap(rgb_,rgb);
-		cv::swap(depth_, depth);
-		cv::flip(rgb_,rgb_,0);
-		cv::flip(depth_,depth_,0);
+		im1_.create(channel1.size(), channel1.type());
+		im2_.create(channel2.size(), channel2.type());
+
+		//cv::swap(channel1, im1_);
+		//cv::swap(channel2, im2_);
+		
+		// OpenGL (0,0) bottom left
+		cv::flip(channel1, im1_, 0);
+		cv::flip(channel2, im2_, 0);
 	});
 }
 
@@ -231,10 +237,56 @@ void ftl::gui::Camera::showSettings() {
 
 }
 
+#ifdef HAVE_OPENVR
+bool ftl::gui::Camera::setVR(bool on) {
+	if (on == vr_mode_) {
+		LOG(WARNING) << "VR mode already enabled";
+		return on;
+	}
+
+	if (on) {
+		setChannel(Channel::Right);
+		src_->set("baseline", baseline_);
+
+		Eigen::Matrix3d intrinsic;
+		
+		intrinsic = getCameraMatrix(screen_->getVR(), vr::Eye_Left);
+		CHECK(intrinsic(0, 2) < 0 && intrinsic(1, 2) < 0);
+		src_->set("focal", intrinsic(0, 0));
+		src_->set("centre_x", intrinsic(0, 2));
+		src_->set("centre_y", intrinsic(1, 2));
+		LOG(INFO) << intrinsic;
+		
+		intrinsic = getCameraMatrix(screen_->getVR(), vr::Eye_Right);
+		CHECK(intrinsic(0, 2) < 0 && intrinsic(1, 2) < 0);
+		src_->set("focal_right", intrinsic(0, 0));
+		src_->set("centre_x_right", intrinsic(0, 2));
+		src_->set("centre_y_right", intrinsic(1, 2));
+
+		vr_mode_ = true;
+	}
+	else {
+		vr_mode_ = false;
+		setChannel(Channel::Left); // reset to left channel
+		// todo restore camera params
+	}
+	
+	return vr_mode_;
+}
+#endif
+
 void ftl::gui::Camera::setChannel(Channel c) {
+#ifdef HAVE_OPENVR
+	if (isVR()) {
+		LOG(ERROR) << "Changing channel in VR mode is not possible.";
+		return;
+	}
+#endif
+
 	channel_ = c;
 	switch (c) {
 	case Channel::Energy:
+	case Channel::Density:
 	case Channel::Flow:
 	case Channel::Confidence:
 	case Channel::Normals:
@@ -255,17 +307,6 @@ void ftl::gui::Camera::setChannel(Channel c) {
 	}
 }
 
-static Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
-{
-	Eigen::Matrix4d matrixObj;
-	matrixObj <<
-		matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
-		matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
-		matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
-		matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f;
-	return matrixObj;
-}
-
 static void visualizeDepthMap(	const cv::Mat &depth, cv::Mat &out,
 								const float max_depth)
 {
@@ -307,8 +348,9 @@ static void drawEdges(	const cv::Mat &in, cv::Mat &out,
 bool ftl::gui::Camera::thumbnail(cv::Mat &thumb) {
 	UNIQUE_LOCK(mutex_, lk);
 	src_->grab(1,9);
-	if (rgb_.empty()) return false;
-	cv::resize(rgb_, thumb, cv::Size(320,180));
+	if (im1_.empty()) return false;
+	cv::resize(im1_, thumb, cv::Size(320,180));
+	cv::flip(thumb, thumb, 0);
 	return true;
 }
 
@@ -320,49 +362,61 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
 	if (src_ && src_->isReady()) {
 		UNIQUE_LOCK(mutex_, lk);
 
-		if (screen_->hasVR()) {
+		if (isVR()) {
 			#ifdef HAVE_OPENVR
-			src_->setChannel(Channel::Right);
-
 			vr::VRCompositor()->WaitGetPoses(rTrackedDevicePose_, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
 
-			if ( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
+			if ((channel_ == Channel::Right) && rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
 			{
-				auto pose = ConvertSteamVRMatrixToMatrix4( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking );
-				pose.inverse();
-
-				// Lerp the Eye
-				eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
-				eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
-				eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
-
-				Eigen::Translation3d trans(eye_);
-				Eigen::Affine3d t(trans);
-				Eigen::Matrix4d viewPose = t.matrix() * pose;
-
-				if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
+				Eigen::Matrix4d eye_l = ConvertSteamVRMatrixToMatrix4(
+					vr::VRSystem()->GetEyeToHeadTransform(vr::Eye_Left));
+				
+				Eigen::Matrix4d eye_r = ConvertSteamVRMatrixToMatrix4(
+					vr::VRSystem()->GetEyeToHeadTransform(vr::Eye_Left));
+
+				float baseline_in = 2.0 * eye_l(0, 3);
+				
+				if (baseline_in != baseline_) {
+					baseline_ = baseline_in;
+					src_->set("baseline", baseline_);
+				}
+				Eigen::Matrix4d pose = ConvertSteamVRMatrixToMatrix4(rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking);
+				Eigen::Vector3d ea = pose.block<3, 3>(0, 0).eulerAngles(0, 1, 2);
+				
+				// NOTE: If modified, should be verified with VR headset!
+				Eigen::Matrix3d R;
+				R =		Eigen::AngleAxisd(ea[0], Eigen::Vector3d::UnitX()) *
+						Eigen::AngleAxisd(-ea[1], Eigen::Vector3d::UnitY()) *
+						Eigen::AngleAxisd(-ea[2], Eigen::Vector3d::UnitZ()); 
+				
+				//double rd = 180.0 / 3.141592;
+				//LOG(INFO) << "rotation x: " << ea[0] *rd << ", y: " << ea[1] * rd << ", z: " << ea[2] * rd;
+				// pose.block<3, 3>(0, 0) = R;
+				
+				rotmat_.block(0, 0, 3, 3) = R;
+			
 			} else {
-				LOG(ERROR) << "No VR Pose";
+				//LOG(ERROR) << "No VR Pose";
 			}
 			#endif
-		} else {
-			// Lerp the Eye
-			eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
-			eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
-			eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
+		}
 
-			Eigen::Translation3d trans(eye_);
-			Eigen::Affine3d t(trans);
-			Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
+		eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
+		eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
+		eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
 
-			if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
-		}
+		Eigen::Translation3d trans(eye_);
+		Eigen::Affine3d t(trans);
+		Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
 
+		if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
+	
 		src_->grab();
-		//src_->getFrames(rgb, depth);
 
 		// When switching from right to depth, client may still receive
 		// right images from previous batch (depth.channels() == 1 check)
+
+		/* todo: does not work
 		if (channel_ == Channel::Deviation &&
 			depth_.rows > 0 && depth_.channels() == 1)
 		{
@@ -371,55 +425,68 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
 			}
 			
 			stats_->update(depth_);
-		}
+		}*/
 
 		cv::Mat tmp;
 
 		switch(channel_) {
+			case Channel::Confidence:
+				if (im2_.rows == 0) { break; }
+				visualizeEnergy(im2_, tmp, 1.0);
+				texture2_.update(tmp);
+				break;
+			
+			case Channel::Density:
 			case Channel::Energy:
-				if (depth_.rows == 0) { break; }
-				visualizeEnergy(depth_, tmp, 10.0);
-				texture_.update(tmp);
+				if (im2_.rows == 0) { break; }
+				visualizeEnergy(im2_, tmp, 10.0);
+				texture2_.update(tmp);
 				break;
+			
 			case Channel::Depth:
-				if (depth_.rows == 0) { break; }
-				visualizeDepthMap(depth_, tmp, 7.0);
-				if (screen_->root()->value("showEdgesInDepth", false)) drawEdges(rgb_, tmp);
-				texture_.update(tmp);
+				if (im2_.rows == 0) { break; }
+				visualizeDepthMap(im2_, tmp, 7.0);
+				if (screen_->root()->value("showEdgesInDepth", false)) drawEdges(im1_, tmp);
+				texture2_.update(tmp);
 				break;
 			
 			case Channel::Deviation:
-				if (depth_.rows == 0) { break; }
+				if (im2_.rows == 0) { break; }/*
 				//imageSize = Vector2f(depth.cols, depth.rows);
 				stats_->getStdDev(tmp);
 				tmp.convertTo(tmp, CV_8U, 1000.0);
 				applyColorMap(tmp, tmp, cv::COLORMAP_HOT);
-				texture_.update(tmp);
+				texture2_.update(tmp);*/
 				break;
 
 		case Channel::Flow:
-		case Channel::Confidence:
 		case Channel::Normals:
 		case Channel::Right:
-				if (depth_.rows == 0 || depth_.type() != CV_8UC3) { break; }
-				texture_.update(depth_);
+				if (im2_.rows == 0 || im2_.type() != CV_8UC3) { break; }
+				texture2_.update(im2_);
 				break;
 
 			default:
-				if (rgb_.rows == 0) { break; }
+				break;
+				/*if (rgb_.rows == 0) { break; }
 				//imageSize = Vector2f(rgb.cols,rgb.rows);
 				texture_.update(rgb_);
-
 				#ifdef HAVE_OPENVR
 				if (screen_->hasVR() && depth_.channels() >= 3) {
 					LOG(INFO) << "DRAW RIGHT";
 					textureRight_.update(depth_);
 				}
 				#endif
+				*/
+		}
+
+		if (im1_.rows != 0) {
+			//imageSize = Vector2f(rgb.cols,rgb.rows);
+			texture1_.update(im1_);
 		}
 	}
 
-	return texture_;
+	return texture1_;
 }
 
 nlohmann::json ftl::gui::Camera::getMetaData() {
diff --git a/applications/gui/src/camera.hpp b/applications/gui/src/camera.hpp
index 55042fe0d7b3baf5f24a26e390b1348084bcf320..24dcbacfdc51d20166ffc49175f268140e4dcdf3 100644
--- a/applications/gui/src/camera.hpp
+++ b/applications/gui/src/camera.hpp
@@ -38,15 +38,16 @@ class Camera {
 	void showPoseWindow();
 	void showSettings();
 
-	void setChannel(ftl::rgbd::Channel c);
-
+	void setChannel(ftl::codecs::Channel c);
+	const ftl::codecs::Channel getChannel() { return channel_; }
+	
 	void togglePause();
 	void isPaused();
-	const ftl::rgbd::Channels &availableChannels();
+	const ftl::codecs::Channels &availableChannels();
 
 	const GLTexture &captureFrame();
-	const GLTexture &getLeft() const { return texture_; }
-	const GLTexture &getRight() const { return textureRight_; }
+	const GLTexture &getLeft() const { return texture1_; }
+	const GLTexture &getRight() const { return texture2_; }
 
 	bool thumbnail(cv::Mat &thumb);
 
@@ -54,12 +55,21 @@ class Camera {
 
 	StatisticsImage *stats_ = nullptr;
 
+
+#ifdef HAVE_OPENVR
+	bool isVR() { return vr_mode_; }
+	bool setVR(bool on);
+#else
+	bool isVR() { return false; }
+#endif
+
 	private:
 	Screen *screen_;
 	ftl::rgbd::Source *src_;
 	GLTexture thumb_;
-	GLTexture texture_;
-	GLTexture textureRight_;
+	GLTexture texture1_; // first channel (always left at the moment)
+	GLTexture texture2_; // second channel ("right")
+
 	ftl::gui::PoseWindow *posewin_;
 	nlohmann::json meta_;
 	Eigen::Vector4d neye_;
@@ -71,14 +81,17 @@ class Camera {
 	float lerpSpeed_;
 	bool sdepth_;
 	bool pause_;
-	ftl::rgbd::Channel channel_;
-	ftl::rgbd::Channels channels_;
-	cv::Mat rgb_;
-	cv::Mat depth_;
+	ftl::codecs::Channel channel_;
+	ftl::codecs::Channels channels_;
+	cv::Mat im1_; // first channel (left)
+	cv::Mat im2_; // second channel ("right")
+
 	MUTEX mutex_;
 
 	#ifdef HAVE_OPENVR
 	vr::TrackedDevicePose_t rTrackedDevicePose_[ vr::k_unMaxTrackedDeviceCount ];
+	bool vr_mode_;
+	float baseline_;
 	#endif
 };
 
diff --git a/applications/gui/src/ctrl_window.cpp b/applications/gui/src/ctrl_window.cpp
index 06543822027853969ca7fee277523896969a100f..67a3682bd2fcfcb55a68fb9c71b6dd55aad36491 100644
--- a/applications/gui/src/ctrl_window.cpp
+++ b/applications/gui/src/ctrl_window.cpp
@@ -26,8 +26,8 @@ ControlWindow::ControlWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
 	_updateDetails();
 
 	auto tools = new Widget(this);
-    tools->setLayout(new BoxLayout(Orientation::Horizontal,
-                                       Alignment::Middle, 0, 6));
+	tools->setLayout(new BoxLayout(	Orientation::Horizontal,
+									Alignment::Middle, 0, 6));
 
 	auto button = new Button(tools, "", ENTYPO_ICON_PLUS);
 	button->setCallback([this] {
@@ -35,6 +35,9 @@ ControlWindow::ControlWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
 		_addNode();
 	});
 	button->setTooltip("Add new node");
+	
+	// commented-out buttons not working/useful
+	/*
 	button = new Button(tools, "", ENTYPO_ICON_CYCLE);
 	button->setCallback([this] {
 		ctrl_->restart();
@@ -43,7 +46,7 @@ ControlWindow::ControlWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
 	button->setCallback([this] {
 		ctrl_->pause();
 	});
-	button->setTooltip("Pause all nodes");
+	button->setTooltip("Pause all nodes");*/
 
 	new Label(this, "Select Node","sans-bold");
 	auto select = new ComboBox(this, node_titles_);
@@ -55,14 +58,14 @@ ControlWindow::ControlWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
 	new Label(this, "Node Options","sans-bold");
 
 	tools = new Widget(this);
-    tools->setLayout(new BoxLayout(Orientation::Horizontal,
-                                       Alignment::Middle, 0, 6));
+	tools->setLayout(new BoxLayout(	Orientation::Horizontal,
+									Alignment::Middle, 0, 6));
 
-	button = new Button(tools, "", ENTYPO_ICON_INFO);
+	/*button = new Button(tools, "", ENTYPO_ICON_INFO);
 	button->setCallback([this] {
 		
 	});
-	button->setTooltip("Node status information");
+	button->setTooltip("Node status information");*/
 
 	button = new Button(tools, "", ENTYPO_ICON_COG);
 	button->setCallback([this,parent] {
@@ -71,17 +74,17 @@ ControlWindow::ControlWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
 	});
 	button->setTooltip("Edit node configuration");
 
-	button = new Button(tools, "", ENTYPO_ICON_CYCLE);
+	/*button = new Button(tools, "", ENTYPO_ICON_CYCLE);
 	button->setCallback([this] {
 		ctrl_->restart(_getActiveID());
 	});
-	button->setTooltip("Restart this node");
+	button->setTooltip("Restart this node");*/
 
-	button = new Button(tools, "", ENTYPO_ICON_CONTROLLER_PAUS);
+	/*button = new Button(tools, "", ENTYPO_ICON_CONTROLLER_PAUS);
 	button->setCallback([this] {
 		ctrl_->pause(_getActiveID());
 	});
-	button->setTooltip("Pause node processing");
+	button->setTooltip("Pause node processing");*/
 
 	ctrl->getNet()->onConnect([this,select](ftl::net::Peer *p) {
 		_updateDetails();
diff --git a/applications/gui/src/media_panel.cpp b/applications/gui/src/media_panel.cpp
index cb44400bb1c850669332376e0134f138b9c460f3..b3ea1a6afbbc8f2a31bf294e85a85564fdffe1cb 100644
--- a/applications/gui/src/media_panel.cpp
+++ b/applications/gui/src/media_panel.cpp
@@ -12,13 +12,14 @@
 #endif
 
 using ftl::gui::MediaPanel;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
 MediaPanel::MediaPanel(ftl::gui::Screen *screen) : nanogui::Window(screen, ""), screen_(screen) {
 	using namespace nanogui;
 
 	paused_ = false;
 	writer_ = nullptr;
+	disable_switch_channels_ = false;
 
 	setLayout(new BoxLayout(Orientation::Horizontal,
 									Alignment::Middle, 5, 10));
@@ -77,6 +78,7 @@ MediaPanel::MediaPanel(ftl::gui::Screen *screen) : nanogui::Window(screen, ""),
 
 	//button = new Button(this, "", ENTYPO_ICON_CONTROLLER_RECORD);
 
+	/* Doesn't work at the moment
  #ifdef HAVE_LIBARCHIVE
 	auto button_snapshot = new Button(this, "", ENTYPO_ICON_IMAGES);
 	button_snapshot->setTooltip("Screen capture");
@@ -102,94 +104,133 @@ MediaPanel::MediaPanel(ftl::gui::Screen *screen) : nanogui::Window(screen, ""),
 	});
 #endif
 
-    auto popbutton = new PopupButton(this, "", ENTYPO_ICON_LAYERS);
-    popbutton->setSide(Popup::Side::Right);
-	popbutton->setChevronIcon(ENTYPO_ICON_CHEVRON_SMALL_RIGHT);
-    Popup *popup = popbutton->popup();
-    popup->setLayout(new GroupLayout());
+	
+	// not very useful (l/r)
+
+	auto button_dual = new Button(this, "", ENTYPO_ICON_MAP);
+	button_dual->setCallback([this]() {
+		screen_->setDualView(!screen_->getDualView());
+	});
+	*/
+
+#ifdef HAVE_OPENVR
+	if (this->screen_->hasVR()) {
+		auto button_vr = new Button(this, "VR");
+		button_vr->setFlags(Button::ToggleButton);
+		button_vr->setChangeCallback([this, button_vr](bool state) {
+			if (!screen_->useVR()) {
+				if (screen_->switchVR(true) == true) {
+					button_vr->setTextColor(nanogui::Color(0.5f,0.5f,1.0f,1.0f));
+					this->button_channels_->setEnabled(false);
+				}
+			}
+			else {
+				if (screen_->switchVR(false) == false) {
+					button_vr->setTextColor(nanogui::Color(1.0f,1.0f,1.0f,1.0f));
+					this->button_channels_->setEnabled(true);
+				}
+			}
+		});
+	}
+#endif
+
+	button_channels_ = new PopupButton(this, "", ENTYPO_ICON_LAYERS);
+	button_channels_->setSide(Popup::Side::Right);
+	button_channels_->setChevronIcon(ENTYPO_ICON_CHEVRON_SMALL_RIGHT);
+	Popup *popup = button_channels_->popup();
+	popup->setLayout(new GroupLayout());
 	popup->setTheme(screen->toolbuttheme);
-    popup->setAnchorHeight(150);
-
-    button = new Button(popup, "Left");
-    button->setFlags(Button::RadioButton);
-    button->setPushed(true);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Left);
-        }
-    });
+	popup->setAnchorHeight(150);
+
+	button = new Button(popup, "Left");
+	button->setFlags(Button::RadioButton);
+	button->setPushed(true);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Left);
+		}
+	});
 
 	right_button_ = new Button(popup, "Right");
-    right_button_->setFlags(Button::RadioButton);
-    right_button_->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Right);
-        }
-    });
-
-    depth_button_ = new Button(popup, "Depth");
-    depth_button_->setFlags(Button::RadioButton);
-    depth_button_->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Depth);
-        }
-    });
-
-	popbutton = new PopupButton(popup, "More");
-    popbutton->setSide(Popup::Side::Right);
+	right_button_->setFlags(Button::RadioButton);
+	right_button_->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Right);
+		}
+	});
+
+	depth_button_ = new Button(popup, "Depth");
+	depth_button_->setFlags(Button::RadioButton);
+	depth_button_->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Depth);
+		}
+	});
+
+	auto *popbutton = new PopupButton(popup, "More");
+	popbutton->setSide(Popup::Side::Right);
 	popbutton->setChevronIcon(ENTYPO_ICON_CHEVRON_SMALL_RIGHT);
-    popup = popbutton->popup();
-    popup->setLayout(new GroupLayout());
+	popup = popbutton->popup();
+	popup->setLayout(new GroupLayout());
 	popup->setTheme(screen->toolbuttheme);
-    popup->setAnchorHeight(150);
+	popup->setAnchorHeight(150);
 
-    button = new Button(popup, "Deviation");
-    button->setFlags(Button::RadioButton);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Deviation);
-        }
-    });
+	button = new Button(popup, "Deviation");
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Deviation);
+		}
+	});
 
 	button = new Button(popup, "Normals");
-    button->setFlags(Button::RadioButton);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Normals);
-        }
-    });
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Normals);
+		}
+	});
 
 	button = new Button(popup, "Flow");
-    button->setFlags(Button::RadioButton);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Flow);
-        }
-    });
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Flow);
+		}
+	});
 
 	button = new Button(popup, "Confidence");
-    button->setFlags(Button::RadioButton);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Confidence);
-        }
-    });
-
-    button = new Button(popup, "Energy");
-    button->setFlags(Button::RadioButton);
-    button->setCallback([this]() {
-        ftl::gui::Camera *cam = screen_->activeCamera();
-        if (cam) {
-            cam->setChannel(Channel::Energy);
-        }
-    });
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Confidence);
+		}
+	});
+
+	button = new Button(popup, "Energy");
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Energy);
+		}
+	});
+
+	button = new Button(popup, "Density");
+	button->setFlags(Button::RadioButton);
+	button->setCallback([this]() {
+		ftl::gui::Camera *cam = screen_->activeCamera();
+		if (cam) {
+			cam->setChannel(Channel::Density);
+		}
+	});
 
 }
 
@@ -199,12 +240,12 @@ MediaPanel::~MediaPanel() {
 
 // Update button enabled status
 void MediaPanel::cameraChanged() {
-    ftl::gui::Camera *cam = screen_->activeCamera();
-    if (cam) {
-        if (cam->source()->hasCapabilities(ftl::rgbd::kCapStereo)) {
-            right_button_->setEnabled(true);
-        } else {
-            right_button_->setEnabled(false);
-        }
-    }
+	ftl::gui::Camera *cam = screen_->activeCamera();
+	if (cam) {
+		if (cam->source()->hasCapabilities(ftl::rgbd::kCapStereo)) {
+			right_button_->setEnabled(true);
+		} else {
+			right_button_->setEnabled(false);
+		}
+	}
 }
diff --git a/applications/gui/src/media_panel.hpp b/applications/gui/src/media_panel.hpp
index 9e9154d860483a6bf6c88b8da856a4a89862de2f..0279cb3fadab41003ceefef538e8f42a20f00139 100644
--- a/applications/gui/src/media_panel.hpp
+++ b/applications/gui/src/media_panel.hpp
@@ -14,18 +14,22 @@ namespace gui {
 class Screen;
 
 class MediaPanel : public nanogui::Window {
-    public:
-    explicit MediaPanel(ftl::gui::Screen *);
-    ~MediaPanel();
-
-    void cameraChanged();
-
-    private:
-    ftl::gui::Screen *screen_;
-    bool paused_;
-    ftl::rgbd::SnapshotStreamWriter *writer_;
-    nanogui::Button *right_button_;
-    nanogui::Button *depth_button_;
+	public:
+	explicit MediaPanel(ftl::gui::Screen *);
+	~MediaPanel();
+
+	void cameraChanged();
+
+	private:
+	ftl::gui::Screen *screen_;
+
+	bool paused_;
+	bool disable_switch_channels_;
+
+	ftl::rgbd::SnapshotStreamWriter *writer_;
+	nanogui::PopupButton *button_channels_;
+	nanogui::Button *right_button_;
+	nanogui::Button *depth_button_;
 };
 
 }
diff --git a/applications/gui/src/screen.cpp b/applications/gui/src/screen.cpp
index 03d1847112fa86468d9661d5a9966b71a3d46b09..b0a74d37a2e39856901006504093c29c708e8189 100644
--- a/applications/gui/src/screen.cpp
+++ b/applications/gui/src/screen.cpp
@@ -20,6 +20,10 @@
 #include "camera.hpp"
 #include "media_panel.hpp"
 
+#ifdef HAVE_OPENVR
+#include "vr.hpp"
+#endif
+
 using ftl::gui::Screen;
 using ftl::gui::Camera;
 using std::string;
@@ -27,28 +31,28 @@ using ftl::rgbd::Source;
 using ftl::rgbd::isValidDepth;
 
 namespace {
-    constexpr char const *const defaultImageViewVertexShader =
-        R"(#version 330
-        uniform vec2 scaleFactor;
-        uniform vec2 position;
-        in vec2 vertex;
-        out vec2 uv;
-        void main() {
-            uv = vertex;
-            vec2 scaledVertex = (vertex * scaleFactor) + position;
-            gl_Position  = vec4(2.0*scaledVertex.x - 1.0,
-                                2.0*scaledVertex.y - 1.0,
-                                0.0, 1.0);
-        })";
-
-    constexpr char const *const defaultImageViewFragmentShader =
-        R"(#version 330
-        uniform sampler2D image;
-        out vec4 color;
-        in vec2 uv;
-        void main() {
-            color = texture(image, uv);
-        })";
+	constexpr char const *const defaultImageViewVertexShader =
+		R"(#version 330
+		uniform vec2 scaleFactor;
+		uniform vec2 position;
+		in vec2 vertex;
+		out vec2 uv;
+		void main() {
+			uv = vertex;
+			vec2 scaledVertex = (vertex * scaleFactor) + position;
+			gl_Position  = vec4(2.0*scaledVertex.x - 1.0,
+								2.0*scaledVertex.y - 1.0,
+								0.0, 1.0);
+		})";
+
+	constexpr char const *const defaultImageViewFragmentShader =
+		R"(#version 330
+		uniform sampler2D image;
+		out vec4 color;
+		in vec2 uv;
+		void main() {
+			color = texture(image, uv);
+		})";
 }
 
 ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl::ctrl::Master *controller) :
@@ -60,6 +64,11 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 	root_ = proot;
 	camera_ = nullptr;
 
+	#ifdef HAVE_OPENVR
+	HMD_ = nullptr;
+	has_vr_ = vr::VR_IsHmdPresent();
+	#endif
+
 	setSize(Vector2i(1280,720));
 
 	toolbuttheme = new Theme(*theme());
@@ -83,7 +92,7 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 	mediatheme->mButtonGradientTopFocused = nanogui::Color(80,230);
 	mediatheme->mButtonGradientBotFocused = nanogui::Color(80,230);
 	mediatheme->mIconColor = nanogui::Color(255,255);
-    mediatheme->mTextColor = nanogui::Color(1.0f,1.0f,1.0f,1.0f);
+	mediatheme->mTextColor = nanogui::Color(1.0f,1.0f,1.0f,1.0f);
 	mediatheme->mBorderDark = nanogui::Color(0,0);
 	mediatheme->mBorderMedium = nanogui::Color(0,0);
 	mediatheme->mBorderLight = nanogui::Color(0,0);
@@ -161,9 +170,9 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 	popbutton->setSide(Popup::Side::Right);
 	popbutton->setChevronIcon(0);
 	Popup *popup = popbutton->popup();
-    popup->setLayout(new GroupLayout());
+	popup->setLayout(new GroupLayout());
 	popup->setTheme(toolbuttheme);
-    //popup->setAnchorHeight(100);
+	//popup->setAnchorHeight(100);
 
 	auto itembutton = new Button(popup, "Add Camera", ENTYPO_ICON_CAMERA);
 	itembutton->setCallback([this,popup]() {
@@ -185,7 +194,7 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 	popbutton->setSide(Popup::Side::Right);
 	popbutton->setChevronIcon(0);
 	popup = popbutton->popup();
-    popup->setLayout(new GroupLayout());
+	popup->setLayout(new GroupLayout());
 	popup->setTheme(toolbuttheme);
 	//popbutton->setCallback([this]() {
 	//	cwindow_->setVisible(true);
@@ -244,30 +253,60 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 
 	setVisible(true);
 	performLayout();
+}
 
+#ifdef HAVE_OPENVR
+bool ftl::gui::Screen::initVR() {
+	if (!vr::VR_IsHmdPresent()) {
+		return false;
+	}
 
-	#ifdef HAVE_OPENVR
-	if (vr::VR_IsHmdPresent()) {
-		// Loading the SteamVR Runtime
-		vr::EVRInitError eError = vr::VRInitError_None;
-		HMD_ = vr::VR_Init( &eError, vr::VRApplication_Scene );
-
-		if ( eError != vr::VRInitError_None )
-		{
-			HMD_ = nullptr;
-			LOG(ERROR) << "Unable to init VR runtime: " << vr::VR_GetVRInitErrorAsEnglishDescription( eError );
-		}
-	} else {
+	vr::EVRInitError eError = vr::VRInitError_None;
+	HMD_ = vr::VR_Init( &eError, vr::VRApplication_Scene );
+	
+	if (eError != vr::VRInitError_None)
+	{
 		HMD_ = nullptr;
+		LOG(ERROR) << "Unable to init VR runtime: " << vr::VR_GetVRInitErrorAsEnglishDescription(eError);
 	}
-	#endif
+
+	uint32_t size_x, size_y;
+	HMD_->GetRecommendedRenderTargetSize(&size_x, &size_y);
+	LOG(INFO) << size_x << ", " << size_y;
+	LOG(INFO) << "\n" << getCameraMatrix(HMD_, vr::Eye_Left);
+	return true;
+}
+
+bool ftl::gui::Screen::useVR() {
+	auto *cam = activeCamera();
+	if (HMD_ == nullptr || cam == nullptr) { return false; }
+	return cam->isVR();
 }
 
+bool ftl::gui::Screen::switchVR(bool on) {
+	if (useVR() == on) { return on; }
+
+	if (on && (HMD_ == nullptr) && !initVR()) {
+		return false;
+	}
+
+	if (on) {
+		activeCamera()->setVR(true);
+	} else {
+		activeCamera()->setVR(false);
+	}
+	
+	return useVR();
+}
+#endif
+
 ftl::gui::Screen::~Screen() {
 	mShader.free();
 
 	#ifdef HAVE_OPENVR
-	vr::VR_Shutdown();
+	if (HMD_ != nullptr) {
+		vr::VR_Shutdown();
+	}
 	#endif
 }
 
@@ -361,13 +400,19 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) {
 		leftEye_ = mImageID;
 		rightEye_ = camera_->getRight().texture();
 
+		if (camera_->getChannel() != ftl::codecs::Channel::Left) { mImageID = rightEye_; }
+
 		#ifdef HAVE_OPENVR
-		if (hasVR() && imageSize[0] > 0 && camera_->getLeft().isValid() && camera_->getRight().isValid()) {
+		if (useVR() && imageSize[0] > 0 && camera_->getLeft().isValid() && camera_->getRight().isValid()) {
+			
 			vr::Texture_t leftEyeTexture = {(void*)(uintptr_t)leftEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
 			vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture );
+
 			glBindTexture(GL_TEXTURE_2D, rightEye_);
 			vr::Texture_t rightEyeTexture = {(void*)(uintptr_t)rightEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
 			vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture );
+			
+			mImageID = leftEye_;
 		}
 		#endif
 
@@ -400,4 +445,4 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) {
 	/* Draw the user interface */
 	screen()->performLayout(ctx);
 	nanogui::Screen::draw(ctx);
-}
\ No newline at end of file
+}
diff --git a/applications/gui/src/screen.hpp b/applications/gui/src/screen.hpp
index d51cec2bf2c34afc7c589b7e6114f503379afe30..b78ad8b690017d84950f0e135473731c631492bc 100644
--- a/applications/gui/src/screen.hpp
+++ b/applications/gui/src/screen.hpp
@@ -43,11 +43,27 @@ class Screen : public nanogui::Screen {
 	void setActiveCamera(ftl::gui::Camera*);
 	ftl::gui::Camera *activeCamera() { return camera_; }
 
-	#ifdef HAVE_OPENVR
-	bool hasVR() const { return HMD_ != nullptr; }
-	#else
+#ifdef HAVE_OPENVR
+	// initialize OpenVR
+	bool initVR();
+
+	// is VR available (HMD was found at initialization)
+	bool hasVR() const { return has_vr_; }
+
+	// is VR mode on/off
+	bool useVR();
+
+	// toggle VR on/off
+	bool switchVR(bool mode);
+
+	vr::IVRSystem* getVR() { return HMD_; }
+
+#else
 	bool hasVR() const { return false; }
-	#endif
+#endif
+
+	void setDualView(bool v) { show_two_images_ = v; LOG(INFO) << "CLICK"; }
+	bool getDualView() { return show_two_images_; }
 
 	nanogui::Theme *windowtheme;
 	nanogui::Theme *specialtheme;
@@ -58,10 +74,11 @@ class Screen : public nanogui::Screen {
 	ftl::gui::SourceWindow *swindow_;
 	ftl::gui::ControlWindow *cwindow_;
 	ftl::gui::MediaPanel *mwindow_;
+
 	//std::vector<SourceViews> sources_;
 	ftl::net::Universe *net_;
 	nanogui::GLShader mShader;
-    GLuint mImageID;
+	GLuint mImageID;
 	//Source *src_;
 	GLTexture texture_;
 	Eigen::Vector3f eye_;
@@ -82,7 +99,10 @@ class Screen : public nanogui::Screen {
 	GLuint leftEye_;
 	GLuint rightEye_;
 
+	bool show_two_images_ = false;
+
 	#ifdef HAVE_OPENVR
+	bool has_vr_;
 	vr::IVRSystem *HMD_;
 	#endif
 };
diff --git a/applications/gui/src/src_window.cpp b/applications/gui/src/src_window.cpp
index e7ffc4b1e0e44caccd67cce5d60d34b481ec58d7..8e087d1dd42e25b00afd0efaf7b5dd22d5139a0d 100644
--- a/applications/gui/src/src_window.cpp
+++ b/applications/gui/src/src_window.cpp
@@ -61,10 +61,9 @@ SourceWindow::SourceWindow(ftl::gui::Screen *screen)
 
 	screen->net()->onConnect([this](ftl::net::Peer *p) {
 		UNIQUE_LOCK(mutex_, lk);
-		//select->setItems(available_);
 		_updateCameras(screen_->net()->findAll<string>("list_streams"));
 	});
-
+	
 	UNIQUE_LOCK(mutex_, lk);
 
 	std::vector<ftl::rgbd::Source*> srcs = ftl::createArray<ftl::rgbd::Source>(screen_->root(), "sources", screen_->net());
diff --git a/applications/gui/src/vr.cpp b/applications/gui/src/vr.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b300a27b2484733786f5bd216f0a48aef3dc913c
--- /dev/null
+++ b/applications/gui/src/vr.cpp
@@ -0,0 +1,36 @@
+#include "loguru.hpp"
+#include "vr.hpp"
+
+Eigen::Matrix3d getCameraMatrix(const double tanx1,
+								const double tanx2,
+								const double tany1,
+								const double tany2,
+								const double size_x,
+								const double size_y) {
+	
+	Eigen::Matrix3d C = Eigen::Matrix3d::Identity();
+	
+	CHECK(tanx1 < 0 && tanx2 > 0 && tany1 < 0 && tany2 > 0);
+	CHECK(size_x > 0 && size_y > 0);
+
+	double fx = size_x / (-tanx1 + tanx2);
+	double fy = size_y / (-tany1 + tany2);
+	C(0,0) = fx;
+	C(1,1) = fy;
+	C(0,2) = tanx1 * fx;
+	C(1,2) = tany1 * fy;
+
+	// safe to remove
+	CHECK((int) (abs(tanx1 * fx) + abs(tanx2 * fx)) == (int) size_x);
+	CHECK((int) (abs(tany1 * fy) + abs(tany2 * fy)) == (int) size_y);
+
+	return C;
+}
+
+Eigen::Matrix3d getCameraMatrix(vr::IVRSystem *vr, const vr::Hmd_Eye &eye) {
+	float tanx1, tanx2, tany1, tany2;
+	uint32_t size_x, size_y;
+	vr->GetProjectionRaw(eye, &tanx1, &tanx2, &tany1, &tany2);
+	vr->GetRecommendedRenderTargetSize(&size_x, &size_y);
+	return getCameraMatrix(tanx1, tanx2, tany1, tany2, size_x, size_y);
+}
\ No newline at end of file
diff --git a/applications/gui/src/vr.hpp b/applications/gui/src/vr.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..7e8f6314fb85765d438ae5f46915a8c215160127
--- /dev/null
+++ b/applications/gui/src/vr.hpp
@@ -0,0 +1,57 @@
+#include <openvr/openvr.h>
+#include <Eigen/Eigen>
+#include <openvr/openvr.h>
+
+/* @brief	Calculate (pinhole camera) intrinsic matrix from OpenVR parameters
+ * @param	Tangent of left half angle (negative) from center view axis
+ * @param	Tangent of right half angle from center view axis
+ * @param	Tangent of top half angle (negative) from center view axis
+ * @param	Tangent of bottom half angle from center view axis
+ * @param	Image width
+ * @param	Image height
+ * 
+ * Parameters are provided by IVRSystem::GetProjectionRaw and
+ * IVRSystem::GetRecommendedRenderTargetSize.
+ * 
+ * tanx1 = x1 / fx		(1)
+ * tanx2 = x2 / fx		(2)
+ * x1 + x2 = size_x		(3)
+ * 
+ * :. fx = size_x / (-tanx1 + tanx2)
+ * 
+ * fy can be calculated in same way
+ */
+Eigen::Matrix3d getCameraMatrix(const double tanx1,
+								const double tanx2,
+								const double tany1,
+								const double tany2,
+								const double size_x,
+								const double size_y);
+
+/*
+ * @brief	Same as above, but uses given IVRSystem and eye.
+ */
+Eigen::Matrix3d getCameraMatrix(vr::IVRSystem *vr, const vr::Hmd_Eye &eye);
+
+
+static inline Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
+{
+	Eigen::Matrix4d matrixObj;
+	matrixObj <<
+		matPose.m[0][0], matPose.m[0][1], matPose.m[0][2], matPose.m[0][3],
+		matPose.m[1][0], matPose.m[1][1], matPose.m[1][2], matPose.m[1][3],
+		matPose.m[2][0], matPose.m[2][1], matPose.m[2][2], matPose.m[2][3],
+					0.0,			 0.0,			  0.0,			   1.0;
+	return matrixObj;
+}
+
+static inline Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix44_t &matPose )
+{
+	Eigen::Matrix4d matrixObj;
+	matrixObj <<
+		matPose.m[0][0], matPose.m[0][1], matPose.m[0][2], matPose.m[0][3],
+		matPose.m[1][0], matPose.m[1][1], matPose.m[1][2], matPose.m[1][3],
+		matPose.m[2][0], matPose.m[2][1], matPose.m[2][2], matPose.m[2][3],
+		matPose.m[3][0], matPose.m[3][1], matPose.m[3][2], matPose.m[3][3];
+	return matrixObj;
+}
\ No newline at end of file
diff --git a/applications/player/CMakeLists.txt b/applications/player/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c501c882464a5fbd2a13f2d1561de19b74a3cd76
--- /dev/null
+++ b/applications/player/CMakeLists.txt
@@ -0,0 +1,11 @@
+set(PLAYERSRC
+	src/main.cpp
+)
+
+add_executable(ftl-player ${PLAYERSRC})
+
+target_include_directories(ftl-player PRIVATE src)
+
+target_link_libraries(ftl-player ftlcommon ftlcodecs ftlrgbd Threads::Threads ${OpenCV_LIBS})
+
+
diff --git a/applications/player/src/main.cpp b/applications/player/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..60d2793c1c4b0484dbb226bff12deebfb6e1fc2e
--- /dev/null
+++ b/applications/player/src/main.cpp
@@ -0,0 +1,122 @@
+#include <loguru.hpp>
+#include <ftl/configuration.hpp>
+#include <ftl/codecs/reader.hpp>
+#include <ftl/codecs/decoder.hpp>
+#include <ftl/codecs/packet.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/timer.hpp>
+
+#include <fstream>
+
+#include <Eigen/Eigen>
+
+using ftl::codecs::codec_t;
+using ftl::codecs::Channel;
+
+static ftl::codecs::Decoder *decoder;
+
+
+static void createDecoder(const ftl::codecs::Packet &pkt) {
+	if (decoder) {
+		if (!decoder->accepts(pkt)) {
+			ftl::codecs::free(decoder);
+		} else {
+			return;
+		}
+	}
+
+	decoder = ftl::codecs::allocateDecoder(pkt);
+}
+
+static void visualizeDepthMap(	const cv::Mat &depth, cv::Mat &out,
+								const float max_depth)
+{
+	depth.convertTo(out, CV_8U, 255.0f / max_depth);
+	out = 255 - out;
+	//cv::Mat mask = (depth >= 39.0f); // TODO (mask for invalid pixels)
+	
+	applyColorMap(out, out, cv::COLORMAP_JET);
+	//out.setTo(cv::Scalar(255, 255, 255), mask);
+}
+
+int main(int argc, char **argv) {
+    std::string filename(argv[1]);
+    LOG(INFO) << "Playing: " << filename;
+
+	auto root = ftl::configure(argc, argv, "player_default");
+
+	std::ifstream f;
+    f.open(filename);
+    if (!f.is_open()) LOG(ERROR) << "Could not open file";
+
+    ftl::codecs::Reader r(f);
+    if (!r.begin()) LOG(ERROR) << "Bad ftl file";
+
+    LOG(INFO) << "Playing...";
+
+    int current_stream = 0;
+    int current_channel = 0;
+
+	ftl::timer::add(ftl::timer::kTimerMain, [&current_stream,&current_channel,&r](int64_t ts) {
+		bool res = r.read(ts, [&current_stream,&current_channel,&r](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+			if (spkt.streamID == current_stream) {
+
+				if (pkt.codec == codec_t::POSE) {
+					Eigen::Matrix4d p = Eigen::Map<Eigen::Matrix4d>((double*)pkt.data.data());
+					LOG(INFO) << "Have pose: " << p;
+					return;
+				}
+
+				if (pkt.codec == codec_t::CALIBRATION) {
+					ftl::rgbd::Camera *camera = (ftl::rgbd::Camera*)pkt.data.data();
+					LOG(INFO) << "Have calibration: " << camera->fx;
+					return;
+				}
+				
+				if (spkt.channel != static_cast<ftl::codecs::Channel>(current_channel)) return;
+
+				//LOG(INFO) << "Reading packet: (" << (int)spkt.streamID << "," << (int)spkt.channel << ") " << (int)pkt.codec << ", " << (int)pkt.definition;
+
+				cv::Mat frame(cv::Size(ftl::codecs::getWidth(pkt.definition),ftl::codecs::getHeight(pkt.definition)), (spkt.channel == Channel::Depth) ? CV_32F : CV_8UC3);
+				createDecoder(pkt);
+
+				try {
+					decoder->decode(pkt, frame);
+				} catch (std::exception &e) {
+					LOG(INFO) << "Decoder exception: " << e.what();
+				}
+
+				if (!frame.empty()) {
+					if (spkt.channel == Channel::Depth) {
+						visualizeDepthMap(frame, frame, 8.0f);
+					}
+					double time = (double)(spkt.timestamp - r.getStartTime()) / 1000.0;
+					cv::putText(frame, std::string("Time: ") + std::to_string(time) + std::string("s"), cv::Point(10,20), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,0,255));
+					cv::imshow("Player", frame);
+				} else {
+					frame.create(cv::Size(600,600), CV_8UC3);
+					cv::imshow("Player", frame);
+				}
+				int key = cv::waitKey(1);
+				if (key >= 48 && key <= 57) {
+					current_stream = key - 48;
+				} else if (key == 'd') {
+					current_channel = (current_channel == 0) ? 1 : 0;
+				} else if (key == 'r') {
+					current_channel = (current_channel == 0) ? 2 : 0;
+				} else if (key == 27) {
+					ftl::timer::stop(false);
+				}
+			}
+		});
+		if (!res) ftl::timer::stop(false);
+		return res;
+	});
+
+	ftl::timer::start(true);
+
+    r.end();
+
+	ftl::running = false;
+	return 0;
+}
diff --git a/applications/reconstruct/CMakeLists.txt b/applications/reconstruct/CMakeLists.txt
index dcee7afa0147378ffaabd91263e200f8fe284c98..dce81e19c91a41f7e8bba90a2149a6602d73e5aa 100644
--- a/applications/reconstruct/CMakeLists.txt
+++ b/applications/reconstruct/CMakeLists.txt
@@ -15,8 +15,11 @@ set(REPSRC
 	#src/mls.cu
 	#src/depth_camera.cu
 	#src/depth_camera.cpp
-	src/ilw.cpp
-	src/ilw.cu
+	src/ilw/ilw.cpp
+	src/ilw/ilw.cu
+	src/ilw/fill.cu
+	src/ilw/discontinuity.cu
+	src/ilw/correspondence.cu
 )
 
 add_executable(ftl-reconstruct ${REPSRC})
diff --git a/applications/reconstruct/src/depth_camera.cpp b/applications/reconstruct/src/depth_camera.cpp
deleted file mode 100644
index de310f592b303dbcf7d041087863bf6c76db9305..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/depth_camera.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/depth_camera.hpp>
-#include "depth_camera_cuda.hpp"
-#include <opencv2/core/cuda_stream_accessor.hpp>
-
-using ftl::voxhash::DepthCamera;
-using ftl::voxhash::DepthCameraCUDA;
-
-DepthCamera::DepthCamera() {
-	depth_tex_ = nullptr;
-	depth2_tex_ = nullptr;
-	points_tex_ = nullptr;
-	colour_tex_ = nullptr;
-	normal_tex_ = nullptr;
-}
-
-void DepthCamera::alloc(const DepthCameraParams& params, bool withNormals) { //! todo resizing???
-	depth_tex_ = new ftl::cuda::TextureObject<float>(params.m_imageWidth, params.m_imageHeight);
-	depth2_tex_ = new ftl::cuda::TextureObject<int>(params.m_imageWidth, params.m_imageHeight);
-	points_tex_ = new ftl::cuda::TextureObject<float4>(params.m_imageWidth, params.m_imageHeight);
-	colour_tex_ = new ftl::cuda::TextureObject<uchar4>(params.m_imageWidth, params.m_imageHeight);
-	data.depth = depth_tex_->cudaTexture();
-	data.depth2 = depth2_tex_->cudaTexture();
-	data.points = points_tex_->cudaTexture();
-	data.colour = colour_tex_->cudaTexture();
-	data.params = params;
-
-	//if (withNormals) {
-		normal_tex_ = new ftl::cuda::TextureObject<float4>(params.m_imageWidth, params.m_imageHeight);
-		data.normal = normal_tex_->cudaTexture();
-	//} else {
-	//	data.normal = 0;
-	//}
-}
-
-void DepthCamera::free() {
-	delete depth_tex_;
-	delete colour_tex_;
-	delete depth2_tex_;
-	delete points_tex_;
-	if (normal_tex_) delete normal_tex_;
-}
-
-void DepthCamera::updateData(const cv::Mat &depth, const cv::Mat &rgb, cv::cuda::Stream &stream) {
-	depth_tex_->upload(depth, cv::cuda::StreamAccessor::getStream(stream));
-	colour_tex_->upload(rgb, cv::cuda::StreamAccessor::getStream(stream));
-	//if (normal_mat_) {
-		//computeNormals(cv::cuda::StreamAccessor::getStream(stream));
-	//}
-}
-
-void DepthCamera::computeNormals(cudaStream_t stream) {
-	//ftl::cuda::point_cloud(*points_tex_, data, stream);
-	ftl::cuda::compute_normals(*depth_tex_, *normal_tex_, data, stream);
-}
diff --git a/applications/reconstruct/src/depth_camera.cu b/applications/reconstruct/src/depth_camera.cu
deleted file mode 100644
index 7a322eaf733230772e72c50722e849335bf7e891..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/depth_camera.cu
+++ /dev/null
@@ -1,355 +0,0 @@
-#include <ftl/cuda_common.hpp>
-#include <ftl/cuda_util.hpp>
-#include <ftl/depth_camera.hpp>
-#include "depth_camera_cuda.hpp"
-
-#define T_PER_BLOCK 16
-#define MINF __int_as_float(0xff800000)
-
-using ftl::voxhash::DepthCameraCUDA;
-using ftl::voxhash::HashData;
-using ftl::voxhash::HashParams;
-using ftl::cuda::TextureObject;
-using ftl::render::SplatParams;
-
-extern __constant__ ftl::voxhash::DepthCameraCUDA c_cameras[MAX_CAMERAS];
-extern __constant__ HashParams c_hashParams;
-
-__global__ void clear_depth_kernel(ftl::cuda::TextureObject<float> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = 1000.0f; //PINF;
-		//colour(x,y) = make_uchar4(76,76,82,0);
-	}
-}
-
-void ftl::cuda::clear_depth(const ftl::cuda::TextureObject<float> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_depth_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-__global__ void clear_depth_kernel(ftl::cuda::TextureObject<int> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = 0x7FFFFFFF; //PINF;
-		//colour(x,y) = make_uchar4(76,76,82,0);
-	}
-}
-
-void ftl::cuda::clear_depth(const ftl::cuda::TextureObject<int> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_depth_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-__global__ void clear_to_zero_kernel(ftl::cuda::TextureObject<float> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = 0.0f; //PINF;
-	}
-}
-
-void ftl::cuda::clear_to_zero(const ftl::cuda::TextureObject<float> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_to_zero_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-__global__ void clear_points_kernel(ftl::cuda::TextureObject<float4> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = make_float4(MINF,MINF,MINF,MINF);
-		//colour(x,y) = make_uchar4(76,76,82,0);
-	}
-}
-
-void ftl::cuda::clear_points(const ftl::cuda::TextureObject<float4> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_points_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-__global__ void clear_colour_kernel(ftl::cuda::TextureObject<uchar4> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = make_uchar4(76,76,76,76);
-		//colour(x,y) = make_uchar4(76,76,82,0);
-	}
-}
-
-void ftl::cuda::clear_colour(const ftl::cuda::TextureObject<uchar4> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_colour_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-__global__ void clear_colour_kernel(ftl::cuda::TextureObject<float4> depth) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < depth.width() && y < depth.height()) {
-		depth(x,y) = make_float4(0.0f);
-	}
-}
-
-void ftl::cuda::clear_colour(const ftl::cuda::TextureObject<float4> &depth, cudaStream_t stream) {
-	const dim3 clear_gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 clear_blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	clear_colour_kernel<<<clear_gridSize, clear_blockSize, 0, stream>>>(depth);
-}
-
-// ===== Type convert =====
-
-template <typename A, typename B>
-__global__ void convert_kernel(const ftl::cuda::TextureObject<A> in, ftl::cuda::TextureObject<B> out, float scale) {
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < in.width() && y < in.height()) {
-		out(x,y) = ((float)in.tex2D((int)x,(int)y)) * scale;
-	}
-}
-
-void ftl::cuda::float_to_int(const ftl::cuda::TextureObject<float> &in, ftl::cuda::TextureObject<int> &out, float scale, cudaStream_t stream) {
-	const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	convert_kernel<float,int><<<gridSize, blockSize, 0, stream>>>(in, out, scale);
-}
-
-void ftl::cuda::int_to_float(const ftl::cuda::TextureObject<int> &in, ftl::cuda::TextureObject<float> &out, float scale, cudaStream_t stream) {
-	const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	convert_kernel<int,float><<<gridSize, blockSize, 0, stream>>>(in, out, scale);
-}
-
-/// ===== Median Filter ======
-
-#define WINDOW_SIZE 3
-#define MEDIAN_RADIUS 3
-#define MEDIAN_SIZE (((MEDIAN_RADIUS*2)+1)*((MEDIAN_RADIUS*2)+1))
-
-__global__ void medianFilterKernel(TextureObject<int> inputImageKernel, TextureObject<float> outputImagekernel)
-{
-	// Set row and colum for thread.
-	int row = blockIdx.y * blockDim.y + threadIdx.y;
-	int col = blockIdx.x * blockDim.x + threadIdx.x;
-	int filterVector[MEDIAN_SIZE] = {0};   //Take fiter window
-	if((row<=MEDIAN_RADIUS) || (col<=MEDIAN_RADIUS) || (row>=inputImageKernel.height()-MEDIAN_RADIUS) || (col>=inputImageKernel.width()-MEDIAN_RADIUS))
-				outputImagekernel(col, row) = 0.0f; //Deal with boundry conditions
-	else {
-		for (int v = -MEDIAN_RADIUS; v <= MEDIAN_RADIUS; v++) { 
-			for (int u = -MEDIAN_RADIUS; u <= MEDIAN_RADIUS; u++){
-				filterVector[(v+MEDIAN_RADIUS)*(2*MEDIAN_RADIUS+1)+u+MEDIAN_RADIUS] = inputImageKernel((col+u), (row+v));   // setup the filterign window.
-			}
-		}
-		for (int i = 0; i < MEDIAN_SIZE; i++) {
-			for (int j = i + 1; j < MEDIAN_SIZE; j++) {
-				if (filterVector[i] > filterVector[j]) { 
-					//Swap the variables.
-					char tmp = filterVector[i];
-					filterVector[i] = filterVector[j];
-					filterVector[j] = tmp;
-				}
-			}
-		}
-		outputImagekernel(col, row) = (float)filterVector[MEDIAN_SIZE/2+1] / 1000.0f;   //Set the output variables.
-	}
-}
-
-void ftl::cuda::median_filter(const ftl::cuda::TextureObject<int> &in, ftl::cuda::TextureObject<float> &out, cudaStream_t stream) {
-	const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-	medianFilterKernel<<<gridSize, blockSize, 0, stream>>>(in, out);
-}
-
-
-/// ===== Hole Fill =====
-
-__device__ inline float distance2(float3 a, float3 b) {
-	const float x = a.x-b.x;
-	const float y = a.y-b.y;
-	const float z = a.z-b.z;
-	return x*x+y*y+z*z;
-}
-
-#define SPLAT_RADIUS 7
-#define SPLAT_BOUNDS (2*SPLAT_RADIUS+T_PER_BLOCK+1)
-#define SPLAT_BUFFER_SIZE (SPLAT_BOUNDS*SPLAT_BOUNDS)
-
-__global__ void hole_fill_kernel(
-	TextureObject<int> depth_in,
-	TextureObject<float> depth_out, DepthCameraParams params) {
-	// Read an NxN region and
-	// - interpolate a depth value for this pixel
-	// - interpolate an rgb value for this pixel
-	// Must respect depth discontinuities.
-	// How much influence a given neighbour has depends on its depth value
-
-	__shared__ float3 positions[SPLAT_BUFFER_SIZE];
-
-	const float voxelSize = c_hashParams.m_virtualVoxelSize;
-
-	const int i = threadIdx.y*blockDim.y + threadIdx.x;
-	const int bx = blockIdx.x*blockDim.x;
-	const int by = blockIdx.y*blockDim.y;
-	const int x = bx + threadIdx.x;
-	const int y = by + threadIdx.y;
-
-	// const float camMinDepth = params.camera.m_sensorDepthWorldMin;
-	// const float camMaxDepth = params.camera.m_sensorDepthWorldMax;
-
-	for (int j=i; j<SPLAT_BUFFER_SIZE; j+=T_PER_BLOCK) {
-		const unsigned int sx = (j % SPLAT_BOUNDS)+bx-SPLAT_RADIUS;
-		const unsigned int sy = (j / SPLAT_BOUNDS)+by-SPLAT_RADIUS;
-		if (sx >= depth_in.width() || sy >= depth_in.height()) {
-			positions[j] = make_float3(1000.0f,1000.0f, 1000.0f);
-		} else {
-			positions[j] = params.kinectDepthToSkeleton(sx, sy, (float)depth_in.tex2D((int)sx,(int)sy) / 1000.0f);
-		}
-	}
-
-	__syncthreads();
-
-	if (x >= depth_in.width() || y >= depth_in.height()) return;
-
-	const float voxelSquared = voxelSize*voxelSize;
-	float mindepth = 1000.0f;
-	//int minidx = -1;
-	// float3 minpos;
-
-	//float3 validPos[MAX_VALID];
-	//int validIndices[MAX_VALID];
-	//int validix = 0;
-
-	for (int v=-SPLAT_RADIUS; v<=SPLAT_RADIUS; ++v) {
-		for (int u=-SPLAT_RADIUS; u<=SPLAT_RADIUS; ++u) {
-			//const int idx = (threadIdx.y+v)*SPLAT_BOUNDS+threadIdx.x+u;
-			const int idx = (threadIdx.y+v+SPLAT_RADIUS)*SPLAT_BOUNDS+threadIdx.x+u+SPLAT_RADIUS;
-
-			float3 posp = positions[idx];
-			const float d = posp.z;
-			//if (d < camMinDepth || d > camMaxDepth) continue;
-
-			float3 pos = params.kinectDepthToSkeleton(x, y, d);
-			float dist = distance2(pos, posp);
-
-			if (dist < voxelSquared) {
-				// Valid so check for minimum
-				//validPos[validix] = pos;
-				//validIndices[validix++] = idx;
-				if (d < mindepth) {
-					mindepth = d;
-					//minidx = idx;
-					// minpos = pos;
-				}
-			}
-		}
-	}
-
-	depth_out(x,y) = mindepth;
-}
-
-void ftl::cuda::hole_fill(const TextureObject<int> &depth_in,
-	const TextureObject<float> &depth_out, const DepthCameraParams &params, cudaStream_t stream) 
-{
-
-	const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-	hole_fill_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, params);
-	cudaSafeCall( cudaGetLastError() );
-
-	#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	#endif
-}
-
-
-/// ===== Point cloud from depth =====
-
-__global__ void point_cloud_kernel(ftl::cuda::TextureObject<float4> output, DepthCameraCUDA depthCameraData)
-{
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	const int width = depthCameraData.params.m_imageWidth;
-	const int height = depthCameraData.params.m_imageHeight;
-
-	if (x < width && y < height) {
-		float depth = tex2D<float>(depthCameraData.depth, x, y);
-
-		output(x,y) = (depth >= depthCameraData.params.m_sensorDepthWorldMin && depth <= depthCameraData.params.m_sensorDepthWorldMax) ?
-			make_float4(depthCameraData.pose * depthCameraData.params.kinectDepthToSkeleton(x, y, depth), 0.0f) :
-			make_float4(MINF, MINF, MINF, MINF);
-	}
-}
-
-void ftl::cuda::point_cloud(ftl::cuda::TextureObject<float4> &output, const DepthCameraCUDA &depthCameraData, cudaStream_t stream) {
-	const dim3 gridSize((depthCameraData.params.m_imageWidth + T_PER_BLOCK - 1)/T_PER_BLOCK, (depthCameraData.params.m_imageHeight + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-	point_cloud_kernel<<<gridSize, blockSize, 0, stream>>>(output, depthCameraData);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-#endif
-}
-
-/// ===== NORMALS =====
-
-
-__global__ void compute_normals_kernel(const ftl::cuda::TextureObject<float> input, ftl::cuda::TextureObject<float4> output, DepthCameraCUDA camera)
-{
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	const int width = output.width();
-
-	if(x >= output.width() || y >= output.height()) return;
-
-	output(x,y) = make_float4(MINF, MINF, MINF, MINF);
-
-	if(x > 0 && x < output.width()-1 && y > 0 && y < output.height()-1)
-	{
-		const float3 CC = camera.pose * camera.params.kinectDepthToSkeleton(x,y,input(x,y)); //input[(y+0)*width+(x+0)];
-		const float3 PC = camera.pose * camera.params.kinectDepthToSkeleton(x,y,input(x,y+1)); //input[(y+1)*width+(x+0)];
-		const float3 CP = camera.pose * camera.params.kinectDepthToSkeleton(x,y,input(x+1,y)); //input[(y+0)*width+(x+1)];
-		const float3 MC = camera.pose * camera.params.kinectDepthToSkeleton(x,y,input(x,y-1)); //input[(y-1)*width+(x+0)];
-		const float3 CM = camera.pose * camera.params.kinectDepthToSkeleton(x,y,input(x-1,y)); //input[(y+0)*width+(x-1)];
-
-		if(CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF)
-		{
-			const float3 n = cross(PC-MC, CP-CM);
-			const float  l = length(n);
-
-			if(l > 0.0f)
-			{
-				//if (x == 400 && y == 200) printf("Cam NORMX: %f\n", (n/-l).x);
-				output(x,y) = make_float4(n.x/-l, n.y/-l, n.z/-l, 0.0f); //make_float4(n/-l, 1.0f);
-			}
-		}
-	}
-}
-
-void ftl::cuda::compute_normals(const ftl::cuda::TextureObject<float> &input, ftl::cuda::TextureObject<float4> &output, const DepthCameraCUDA &camera, cudaStream_t stream) {
-	const dim3 gridSize((output.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (output.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-	compute_normals_kernel<<<gridSize, blockSize, 0, stream>>>(input, output, camera);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-}
\ No newline at end of file
diff --git a/applications/reconstruct/src/depth_camera_cuda.hpp b/applications/reconstruct/src/depth_camera_cuda.hpp
deleted file mode 100644
index 26bfcad73f5ae72f20cfe2dd29d0e91c62fca62b..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/depth_camera_cuda.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef _FTL_RECONSTRUCTION_CAMERA_CUDA_HPP_
-#define _FTL_RECONSTRUCTION_CAMERA_CUDA_HPP_
-
-#include <ftl/depth_camera.hpp>
-#include <ftl/voxel_hash.hpp>
-#include "splat_params.hpp"
-
-namespace ftl {
-namespace cuda {
-
-void clear_depth(const TextureObject<float> &depth, cudaStream_t stream);
-void clear_depth(const TextureObject<int> &depth, cudaStream_t stream);
-void clear_to_zero(const ftl::cuda::TextureObject<float> &depth, cudaStream_t stream);
-void clear_points(const ftl::cuda::TextureObject<float4> &depth, cudaStream_t stream);
-void clear_colour(const ftl::cuda::TextureObject<uchar4> &depth, cudaStream_t stream);
-void clear_colour(const ftl::cuda::TextureObject<float4> &depth, cudaStream_t stream);
-
-void median_filter(const ftl::cuda::TextureObject<int> &in, ftl::cuda::TextureObject<float> &out, cudaStream_t stream);
-
-void int_to_float(const ftl::cuda::TextureObject<int> &in, ftl::cuda::TextureObject<float> &out, float scale, cudaStream_t stream);
-
-void float_to_int(const ftl::cuda::TextureObject<float> &in, ftl::cuda::TextureObject<int> &out, float scale, cudaStream_t stream);
-
-void mls_smooth(TextureObject<float4> &output, const ftl::voxhash::HashParams &hashParams, int numcams, int cam, cudaStream_t stream);
-
-void mls_render_depth(const TextureObject<int> &input, TextureObject<int> &output, const ftl::render::SplatParams &params, int numcams, cudaStream_t stream);
-
-void hole_fill(const TextureObject<int> &depth_in, const TextureObject<float> &depth_out, const DepthCameraParams &params, cudaStream_t stream);
-
-void point_cloud(ftl::cuda::TextureObject<float4> &output, const ftl::voxhash::DepthCameraCUDA &depthCameraData, cudaStream_t stream);
-
-void compute_normals(const ftl::cuda::TextureObject<float> &depth, ftl::cuda::TextureObject<float4> &normals, const ftl::voxhash::DepthCameraCUDA &camera, cudaStream_t stream);
-
-}
-}
-
-#endif  // _FTL_RECONSTRUCTION_CAMERA_CUDA_HPP_
diff --git a/applications/reconstruct/src/ilw.cpp b/applications/reconstruct/src/ilw.cpp
index 86a4cca5e4f82047ed6591a137b694788da879eb..435cd886eba1b83d7530f19f28ccfb09f7e37f3a 100644
--- a/applications/reconstruct/src/ilw.cpp
+++ b/applications/reconstruct/src/ilw.cpp
@@ -8,8 +8,8 @@
 
 using ftl::ILW;
 using ftl::detail::ILWData;
-using ftl::rgbd::Channel;
-using ftl::rgbd::Channels;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 using ftl::rgbd::Format;
 using cv::cuda::GpuMat;
 
@@ -66,6 +66,11 @@ bool ILW::_phase0(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
         f.createTexture<float4>(Channel::EnergyVector, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
         f.createTexture<float>(Channel::Energy, Format<float>(f.get<GpuMat>(Channel::Colour).size()));
         f.createTexture<uchar4>(Channel::Colour);
+
+		cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+
+		f.get<GpuMat>(Channel::EnergyVector).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
+		f.get<GpuMat>(Channel::Energy).setTo(cv::Scalar(0.0f), cvstream);
     }
 
     return true;
diff --git a/applications/reconstruct/src/ilw.cu b/applications/reconstruct/src/ilw.cu
index 90133a3a57800ee87a91fd50902deea5f701258a..999b5ec9031eed08fc4bc527471961c3236d7445 100644
--- a/applications/reconstruct/src/ilw.cu
+++ b/applications/reconstruct/src/ilw.cu
@@ -30,9 +30,14 @@ __global__ void correspondence_energy_vector_kernel(
 	const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
     const int y = blockIdx.y*blockDim.y + threadIdx.y;
     
-    const float3 world1 = make_float3(p1.tex2D(x, y));
+	const float3 world1 = make_float3(p1.tex2D(x, y));
+	if (world1.x == MINF) {
+        vout(x,y) = make_float4(0.0f);
+        eout(x,y) = 0.0f;
+        return;
+    }
     const float3 camPos2 = pose2 * world1;
-    const uint2 screen2 = cam2.camToScreen<uint2>(camPos2);
+	const uint2 screen2 = cam2.camToScreen<uint2>(camPos2);
 
     const int upsample = 8;
 
@@ -43,12 +48,11 @@ __global__ void correspondence_energy_vector_kernel(
 		const float u = (i % upsample) - (upsample / 2);
         const float v = (i / upsample) - (upsample / 2);
         
-        const float3 world2 = make_float3(p2.tex2D(screen2.x+u, screen2.y+v));
+		const float3 world2 = make_float3(p2.tex2D(screen2.x+u, screen2.y+v));
+		if (world2.x == MINF) continue;
 
         // Determine degree of correspondence
         const float confidence = 1.0f / length(world1 - world2);
-
-        printf("conf %f\n", confidence);
         const float maxconf = warpMax(confidence);
 
         // This thread has best confidence value
diff --git a/applications/reconstruct/src/ilw/correspondence.cu b/applications/reconstruct/src/ilw/correspondence.cu
new file mode 100644
index 0000000000000000000000000000000000000000..8fb9d94b75df5a5d2687fa81da01d999f2bcfeeb
--- /dev/null
+++ b/applications/reconstruct/src/ilw/correspondence.cu
@@ -0,0 +1,198 @@
+#include "ilw_cuda.hpp"
+#include <ftl/cuda/weighting.hpp>
+
+using ftl::cuda::TextureObject;
+using ftl::rgbd::Camera;
+using ftl::cuda::Mask;
+
+#define T_PER_BLOCK 8
+
+template<int FUNCTION>
+__device__ float weightFunction(const ftl::cuda::ILWParams &params, float dweight, float cweight);
+
+template <>
+__device__ inline float weightFunction<0>(const ftl::cuda::ILWParams &params, float dweight, float cweight) {
+	return (params.cost_ratio * (cweight) + (1.0f - params.cost_ratio) * dweight);
+}
+
+template <>
+__device__ inline float weightFunction<1>(const ftl::cuda::ILWParams &param, float dweight, float cweight) {
+	return (cweight * cweight * dweight);
+}
+
+template <>
+__device__ inline float weightFunction<2>(const ftl::cuda::ILWParams &param, float dweight, float cweight) {
+	return (dweight * dweight * cweight);
+}
+
+template <>
+__device__ inline float weightFunction<3>(const ftl::cuda::ILWParams &params, float dweight, float cweight) {
+	return (dweight == 0.0f) ? 0.0f : (params.cost_ratio * (cweight) + (1.0f - params.cost_ratio) * dweight);
+}
+
+template <>
+__device__ inline float weightFunction<4>(const ftl::cuda::ILWParams &params, float dweight, float cweight) {
+	return cweight;
+}
+
+template<int COR_STEPS, int FUNCTION> 
+__global__ void correspondence_energy_vector_kernel(
+        TextureObject<float> d1,
+        TextureObject<float> d2,
+        TextureObject<uchar4> c1,
+        TextureObject<uchar4> c2,
+        TextureObject<float> dout,
+		TextureObject<float> conf,
+		TextureObject<int> mask,
+        float4x4 pose1,
+        float4x4 pose1_inv,
+        float4x4 pose2,  // Inverse
+        Camera cam1,
+        Camera cam2, ftl::cuda::ILWParams params) {
+
+    // Each warp picks point in p1
+    //const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
+	const int x = (blockIdx.x*blockDim.x + threadIdx.x); // / WARP_SIZE;
+    const int y = blockIdx.y*blockDim.y + threadIdx.y;
+    
+    //const float3 world1 = make_float3(p1.tex2D(x, y));
+    const float depth1 = d1.tex2D(x,y); //(pose1_inv * world1).z;  // Initial starting depth
+	if (depth1 < cam1.minDepth || depth1 > cam1.maxDepth) return;
+
+	// TODO: Temporary hack to ensure depth1 is present
+	//const float4 temp = vout.tex2D(x,y);
+	//vout(x,y) =  make_float4(depth1, 0.0f, temp.z, temp.w);
+	
+	const float3 world1 = pose1 * cam1.screenToCam(x,y,depth1);
+
+    const uchar4 colour1 = c1.tex2D(x, y);
+
+	float bestdepth = 0.0f;
+	float bestweight = 0.0f;
+	float bestcolour = 0.0f;
+	float bestdweight = 0.0f;
+	float totalcolour = 0.0f;
+	int count = 0;
+	float contrib = 0.0f;
+    
+	const float step_interval = params.range / (COR_STEPS / 2);
+	
+	const float3 rayStep_world = pose1.getFloat3x3() * cam1.screenToCam(x,y,step_interval);
+	const float3 rayStart_2 = pose2 * world1;
+	const float3 rayStep_2 = pose2.getFloat3x3() * rayStep_world;
+
+    // Project to p2 using cam2
+    // Each thread takes a possible correspondence and calculates a weighting
+    //const int lane = tid % WARP_SIZE;
+	for (int i=0; i<COR_STEPS; ++i) {
+		const int j = i - (COR_STEPS/2);
+		const float depth_adjust = (float)j * step_interval + depth1;
+
+        // Calculate adjusted depth 3D point in camera 2 space
+        const float3 worldPos = world1 + j * rayStep_world; //(pose1 * cam1.screenToCam(x, y, depth_adjust));
+        const float3 camPos = rayStart_2 + j * rayStep_2; //pose2 * worldPos;
+        const uint2 screen = cam2.camToScreen<uint2>(camPos);
+
+        if (screen.x >= cam2.width || screen.y >= cam2.height) continue;
+
+		// Generate a depth correspondence value
+		const float depth2 = d2.tex2D((int)screen.x, (int)screen.y);
+		const float dweight = ftl::cuda::weighting(fabs(depth2 - camPos.z), params.spatial_smooth);
+		//const float dweight = ftl::cuda::weighting(fabs(depth_adjust - depth1), 2.0f*params.range);
+		
+		// Generate a colour correspondence value
+		const uchar4 colour2 = c2.tex2D((int)screen.x, (int)screen.y);
+		const float cweight = ftl::cuda::colourWeighting(colour1, colour2, params.colour_smooth);
+
+		const float weight = weightFunction<FUNCTION>(params, dweight, cweight);
+
+		++count;
+		contrib += weight;
+		bestcolour = max(cweight, bestcolour);
+		bestdweight = max(dweight, bestdweight);
+		totalcolour += cweight;
+		if (weight > bestweight) {
+			bestweight = weight;
+			bestdepth = depth_adjust;
+		}
+    }
+
+	const float avgcolour = totalcolour/(float)count;
+	const float confidence = bestcolour / totalcolour; //bestcolour - avgcolour;
+	
+	Mask m(mask.tex2D(x,y));
+
+    //if (bestweight > 0.0f) {
+        float old = conf.tex2D(x,y);
+
+        if (bestweight * confidence > old) {
+			dout(x,y) = bestdepth;
+			conf(x,y) = bestweight * confidence;
+		}
+	//}
+	
+	// If a good enough match is found, mark dodgy depth as solid
+	if ((m.isFilled() || m.isDiscontinuity()) && (bestweight > params.match_threshold)) mask(x,y) = 0;
+}
+
+void ftl::cuda::correspondence(
+        TextureObject<float> &d1,
+        TextureObject<float> &d2,
+        TextureObject<uchar4> &c1,
+        TextureObject<uchar4> &c2,
+        TextureObject<float> &dout,
+		TextureObject<float> &conf,
+		TextureObject<int> &mask,
+        float4x4 &pose1,
+        float4x4 &pose1_inv,
+        float4x4 &pose2,
+        const Camera &cam1,
+        const Camera &cam2, const ILWParams &params, int func,
+        cudaStream_t stream) {
+
+	const dim3 gridSize((d1.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (d1.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    //printf("COR SIZE %d,%d\n", p1.width(), p1.height());
+
+	switch (func) {
+    case 0: correspondence_energy_vector_kernel<16,0><<<gridSize, blockSize, 0, stream>>>(d1, d2, c1, c2, dout, conf, mask, pose1, pose1_inv, pose2, cam1, cam2, params); break;
+	case 1: correspondence_energy_vector_kernel<16,1><<<gridSize, blockSize, 0, stream>>>(d1, d2, c1, c2, dout, conf, mask, pose1, pose1_inv, pose2, cam1, cam2, params); break;
+	case 2: correspondence_energy_vector_kernel<16,2><<<gridSize, blockSize, 0, stream>>>(d1, d2, c1, c2, dout, conf, mask, pose1, pose1_inv, pose2, cam1, cam2, params); break;
+	case 3: correspondence_energy_vector_kernel<16,3><<<gridSize, blockSize, 0, stream>>>(d1, d2, c1, c2, dout, conf, mask, pose1, pose1_inv, pose2, cam1, cam2, params); break;
+	case 4: correspondence_energy_vector_kernel<16,4><<<gridSize, blockSize, 0, stream>>>(d1, d2, c1, c2, dout, conf, mask, pose1, pose1_inv, pose2, cam1, cam2, params); break;
+	}
+
+    cudaSafeCall( cudaGetLastError() );
+}
+
+//==============================================================================
+
+__global__ void mask_filter_kernel(
+		TextureObject<float> depth,
+		TextureObject<int> mask) {
+	
+	const int x = (blockIdx.x*blockDim.x + threadIdx.x);
+	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x >= 0 && x < depth.width() && y >=0 && y < depth.height()) {
+		Mask m(mask.tex2D(x,y));
+		if (m.isFilled()) {
+			depth(x,y) = MINF;
+		}
+	}
+}
+
+
+void ftl::cuda::mask_filter(
+		TextureObject<float> &depth,
+		TextureObject<int> &mask,
+		cudaStream_t stream) {
+	const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	mask_filter_kernel<<<gridSize, blockSize, 0, stream>>>(
+		depth, mask
+	);
+	cudaSafeCall( cudaGetLastError() );
+}
diff --git a/applications/reconstruct/src/ilw/discontinuity.cu b/applications/reconstruct/src/ilw/discontinuity.cu
new file mode 100644
index 0000000000000000000000000000000000000000..fe78d47158e81ce02be82953601151f5bf31703a
--- /dev/null
+++ b/applications/reconstruct/src/ilw/discontinuity.cu
@@ -0,0 +1,52 @@
+#include "ilw_cuda.hpp"
+
+#define T_PER_BLOCK 8
+
+using ftl::cuda::Mask;
+
+template <int RADIUS>
+__global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params) {
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x < params.width && y < params.height) {
+		Mask mask(0);
+
+		const float d = depth.tex2D((int)x, (int)y);
+
+		// Calculate depth between 0.0 and 1.0
+		//float p = (d - params.minDepth) / (params.maxDepth - params.minDepth);
+
+		if (d >= params.minDepth && d <= params.maxDepth) {
+			/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
+			// Is there a discontinuity nearby?
+			for (int u=-RADIUS; u<=RADIUS; ++u) {
+				for (int v=-RADIUS; v<=RADIUS; ++v) {
+					// If yes, the flag using w = -1
+					if (fabs(depth.tex2D((int)x+u, (int)y+v) - d) > 0.1f) mask.isDiscontinuity(true);
+				}
+			}
+        }
+        
+        mask_out(x,y) = (int)mask;
+	}
+}
+
+void ftl::cuda::discontinuity(ftl::cuda::TextureObject<int> &mask_out, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, uint discon, cudaStream_t stream) {
+	const dim3 gridSize((params.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (params.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	switch (discon) {
+    case 5 :	discontinuity_kernel<5><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	case 4 :	discontinuity_kernel<4><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	case 3 :	discontinuity_kernel<3><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	case 2 :	discontinuity_kernel<2><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	case 1 :	discontinuity_kernel<1><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	default:	break;
+	}
+	cudaSafeCall( cudaGetLastError() );
+
+#ifdef _DEBUG
+	cudaSafeCall(cudaDeviceSynchronize());
+#endif
+}
diff --git a/applications/reconstruct/src/ilw/fill.cu b/applications/reconstruct/src/ilw/fill.cu
new file mode 100644
index 0000000000000000000000000000000000000000..4109c1034d045ecdede9e28582cd19df670f5a63
--- /dev/null
+++ b/applications/reconstruct/src/ilw/fill.cu
@@ -0,0 +1,71 @@
+#include "ilw_cuda.hpp"
+#include <ftl/cuda/weighting.hpp>
+
+using ftl::cuda::Mask;
+
+#define T_PER_BLOCK 8
+
+template <int RADIUS>
+__global__ void preprocess_kernel(
+    	ftl::cuda::TextureObject<float> depth_in,
+		ftl::cuda::TextureObject<float> depth_out,
+		ftl::cuda::TextureObject<uchar4> colour,
+		ftl::cuda::TextureObject<int> mask,
+		ftl::rgbd::Camera camera,
+		ftl::cuda::ILWParams params) {
+
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    float d = depth_in.tex2D((int)x,(int)y);
+    Mask main_mask(mask.tex2D((int)x,(int)y));
+	uchar4 c = colour.tex2D((int)x,(int)y);
+
+	// Calculate discontinuity mask
+
+	// Fill missing depths
+	if (d < camera.minDepth || d > camera.maxDepth) {
+		float depth_accum = 0.0f;
+		float contrib = 0.0f;
+
+        int v=0;
+		//for (int v=-RADIUS; v<=RADIUS; ++v) {
+			for (int u=-RADIUS; u<=RADIUS; ++u) {
+				uchar4 c2 = colour.tex2D((int)x+u,(int)y+v);
+                float d2 = depth_in.tex2D((int)x+u,(int)y+v);
+                Mask m(mask.tex2D((int)x+u,(int)y+v));
+
+				if (!m.isDiscontinuity() && d2 >= camera.minDepth && d2 <= camera.maxDepth) {
+					float w = ftl::cuda::colourWeighting(c, c2, params.fill_match);
+					depth_accum += d2*w;
+					contrib += w;
+				}
+			}
+		//}
+
+		if (contrib > params.fill_threshold) {
+            d = depth_accum / contrib;
+            main_mask.isFilled(true);
+        }
+	}
+
+    mask(x,y) = (int)main_mask;
+	depth_out(x,y) = d;
+}
+
+void ftl::cuda::preprocess_depth(
+		ftl::cuda::TextureObject<float> &depth_in,
+		ftl::cuda::TextureObject<float> &depth_out,
+		ftl::cuda::TextureObject<uchar4> &colour,
+		ftl::cuda::TextureObject<int> &mask,
+		const ftl::rgbd::Camera &camera,
+		const ftl::cuda::ILWParams &params,
+		cudaStream_t stream) {
+
+	const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	preprocess_kernel<10><<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, colour, mask, camera, params);
+
+	cudaSafeCall( cudaGetLastError() );
+}
diff --git a/applications/reconstruct/src/ilw/ilw.cpp b/applications/reconstruct/src/ilw/ilw.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..93fd75a188b5e48e48e387c5ddf96ea43bcb1eec
--- /dev/null
+++ b/applications/reconstruct/src/ilw/ilw.cpp
@@ -0,0 +1,363 @@
+#include "ilw.hpp"
+#include <ftl/utility/matrix_conversion.hpp>
+#include <ftl/rgbd/source.hpp>
+#include <ftl/cuda/points.hpp>
+#include <loguru.hpp>
+
+#include "ilw_cuda.hpp"
+
+using ftl::ILW;
+using ftl::detail::ILWData;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
+using ftl::rgbd::Format;
+using cv::cuda::GpuMat;
+
+// TODO: Put in common place
+static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
+  Eigen::Affine3d rx =
+      Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+  Eigen::Affine3d ry =
+      Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+  Eigen::Affine3d rz =
+      Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+  return rz * rx * ry;
+}
+
+ILW::ILW(nlohmann::json &config) : ftl::Configurable(config) {
+    enabled_ = value("ilw_align", true);
+    iterations_ = value("iterations", 4);
+    motion_rate_ = value("motion_rate", 0.8f);
+    motion_window_ = value("motion_window", 3);
+    use_lab_ = value("use_Lab", false);
+    params_.colour_smooth = value("colour_smooth", 50.0f);
+    params_.spatial_smooth = value("spatial_smooth", 0.04f);
+    params_.cost_ratio = value("cost_ratio", 0.2f);
+	params_.cost_threshold = value("cost_threshold", 1.0f);
+	discon_mask_ = value("discontinuity_mask",2);
+	fill_depth_ = value("fill_depth", false);
+
+    on("fill_depth", [this](const ftl::config::Event &e) {
+        fill_depth_ = value("fill_depth", false);
+    });
+
+	on("ilw_align", [this](const ftl::config::Event &e) {
+        enabled_ = value("ilw_align", true);
+    });
+
+    on("iterations", [this](const ftl::config::Event &e) {
+        iterations_ = value("iterations", 4);
+    });
+
+    on("motion_rate", [this](const ftl::config::Event &e) {
+        motion_rate_ = value("motion_rate", 0.4f);
+    });
+
+    on("motion_window", [this](const ftl::config::Event &e) {
+        motion_window_ = value("motion_window", 3);
+    });
+
+	on("discontinuity_mask", [this](const ftl::config::Event &e) {
+        discon_mask_ = value("discontinuity_mask", 2);
+    });
+
+    on("use_Lab", [this](const ftl::config::Event &e) {
+        use_lab_ = value("use_Lab", false);
+    });
+
+    on("colour_smooth", [this](const ftl::config::Event &e) {
+        params_.colour_smooth = value("colour_smooth", 50.0f);
+    });
+
+    on("spatial_smooth", [this](const ftl::config::Event &e) {
+        params_.spatial_smooth = value("spatial_smooth", 0.04f);
+    });
+
+    on("cost_ratio", [this](const ftl::config::Event &e) {
+        params_.cost_ratio = value("cost_ratio", 0.2f);
+    });
+
+	on("cost_threshold", [this](const ftl::config::Event &e) {
+        params_.cost_threshold = value("cost_threshold", 1.0f);
+    });
+
+    params_.flags = 0;
+    if (value("ignore_bad", false)) params_.flags |= ftl::cuda::kILWFlag_IgnoreBad;
+    if (value("ignore_bad_colour", false)) params_.flags |= ftl::cuda::kILWFlag_SkipBadColour;
+    if (value("restrict_z", true)) params_.flags |= ftl::cuda::kILWFlag_RestrictZ;
+    if (value("colour_confidence_only", false)) params_.flags |= ftl::cuda::kILWFlag_ColourConfidenceOnly;
+
+    on("ignore_bad", [this](const ftl::config::Event &e) {
+        if (value("ignore_bad", false)) params_.flags |= ftl::cuda::kILWFlag_IgnoreBad;
+        else params_.flags &= ~ftl::cuda::kILWFlag_IgnoreBad;
+    });
+
+    on("ignore_bad_colour", [this](const ftl::config::Event &e) {
+        if (value("ignore_bad_colour", false)) params_.flags |= ftl::cuda::kILWFlag_SkipBadColour;
+        else params_.flags &= ~ftl::cuda::kILWFlag_SkipBadColour;
+    });
+
+    on("restrict_z", [this](const ftl::config::Event &e) {
+        if (value("restrict_z", false)) params_.flags |= ftl::cuda::kILWFlag_RestrictZ;
+        else params_.flags &= ~ftl::cuda::kILWFlag_RestrictZ;
+    });
+
+    on("colour_confidence_only", [this](const ftl::config::Event &e) {
+        if (value("colour_confidence_only", false)) params_.flags |= ftl::cuda::kILWFlag_ColourConfidenceOnly;
+        else params_.flags &= ~ftl::cuda::kILWFlag_ColourConfidenceOnly;
+    });
+
+	if (config["clipping"].is_object()) {
+		auto &c = config["clipping"];
+		float rx = c.value("pitch", 0.0f);
+		float ry = c.value("yaw", 0.0f);
+		float rz = c.value("roll", 0.0f);
+		float x = c.value("x", 0.0f);
+		float y = c.value("y", 0.0f);
+		float z = c.value("z", 0.0f);
+		float width = c.value("width", 1.0f);
+		float height = c.value("height", 1.0f);
+		float depth = c.value("depth", 1.0f);
+
+		Eigen::Affine3f r = create_rotation_matrix(rx, ry, rz).cast<float>();
+		Eigen::Translation3f trans(Eigen::Vector3f(x,y,z));
+		Eigen::Affine3f t(trans);
+
+		clip_.origin = MatrixConversion::toCUDA(r.matrix() * t.matrix());
+		clip_.size = make_float3(width, height, depth);
+		clipping_ = value("clipping_enabled", true);
+	} else {
+		clipping_ = false;
+	}
+
+	on("clipping_enabled", [this](const ftl::config::Event &e) {
+		clipping_ = value("clipping_enabled", true);
+	});
+
+	cudaSafeCall(cudaStreamCreate(&stream_));
+}
+
+ILW::~ILW() {
+
+}
+
+bool ILW::process(ftl::rgbd::FrameSet &fs) {
+    if (!enabled_) return false;
+
+	fs.upload(Channel::Colour + Channel::Depth, stream_);
+    _phase0(fs, stream_);
+
+	params_.range = value("search_range", 0.05f);
+    params_.fill_match = value("fill_match", 50.0f);
+    params_.fill_threshold = value("fill_threshold", 0.0f);
+	params_.match_threshold = value("match_threshold", 0.3f);
+
+    for (int i=0; i<iterations_; ++i) {
+        _phase1(fs, value("cost_function",3), stream_);
+        //for (int j=0; j<3; ++j) {
+            _phase2(fs, motion_rate_, stream_);
+        //}
+
+		params_.range *= value("search_reduce", 0.9f);
+		// TODO: Break if no time left
+
+		//cudaSafeCall(cudaStreamSynchronize(stream_));
+    }
+
+	for (size_t i=0; i<fs.frames.size(); ++i) {
+		auto &f = fs.frames[i];
+		auto *s = fs.sources[i];
+			
+        auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
+        auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
+        ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, discon_mask_, stream_);
+    }
+
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+    return true;
+}
+
+bool ILW::_phase0(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
+    // Make points channel...
+    for (size_t i=0; i<fs.frames.size(); ++i) {
+		auto &f = fs.frames[i];
+		auto *s = fs.sources[i];
+
+		if (f.empty(Channel::Depth + Channel::Colour)) {
+			LOG(ERROR) << "Missing required channel";
+			continue;
+		}
+			
+        //auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
+        auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
+        //ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, discon_mask_, stream);
+
+        // TODO: Create energy vector texture and clear it
+        // Create energy and clear it
+
+        // Convert colour from BGR to BGRA if needed
+		if (f.get<GpuMat>(Channel::Colour).type() == CV_8UC3) {
+            cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+			// Convert to 4 channel colour
+			auto &col = f.get<GpuMat>(Channel::Colour);
+			GpuMat tmp(col.size(), CV_8UC4);
+			cv::cuda::swap(col, tmp);
+            if (use_lab_) cv::cuda::cvtColor(tmp,tmp, cv::COLOR_BGR2Lab, 0, cvstream);
+			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA, 0, cvstream);
+		}
+
+		// Clip first?
+		if (clipping_) {
+			auto clip = clip_;
+			clip.origin = clip.origin * pose;
+			ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), s->parameters(), clip, stream);
+		}
+
+        f.createTexture<float>(Channel::Depth2, Format<float>(f.get<GpuMat>(Channel::Colour).size()));
+        f.createTexture<float>(Channel::Confidence, Format<float>(f.get<GpuMat>(Channel::Colour).size()));
+		f.createTexture<int>(Channel::Mask, Format<int>(f.get<GpuMat>(Channel::Colour).size()));
+        f.createTexture<uchar4>(Channel::Colour);
+		f.createTexture<float>(Channel::Depth);
+
+		f.get<GpuMat>(Channel::Mask).setTo(cv::Scalar(0));
+    }
+
+	//cudaSafeCall(cudaStreamSynchronize(stream_));
+
+    return true;
+}
+
+bool ILW::_phase1(ftl::rgbd::FrameSet &fs, int win, cudaStream_t stream) {
+    // Run correspondence kernel to create an energy vector
+    cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+
+    // Find discontinuity mask
+    for (size_t i=0; i<fs.frames.size(); ++i) {
+        auto &f = fs.frames[i];
+        auto s = fs.sources[i];
+
+        ftl::cuda::discontinuity(
+            f.getTexture<int>(Channel::Mask),
+            f.getTexture<float>(Channel::Depth),
+            s->parameters(),
+            discon_mask_,
+            stream
+        );
+    }
+
+	// First do any preprocessing
+	if (fill_depth_) {
+		for (size_t i=0; i<fs.frames.size(); ++i) {
+			auto &f = fs.frames[i];
+            auto s = fs.sources[i];
+
+			ftl::cuda::preprocess_depth(
+				f.getTexture<float>(Channel::Depth),
+				f.getTexture<float>(Channel::Depth2),
+				f.getTexture<uchar4>(Channel::Colour),
+				f.getTexture<int>(Channel::Mask),
+				s->parameters(),
+				params_,
+				stream
+			);
+
+			//cv::cuda::swap(f.get<GpuMat>(Channel::Depth),f.get<GpuMat>(Channel::Depth2)); 
+			f.swapChannels(Channel::Depth, Channel::Depth2);
+		}
+	}
+
+	//cudaSafeCall(cudaStreamSynchronize(stream_));
+
+	// For each camera combination
+    for (size_t i=0; i<fs.frames.size(); ++i) {
+        auto &f1 = fs.frames[i];
+        f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream);
+		f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
+
+		Eigen::Vector4d d1(0.0, 0.0, 1.0, 0.0);
+		d1 = fs.sources[i]->getPose() * d1;
+
+        for (size_t j=0; j<fs.frames.size(); ++j) {
+            if (i == j) continue;
+
+            //LOG(INFO) << "Running phase1";
+
+            auto &f2 = fs.frames[j];
+            auto s1 = fs.sources[i];
+            auto s2 = fs.sources[j];
+
+			// Are cameras facing similar enough direction?
+			Eigen::Vector4d d2(0.0, 0.0, 1.0, 0.0);
+			d2 = fs.sources[j]->getPose() * d2;
+			// No, so skip this combination
+			if (d1.dot(d2) <= 0.0) continue;
+
+            auto pose1 = MatrixConversion::toCUDA(s1->getPose().cast<float>());
+            auto pose1_inv = MatrixConversion::toCUDA(s1->getPose().cast<float>().inverse());
+            auto pose2 = MatrixConversion::toCUDA(s2->getPose().cast<float>().inverse());
+
+            try {
+            //Calculate energy vector to best correspondence
+            ftl::cuda::correspondence(
+                f1.getTexture<float>(Channel::Depth),
+                f2.getTexture<float>(Channel::Depth),
+                f1.getTexture<uchar4>(Channel::Colour),
+                f2.getTexture<uchar4>(Channel::Colour),
+                // TODO: Add normals and other things...
+                f1.getTexture<float>(Channel::Depth2),
+                f1.getTexture<float>(Channel::Confidence),
+				f1.getTexture<int>(Channel::Mask),
+                pose1,
+                pose1_inv,
+                pose2,
+                s1->parameters(),
+                s2->parameters(),
+                params_,
+                win,
+                stream
+            );
+            } catch (ftl::exception &e) {
+                LOG(ERROR) << "Exception in correspondence: " << e.what();
+            }
+
+            //LOG(INFO) << "Correspondences done... " << i;
+        }
+    }
+
+	//cudaSafeCall(cudaStreamSynchronize(stream_));
+
+    return true;
+}
+
+bool ILW::_phase2(ftl::rgbd::FrameSet &fs, float rate, cudaStream_t stream) {
+    // Run energies and motion kernel
+
+	// Smooth vectors across a window and iteratively
+	// strongly disagreeing vectors should cancel out
+	// A weak vector is overridden by a stronger one.
+
+    for (size_t i=0; i<fs.frames.size(); ++i) {
+        auto &f = fs.frames[i];
+
+        auto pose = MatrixConversion::toCUDA(fs.sources[i]->getPose().cast<float>()); //.inverse());
+
+        ftl::cuda::move_points(
+             f.getTexture<float>(Channel::Depth),
+             f.getTexture<float>(Channel::Depth2),
+			 f.getTexture<float>(Channel::Confidence),
+             fs.sources[i]->parameters(),
+             pose,
+			 params_,
+             rate,
+             motion_window_,
+             stream
+        );
+
+        if (f.hasChannel(Channel::Mask)) {
+			ftl::cuda::mask_filter(f.getTexture<float>(Channel::Depth),
+				f.getTexture<int>(Channel::Mask), stream_);
+		}
+    }
+
+    return true;
+}
diff --git a/applications/reconstruct/src/ilw/ilw.cu b/applications/reconstruct/src/ilw/ilw.cu
new file mode 100644
index 0000000000000000000000000000000000000000..710f7521d4a508dd8fc968f4648a5402f8045fd7
--- /dev/null
+++ b/applications/reconstruct/src/ilw/ilw.cu
@@ -0,0 +1,112 @@
+#include "ilw_cuda.hpp"
+#include <ftl/cuda/weighting.hpp>
+
+using ftl::cuda::TextureObject;
+using ftl::rgbd::Camera;
+
+#define WARP_SIZE 32
+#define T_PER_BLOCK 8
+#define FULL_MASK 0xffffffff
+
+__device__ inline float warpMin(float e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e = min(e, other);
+	}
+	return e;
+}
+
+__device__ inline float warpSum(float e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e += other;
+	}
+	return e;
+}
+
+//==============================================================================
+
+
+
+//==============================================================================
+
+
+
+//==============================================================================
+
+//#define MOTION_RADIUS 9
+
+template <int MOTION_RADIUS>
+__global__ void move_points_kernel(
+    ftl::cuda::TextureObject<float> d_old,
+	ftl::cuda::TextureObject<float> d_new,
+	ftl::cuda::TextureObject<float> conf,
+    ftl::rgbd::Camera camera,
+	float4x4 pose,
+	ftl::cuda::ILWParams params,
+    float rate) {
+
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	const float d0_new = d_new.tex2D((int)x,(int)y);
+	const float d0_old = d_old.tex2D((int)x,(int)y);
+	if (d0_new == 0.0f) return;  // No correspondence found
+    
+    if (x < d_old.width() && y < d_old.height()) {
+		//const float4 world = p(x,y);
+		//if (world.x == MINF) return;
+
+		float delta = 0.0f; //make_float4(0.0f, 0.0f, 0.0f, 0.0f); //ev.tex2D((int)x,(int)y);
+		float contrib = 0.0f;
+
+		// Calculate screen space distortion with neighbours
+		for (int v=-MOTION_RADIUS; v<=MOTION_RADIUS; ++v) {
+			for (int u=-MOTION_RADIUS; u<=MOTION_RADIUS; ++u) {
+				const float dn_new = d_new.tex2D((int)x+u,(int)y+v);
+				const float dn_old = d_old.tex2D((int)x+u,(int)y+v);
+				const float confn = conf.tex2D((int)x+u,(int)y+v);
+				//const float3 pn = make_float3(p.tex2D((int)x+u,(int)y+v));
+				//if (pn.x == MINF) continue;
+				if (dn_new == 0.0f) continue;  // Neighbour has no new correspondence
+
+				const float s = ftl::cuda::spatialWeighting(camera.screenToCam(x,y,d0_new), camera.screenToCam(x+u,y+v,dn_new), params.range); // ftl::cuda::weighting(fabs(d0_new - dn_new), params.range);
+				contrib += (confn+0.01f) * s;
+				delta += (confn+0.01f) * s * ((confn == 0.0f) ? dn_old : dn_new);
+			}
+		}
+
+        if (contrib > 0.0f) {
+            //const float3 newworld = pose * camera.screenToCam(x, y, vec0.x + rate * ((delta / contrib) - vec0.x));
+			//p(x,y) = make_float4(newworld, world.w); //world + rate * (vec / contrib);
+			
+			d_old(x,y) = d0_old + rate * ((delta / contrib) - d0_old);
+        }
+    }
+}
+
+
+void ftl::cuda::move_points(
+        ftl::cuda::TextureObject<float> &d_old,
+		ftl::cuda::TextureObject<float> &d_new,
+		ftl::cuda::TextureObject<float> &conf,
+        const ftl::rgbd::Camera &camera,
+		const float4x4 &pose,
+		const ftl::cuda::ILWParams &params,
+        float rate,
+        int radius,
+        cudaStream_t stream) {
+
+    const dim3 gridSize((d_old.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (d_old.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    switch (radius) {
+    case 9 : move_points_kernel<9><<<gridSize, blockSize, 0, stream>>>(d_old,d_new,conf,camera, pose, params, rate); break;
+    case 5 : move_points_kernel<5><<<gridSize, blockSize, 0, stream>>>(d_old,d_new,conf,camera, pose, params, rate); break;
+    case 3 : move_points_kernel<3><<<gridSize, blockSize, 0, stream>>>(d_old,d_new,conf,camera, pose, params, rate); break;
+    case 1 : move_points_kernel<1><<<gridSize, blockSize, 0, stream>>>(d_old,d_new,conf,camera, pose, params, rate); break;
+    case 0 : move_points_kernel<0><<<gridSize, blockSize, 0, stream>>>(d_old,d_new,conf,camera, pose, params, rate); break;
+    }
+
+    cudaSafeCall( cudaGetLastError() );
+}
diff --git a/applications/reconstruct/src/ilw.hpp b/applications/reconstruct/src/ilw/ilw.hpp
similarity index 73%
rename from applications/reconstruct/src/ilw.hpp
rename to applications/reconstruct/src/ilw/ilw.hpp
index 0be45d015e976b540263a2c16cc5605376092a43..78251832d5647233082c8c59072084c6b1d60559 100644
--- a/applications/reconstruct/src/ilw.hpp
+++ b/applications/reconstruct/src/ilw/ilw.hpp
@@ -6,6 +6,10 @@
 #include <ftl/configurable.hpp>
 #include <vector>
 
+#include "ilw_cuda.hpp"
+
+#include <ftl/cuda/points.hpp>
+
 namespace ftl {
 
 namespace detail {
@@ -40,7 +44,9 @@ class ILW : public ftl::Configurable {
     /**
      * Take a frameset and perform the iterative lattice warping.
      */
-    bool process(ftl::rgbd::FrameSet &fs, cudaStream_t stream=0);
+    bool process(ftl::rgbd::FrameSet &fs);
+
+    inline bool isLabColour() const { return use_lab_; }
 
     private:
     /*
@@ -51,14 +57,26 @@ class ILW : public ftl::Configurable {
     /*
      * Find possible correspondences and a confidence value.
      */
-    bool _phase1(ftl::rgbd::FrameSet &fs, cudaStream_t stream);
+    bool _phase1(ftl::rgbd::FrameSet &fs, int win, cudaStream_t stream);
 
     /*
      * Calculate energies and move the points.
      */
-    bool _phase2(ftl::rgbd::FrameSet &fs);
+    bool _phase2(ftl::rgbd::FrameSet &fs, float rate, cudaStream_t stream);
 
     std::vector<detail::ILWData> data_;
+    bool enabled_;
+    ftl::cuda::ILWParams params_;
+    int iterations_;
+    float motion_rate_;
+    int motion_window_;
+    bool use_lab_;
+	int discon_mask_;
+	bool fill_depth_;
+	ftl::cuda::ClipSpace clip_;
+	bool clipping_;
+
+	cudaStream_t stream_;
 };
 
 }
diff --git a/applications/reconstruct/src/ilw/ilw_cuda.hpp b/applications/reconstruct/src/ilw/ilw_cuda.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..fad97afbdb8385d9381eff5af40fb272f172ca1a
--- /dev/null
+++ b/applications/reconstruct/src/ilw/ilw_cuda.hpp
@@ -0,0 +1,83 @@
+#ifndef _FTL_ILW_CUDA_HPP_
+#define _FTL_ILW_CUDA_HPP_
+
+#include <ftl/cuda_common.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/cuda_matrix_util.hpp>
+#include <ftl/cuda/mask.hpp>
+
+namespace ftl {
+namespace cuda {
+
+struct ILWParams {
+    float spatial_smooth;
+    float colour_smooth;
+	float fill_match;
+	float fill_threshold;
+	float match_threshold;
+    float cost_ratio;
+    float cost_threshold;
+	float range;
+    uint flags;
+};
+
+static const uint kILWFlag_IgnoreBad = 0x0001;
+static const uint kILWFlag_RestrictZ = 0x0002;
+static const uint kILWFlag_SkipBadColour = 0x0004;
+static const uint kILWFlag_ColourConfidenceOnly = 0x0008;
+
+void discontinuity(
+	ftl::cuda::TextureObject<int> &mask_out,
+	ftl::cuda::TextureObject<float> &depth,
+	const ftl::rgbd::Camera &params,
+	uint discon, cudaStream_t stream
+);
+
+void mask_filter(
+	ftl::cuda::TextureObject<float> &depth,
+	ftl::cuda::TextureObject<int> &mask,
+	cudaStream_t stream);
+
+void preprocess_depth(
+	ftl::cuda::TextureObject<float> &depth_in,
+	ftl::cuda::TextureObject<float> &depth_out,
+	ftl::cuda::TextureObject<uchar4> &colour,
+	ftl::cuda::TextureObject<int> &mask,
+	const ftl::rgbd::Camera &camera,
+	const ILWParams &params,
+	cudaStream_t stream
+);
+
+void correspondence(
+    ftl::cuda::TextureObject<float> &d1,
+    ftl::cuda::TextureObject<float> &d2,
+    ftl::cuda::TextureObject<uchar4> &c1,
+    ftl::cuda::TextureObject<uchar4> &c2,
+    ftl::cuda::TextureObject<float> &dout,
+    ftl::cuda::TextureObject<float> &conf,
+	ftl::cuda::TextureObject<int> &mask,
+    float4x4 &pose1,
+    float4x4 &pose1_inv,
+    float4x4 &pose2,
+    const ftl::rgbd::Camera &cam1,
+    const ftl::rgbd::Camera &cam2,
+    const ILWParams &params, int win,
+    cudaStream_t stream
+);
+
+void move_points(
+    ftl::cuda::TextureObject<float> &d_old,
+    ftl::cuda::TextureObject<float> &d_new,
+	ftl::cuda::TextureObject<float> &conf,
+    const ftl::rgbd::Camera &camera,
+    const float4x4 &pose,
+	const ILWParams &params,
+    float rate,
+    int radius,
+    cudaStream_t stream
+);
+
+}
+}
+
+#endif  // _FTL_ILW_CUDA_HPP_
diff --git a/applications/reconstruct/src/ilw_cuda.hpp b/applications/reconstruct/src/ilw_cuda.hpp
deleted file mode 100644
index a01af75149409fe033ba39ffb0170489ee926be9..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/ilw_cuda.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _FTL_ILW_CUDA_HPP_
-#define _FTL_ILW_CUDA_HPP_
-
-#include <ftl/cuda_common.hpp>
-#include <ftl/rgbd/camera.hpp>
-#include <ftl/cuda_matrix_util.hpp>
-
-namespace ftl {
-namespace cuda {
-
-void correspondence_energy_vector(
-    ftl::cuda::TextureObject<float4> &p1,
-    ftl::cuda::TextureObject<float4> &p2,
-    ftl::cuda::TextureObject<uchar4> &c1,
-    ftl::cuda::TextureObject<uchar4> &c2,
-    ftl::cuda::TextureObject<float4> &vout,
-    ftl::cuda::TextureObject<float> &eout,
-    float4x4 &pose2,
-    const ftl::rgbd::Camera &cam2,
-    cudaStream_t stream
-);
-
-}
-}
-
-#endif  // _FTL_ILW_CUDA_HPP_
diff --git a/applications/reconstruct/src/main.cpp b/applications/reconstruct/src/main.cpp
index 6c1d8a8813dd5f5a39a1457003df31bc388ab642..4d239758b19401e3d749ae34829401e298aba332 100644
--- a/applications/reconstruct/src/main.cpp
+++ b/applications/reconstruct/src/main.cpp
@@ -15,10 +15,12 @@
 #include <ftl/slave.hpp>
 #include <ftl/rgbd/group.hpp>
 #include <ftl/threads.hpp>
+#include <ftl/codecs/writer.hpp>
 
-#include "ilw.hpp"
+#include "ilw/ilw.hpp"
 #include <ftl/render/splat_render.hpp>
 
+#include <fstream>
 #include <string>
 #include <vector>
 #include <thread>
@@ -29,6 +31,8 @@
 
 #include <ftl/registration.hpp>
 
+#include <cuda_profiler_api.h>
+
 #ifdef WIN32
 #pragma comment(lib, "Rpcrt4.lib")
 #endif
@@ -38,7 +42,7 @@ using std::string;
 using std::vector;
 using ftl::rgbd::Source;
 using ftl::config::json_t;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
 using json = nlohmann::json;
 using std::this_thread::sleep_for;
@@ -51,9 +55,50 @@ using std::chrono::milliseconds;
 using ftl::registration::loadTransformations;
 using ftl::registration::saveTransformations;
 
+static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
+  Eigen::Affine3d rx =
+      Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+  Eigen::Affine3d ry =
+      Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+  Eigen::Affine3d rz =
+      Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+  return rz * rx * ry;
+}
+
+static void writeSourceProperties(ftl::codecs::Writer &writer, int id, ftl::rgbd::Source *src) {
+	ftl::codecs::StreamPacket spkt;
+	ftl::codecs::Packet pkt;
+
+	spkt.timestamp = 0;
+	spkt.streamID = id;
+	spkt.channel = Channel::Calibration;
+	spkt.channel_count = 1;
+	pkt.codec = ftl::codecs::codec_t::CALIBRATION;
+	pkt.definition = ftl::codecs::definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = std::move(std::vector<uint8_t>((uint8_t*)&src->parameters(), (uint8_t*)&src->parameters() + sizeof(ftl::rgbd::Camera)));
+
+	writer.write(spkt, pkt);
+
+	spkt.channel = Channel::Pose;
+	pkt.codec = ftl::codecs::codec_t::POSE;
+	pkt.definition = ftl::codecs::definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = std::move(std::vector<uint8_t>((uint8_t*)src->getPose().data(), (uint8_t*)src->getPose().data() + 4*4*sizeof(double)));
+
+	writer.write(spkt, pkt);
+}
+
 static void run(ftl::Configurable *root) {
 	Universe *net = ftl::create<Universe>(root, "net");
 	ftl::ctrl::Slave slave(net, root);
+
+	// Controls
+	auto *controls = ftl::create<ftl::Configurable>(root, "controls");
 	
 	net->start();
 	net->waitConnections();
@@ -66,6 +111,26 @@ static void run(ftl::Configurable *root) {
 		return;
 	}
 
+	// Create scene transform, intended for axis aligning the walls and floor
+	Eigen::Matrix4d transform;
+	if (root->getConfig()["transform"].is_object()) {
+		auto &c = root->getConfig()["transform"];
+		float rx = c.value("pitch", 0.0f);
+		float ry = c.value("yaw", 0.0f);
+		float rz = c.value("roll", 0.0f);
+		float x = c.value("x", 0.0f);
+		float y = c.value("y", 0.0f);
+		float z = c.value("z", 0.0f);
+
+		Eigen::Affine3d r = create_rotation_matrix(rx, ry, rz);
+		Eigen::Translation3d trans(Eigen::Vector3d(x,y,z));
+		Eigen::Affine3d t(trans);
+		transform = t.matrix() * r.matrix();
+		LOG(INFO) << "Set transform: " << transform;
+	} else {
+		transform.setIdentity();
+	}
+
 	// Must find pose for each source...
 	if (sources.size() > 1) {
 		std::map<std::string, Eigen::Matrix4d> transformations;
@@ -87,9 +152,10 @@ static void run(ftl::Configurable *root) {
 				//sources = { sources[0] };
 				//sources[0]->setPose(Eigen::Matrix4d::Identity());
 				//break;
+				input->setPose(transform * input->getPose());
 				continue;
 			}
-			input->setPose(T->second);
+			input->setPose(transform * T->second);
 		}
 	}
 
@@ -100,12 +166,22 @@ static void run(ftl::Configurable *root) {
 	ftl::rgbd::Streamer *stream = ftl::create<ftl::rgbd::Streamer>(root, "stream", net);
 	ftl::rgbd::VirtualSource *virt = ftl::create<ftl::rgbd::VirtualSource>(root, "virtual");
 	ftl::render::Splatter *splat = ftl::create<ftl::render::Splatter>(root, "renderer", &scene_B);
-	ftl::rgbd::Group group;
+	ftl::rgbd::Group *group = new ftl::rgbd::Group;
 	ftl::ILW *align = ftl::create<ftl::ILW>(root, "merge");
 
+	int o = root->value("origin_pose", 0) % sources.size();
+	virt->setPose(sources[o]->getPose());
+
 	// Generate virtual camera render when requested by streamer
-	virt->onRender([splat,virt,&scene_B](ftl::rgbd::Frame &out) {
+	virt->onRender([splat,virt,&scene_B,align](ftl::rgbd::Frame &out) {
 		virt->setTimestamp(scene_B.timestamp);
+		// Do we need to convert Lab to BGR?
+		if (align->isLabColour()) {
+			for (auto &f : scene_B.frames) {
+				auto &col = f.get<cv::cuda::GpuMat>(Channel::Colour);
+				cv::cuda::cvtColor(col,col, cv::COLOR_Lab2BGR); // TODO: Add stream
+			}
+		}
 		splat->render(virt, out);
 	});
 	stream->add(virt);
@@ -113,20 +189,62 @@ static void run(ftl::Configurable *root) {
 	for (size_t i=0; i<sources.size(); i++) {
 		Source *in = sources[i];
 		in->setChannel(Channel::Depth);
-		group.addSource(in);
+		group->addSource(in);
 	}
 
-	stream->setLatency(4);  // FIXME: This depends on source!?
+	// ---- Recording code -----------------------------------------------------
+
+	std::ofstream fileout;
+	ftl::codecs::Writer writer(fileout);
+	auto recorder = [&writer,&group](ftl::rgbd::Source *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		ftl::codecs::StreamPacket s = spkt;
+
+		// Patch stream ID to match order in group
+		s.streamID = group->streamID(src);
+		writer.write(s, pkt);
+	};
+
+	root->set("record", false);
+
+	// Allow stream recording
+	root->on("record", [&group,&fileout,&writer,&recorder](const ftl::config::Event &e) {
+		if (e.entity->value("record", false)) {
+			char timestamp[18];
+			std::time_t t=std::time(NULL);
+			std::strftime(timestamp, sizeof(timestamp), "%F-%H%M%S", std::localtime(&t));
+			fileout.open(std::string(timestamp) + ".ftl");
+
+			writer.begin();
+
+			// TODO: Write pose+calibration+config packets
+			auto sources = group->sources();
+			for (int i=0; i<sources.size(); ++i) {
+				writeSourceProperties(writer, i, sources[i]);
+			}
+
+			group->addRawCallback(std::function(recorder));
+		} else {
+			group->removeRawCallback(recorder);
+			writer.end();
+			fileout.close();
+		}
+	});
+
+	// -------------------------------------------------------------------------
+
+	stream->setLatency(6);  // FIXME: This depends on source!?
+	//stream->add(group);
 	stream->run();
 
 	bool busy = false;
 
-	group.setLatency(4);
-	group.setName("ReconGroup");
-	group.sync([splat,virt,&busy,&slave,&scene_A,&scene_B,&align](ftl::rgbd::FrameSet &fs) -> bool {
+	group->setLatency(4);
+	group->setName("ReconGroup");
+	group->sync([splat,virt,&busy,&slave,&scene_A,&scene_B,&align,controls](ftl::rgbd::FrameSet &fs) -> bool {
 		//cudaSetDevice(scene->getCUDADevice());
 
-		if (slave.isPaused()) return true;
+		//if (slave.isPaused()) return true;
+		if (controls->value("paused", false)) return true;
 		
 		if (busy) {
 			LOG(INFO) << "Group frameset dropped: " << fs.timestamp;
@@ -145,8 +263,8 @@ static void run(ftl::Configurable *root) {
 			UNIQUE_LOCK(scene_A.mtx, lk);
 
 			// Send all frames to GPU, block until done?
-			scene_A.upload(Channel::Colour + Channel::Depth);  // TODO: (Nick) Add scene stream.
-			//align->process(scene_A);
+			//scene_A.upload(Channel::Colour + Channel::Depth);  // TODO: (Nick) Add scene stream.
+			align->process(scene_A);
 
 			// TODO: To use second GPU, could do a download, swap, device change,
 			// then upload to other device. Or some direct device-2-device copy.
@@ -157,14 +275,25 @@ static void run(ftl::Configurable *root) {
 		return true;
 	});
 
+	LOG(INFO) << "Shutting down...";
 	ftl::timer::stop();
 	slave.stop();
 	net->shutdown();
+	ftl::pool.stop();
+
+	cudaProfilerStop();
+
+	LOG(INFO) << "Deleting...";
+
 	delete align;
 	delete splat;
 	delete stream;
 	delete virt;
 	delete net;
+	delete group;
+
+	ftl::config::cleanup();  // Remove any last configurable objects.
+	LOG(INFO) << "Done.";
 }
 
 int main(int argc, char **argv) {
diff --git a/applications/reconstruct/src/mls.cu b/applications/reconstruct/src/mls/mls.cu
similarity index 100%
rename from applications/reconstruct/src/mls.cu
rename to applications/reconstruct/src/mls/mls.cu
diff --git a/applications/reconstruct/src/point_cloud.hpp b/applications/reconstruct/src/point_cloud.hpp
deleted file mode 100644
index 7b5f49c07ea5826736feb861bcd42f133367c291..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/point_cloud.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _FTL_POINT_CLOUD_HPP_
-#define _FTL_POINT_CLOUD_HPP_
-
-namespace ftl {
-namespace pointcloud {
-
-
-struct VoxelPoint {
-	union {
-	uint64_t val;
-	struct {
-	uint16_t x : 12;  // Block X
-	uint16_t y : 12;  // Block Y
-	uint16_t z : 12;  // Block Z
-	uint16_t v : 9;   // Voxel offset in block 0-511
-	};
-	};
-};
-
-struct VoxelColour {
-	union {
-	uint32_t val;
-	struct {
-	uint8_t b;
-	uint8_t g;
-	uint8_t r;
-	uint8_t a;
-	};
-	};
-};
-
-
-}
-}
-
-#endif  // _FTL_POINT_CLOUD_HPP_
diff --git a/applications/reconstruct/src/ray_cast_sdf.cpp b/applications/reconstruct/src/ray_cast_sdf.cpp
deleted file mode 100644
index 1c12a762cd56b82d8dd8be585c0fe75733b010f5..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/ray_cast_sdf.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-//#include <stdafx.h>
-
-#include <ftl/voxel_hash.hpp>
-#include "compactors.hpp"
-#include "splat_render_cuda.hpp"
-
-//#include "Util.h"
-
-#include <ftl/ray_cast_sdf.hpp>
-
-
-extern "C" void renderCS(
-	const ftl::voxhash::HashData& hashData,
-	const RayCastData &rayCastData,
-	const RayCastParams &rayCastParams, cudaStream_t stream);
-
-extern "C" void computeNormals(float4* d_output, float3* d_input, unsigned int width, unsigned int height);
-extern "C" void convertDepthFloatToCameraSpaceFloat3(float3* d_output, float* d_input, float4x4 intrinsicsInv, unsigned int width, unsigned int height, const DepthCameraData& depthCameraData);
-
-extern "C" void resetRayIntervalSplatCUDA(RayCastData& data, const RayCastParams& params);
-extern "C" void rayIntervalSplatCUDA(const ftl::voxhash::HashData& hashData,
-								 const RayCastData &rayCastData, const RayCastParams &rayCastParams);
-
-extern "C" void nickRenderCUDA(const ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, const RayCastData &rayCastData, const RayCastParams &params, cudaStream_t stream);
-
-
-
-void CUDARayCastSDF::create(const RayCastParams& params)
-{
-	m_params = params;
-	m_data.allocate(m_params);
-	//m_rayIntervalSplatting.OnD3D11CreateDevice(DXUTGetD3D11Device(), params.m_width, params.m_height);
-}
-
-void CUDARayCastSDF::destroy(void)
-{
-	m_data.free();
-	//m_rayIntervalSplatting.OnD3D11DestroyDevice();
-}
-
-//extern "C" unsigned int compactifyHashAllInOneCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
-
-
-void CUDARayCastSDF::compactifyHashEntries(ftl::voxhash::HashData& hashData, ftl::voxhash::HashParams& hashParams, cudaStream_t stream) { //const DepthCameraData& depthCameraData) {
-		
-	ftl::cuda::compactifyVisible(hashData, hashParams, m_params.camera, stream);		//this version uses atomics over prefix sums, which has a much better performance
-	//std::cout << "Ray blocks = " << hashParams.m_numOccupiedBlocks << std::endl;
-	//hashData.updateParams(hashParams);	//make sure numOccupiedBlocks is updated on the GPU
-}
-
-void CUDARayCastSDF::render(ftl::voxhash::HashData& hashData, ftl::voxhash::HashParams& hashParams, const DepthCameraParams& cameraParams, const Eigen::Matrix4f& lastRigidTransform, cudaStream_t stream)
-{
-	//updateConstantDepthCameraParams(cameraParams);
-	//rayIntervalSplatting(hashData, hashParams, lastRigidTransform);
-	//m_data.d_rayIntervalSplatMinArray = m_rayIntervalSplatting.mapMinToCuda();
-	//m_data.d_rayIntervalSplatMaxArray = m_rayIntervalSplatting.mapMaxToCuda();
-
-	m_params.camera = cameraParams;
-	//m_params.m_numOccupiedSDFBlocks = hashParams.m_numOccupiedBlocks;
-	m_params.m_viewMatrix = MatrixConversion::toCUDA(lastRigidTransform.inverse());
-	m_params.m_viewMatrixInverse = MatrixConversion::toCUDA(lastRigidTransform);
-	//m_data.updateParams(m_params);
-
-	compactifyHashEntries(hashData, hashParams, stream);
-
-	if (hash_render_) {
-		//ftl::cuda::isosurface_point_image(hashData, hashParams, m_data, m_params, stream);
-	} else renderCS(hashData, m_data, m_params, stream);
-
-	//convertToCameraSpace(cameraData);
-	//if (!m_params.m_useGradients)
-	//{
-	//	computeNormals(m_data.d_normals, m_data.d_depth3, m_params.m_width, m_params.m_height);
-	//}
-
-	//m_rayIntervalSplatting.unmapCuda();
-
-}
-
-
-/*void CUDARayCastSDF::convertToCameraSpace(const DepthCameraData& cameraData)
-{
-	convertDepthFloatToCameraSpaceFloat3(m_data.d_depth3, m_data.d_depth, m_params.m_intrinsicsInverse, m_params.m_width, m_params.m_height, cameraData);
-	
-	if(!m_params.m_useGradients) {
-		computeNormals(m_data.d_normals, m_data.d_depth3, m_params.m_width, m_params.m_height);
-	}
-}*/
-
-/*void CUDARayCastSDF::rayIntervalSplatting(const ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, const Eigen::Matrix4f& lastRigidTransform)
-{
-	if (hashParams.m_numOccupiedBlocks == 0)	return;
-
-	if (m_params.m_maxNumVertices <= 6*hashParams.m_numOccupiedBlocks) { // 6 verts (2 triangles) per block
-		throw std::runtime_error("not enough space for vertex buffer for ray interval splatting");
-	}
-
-	m_params.m_numOccupiedSDFBlocks = hashParams.m_numOccupiedBlocks;
-	m_params.m_viewMatrix = MatrixConversion::toCUDA(lastRigidTransform.inverse());
-	m_params.m_viewMatrixInverse = MatrixConversion::toCUDA(lastRigidTransform);
-
-	m_data.updateParams(m_params); // !!! debugging
-
-	//don't use ray interval splatting (cf CUDARayCastSDF.cu -> line 40
-	//m_rayIntervalSplatting.rayIntervalSplatting(DXUTGetD3D11DeviceContext(), hashData, cameraData, m_data, m_params, m_params.m_numOccupiedSDFBlocks*6);
-}*/
\ No newline at end of file
diff --git a/applications/reconstruct/src/ray_cast_sdf.cu b/applications/reconstruct/src/ray_cast_sdf.cu
deleted file mode 100644
index 10fd3e0b7b84ca694432abc7dc68fc513ad483eb..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/ray_cast_sdf.cu
+++ /dev/null
@@ -1,189 +0,0 @@
-
-//#include <cuda_runtime.h>
-
-#include <ftl/cuda_matrix_util.hpp>
-
-#include <ftl/depth_camera.hpp>
-#include <ftl/voxel_hash.hpp>
-#include <ftl/ray_cast_util.hpp>
-
-#define T_PER_BLOCK 8
-#define NUM_GROUPS_X 1024
-
-#define NUM_CUDA_BLOCKS  10000
-
-//texture<float, cudaTextureType2D, cudaReadModeElementType> rayMinTextureRef;
-//texture<float, cudaTextureType2D, cudaReadModeElementType> rayMaxTextureRef;
-
-__global__ void renderKernel(ftl::voxhash::HashData hashData, RayCastData rayCastData, RayCastParams rayCastParams) 
-{
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	//const RayCastParams& rayCastParams = c_rayCastParams;
-
-	if (x < rayCastParams.m_width && y < rayCastParams.m_height) {
-		rayCastData.d_depth[y*rayCastParams.m_width+x] = MINF;
-		rayCastData.d_depth3[y*rayCastParams.m_width+x] = make_float3(MINF,MINF,MINF);
-		rayCastData.d_normals[y*rayCastParams.m_width+x] = make_float4(MINF,MINF,MINF,MINF);
-		rayCastData.d_colors[y*rayCastParams.m_width+x] = make_uchar3(0,0,0);
-
-		float3 camDir = normalize(rayCastParams.camera.kinectProjToCamera(x, y, 1.0f));
-		float3 worldCamPos = rayCastParams.m_viewMatrixInverse * make_float3(0.0f, 0.0f, 0.0f);
-		float4 w = rayCastParams.m_viewMatrixInverse * make_float4(camDir, 0.0f);
-		float3 worldDir = normalize(make_float3(w.x, w.y, w.z));
-
-		////use ray interval splatting
-		//float minInterval = tex2D(rayMinTextureRef, x, y);
-		//float maxInterval = tex2D(rayMaxTextureRef, x, y);
-
-		//don't use ray interval splatting
-		float minInterval = rayCastParams.m_minDepth;
-		float maxInterval = rayCastParams.m_maxDepth;
-
-		//if (minInterval == 0 || minInterval == MINF) minInterval = rayCastParams.m_minDepth;
-		//if (maxInterval == 0 || maxInterval == MINF) maxInterval = rayCastParams.m_maxDepth;
-		//TODO MATTHIAS: shouldn't this return in the case no interval is found?
-		if (minInterval == 0 || minInterval == MINF) return;
-		if (maxInterval == 0 || maxInterval == MINF) return;
-
-		// debugging 
-		//if (maxInterval < minInterval) {
-		//	printf("ERROR (%d,%d): [ %f, %f ]\n", x, y, minInterval, maxInterval);
-		//}
-
-		rayCastData.traverseCoarseGridSimpleSampleAll(hashData, rayCastParams, worldCamPos, worldDir, camDir, make_int3(x,y,1), minInterval, maxInterval);
-	} 
-}
-
-extern "C" void renderCS(const ftl::voxhash::HashData& hashData, const RayCastData &rayCastData, const RayCastParams &rayCastParams, cudaStream_t stream) 
-{
-
-	const dim3 gridSize((rayCastParams.m_width + T_PER_BLOCK - 1)/T_PER_BLOCK, (rayCastParams.m_height + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-	//cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
-	//cudaBindTextureToArray(rayMinTextureRef, rayCastData.d_rayIntervalSplatMinArray, channelDesc);
-	//cudaBindTextureToArray(rayMaxTextureRef, rayCastData.d_rayIntervalSplatMaxArray, channelDesc);
-
-	//printf("Ray casting render...\n");
-
-	renderKernel<<<gridSize, blockSize, 0, stream>>>(hashData, rayCastData, rayCastParams);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-}
-
-
-/////////////////////////////////////////////////////////////////////////
-// ray interval splatting
-/////////////////////////////////////////////////////////////////////////
-
-/*__global__ void resetRayIntervalSplatKernel(RayCastData data) 
-{
-	uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
-	data.point_cloud_[idx] = make_float3(MINF);
-}
-
-extern "C" void resetRayIntervalSplatCUDA(RayCastData& data, const RayCastParams& params)
-{
-	const dim3 gridSize(NUM_GROUPS_X, (params.m_maxNumVertices + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1); // ! todo check if need third dimension?
-	const dim3 blockSize(1, 1, 1);
-
-	resetRayIntervalSplatKernel<<<gridSize, blockSize>>>(data);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-}*/
-
-/*__global__ void rayIntervalSplatKernel(ftl::voxhash::HashData hashData, DepthCameraData depthCameraData, RayCastData rayCastData, DepthCameraData cameraData) 
-{
-	uint idx = blockIdx.x + blockIdx.y * NUM_GROUPS_X;
-
-	const ftl::voxhash::HashEntry& entry = hashData.d_hashCompactified[idx];
-	if (entry.ptr != ftl::voxhash::FREE_ENTRY) {
-		//if (!hashData.isSDFBlockInCameraFrustumApprox(entry.pos)) return;
-		const RayCastParams &params = c_rayCastParams;
-		const float4x4& viewMatrix = params.m_viewMatrix;
-
-		float3 worldCurrentVoxel = hashData.SDFBlockToWorld(entry.pos);
-
-		float3 MINV = worldCurrentVoxel - c_hashParams.m_virtualVoxelSize / 2.0f;
-
-		float3 maxv = MINV+SDF_BLOCK_SIZE*c_hashParams.m_virtualVoxelSize;
-
-		float3 proj000 = cameraData.cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, MINV.z));
-		float3 proj100 = cameraData.cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, MINV.z));
-		float3 proj010 = cameraData.cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, MINV.z));
-		float3 proj001 = cameraData.cameraToKinectProj(viewMatrix * make_float3(MINV.x, MINV.y, maxv.z));
-		float3 proj110 = cameraData.cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, MINV.z));
-		float3 proj011 = cameraData.cameraToKinectProj(viewMatrix * make_float3(MINV.x, maxv.y, maxv.z));
-		float3 proj101 = cameraData.cameraToKinectProj(viewMatrix * make_float3(maxv.x, MINV.y, maxv.z));
-		float3 proj111 = cameraData.cameraToKinectProj(viewMatrix * make_float3(maxv.x, maxv.y, maxv.z));
-
-		// Tree Reduction Min
-		float3 min00 = fminf(proj000, proj100);
-		float3 min01 = fminf(proj010, proj001);
-		float3 min10 = fminf(proj110, proj011);
-		float3 min11 = fminf(proj101, proj111);
-
-		float3 min0 = fminf(min00, min01);
-		float3 min1 = fminf(min10, min11);
-
-		float3 minFinal = fminf(min0, min1);
-
-		// Tree Reduction Max
-		float3 max00 = fmaxf(proj000, proj100);
-		float3 max01 = fmaxf(proj010, proj001);
-		float3 max10 = fmaxf(proj110, proj011);
-		float3 max11 = fmaxf(proj101, proj111);
-
-		float3 max0 = fmaxf(max00, max01);
-		float3 max1 = fmaxf(max10, max11);
-
-		float3 maxFinal = fmaxf(max0, max1);
-
-		float depth = maxFinal.z;
-		if(params.m_splatMinimum == 1) {
-			depth = minFinal.z;
-		}
-		float depthWorld = cameraData.kinectProjToCameraZ(depth);
-
-		//uint addr = idx*4;
-		//rayCastData.d_vertexBuffer[addr] = make_float4(maxFinal.x, minFinal.y, depth, depthWorld);
-		//rayCastData.d_vertexBuffer[addr+1] = make_float4(minFinal.x, minFinal.y, depth, depthWorld);
-		//rayCastData.d_vertexBuffer[addr+2] = make_float4(maxFinal.x, maxFinal.y, depth, depthWorld);
-		//rayCastData.d_vertexBuffer[addr+3] = make_float4(minFinal.x, maxFinal.y, depth, depthWorld);
-
-		// Note (Nick) : Changed to create point cloud instead of vertex.
-		uint addr = idx;
-		rayCastData.point_cloud_[addr] = make_float3(maxFinal.x, maxFinal.y, depth);
-		//printf("Ray: %f\n", depth);
-
-		uint addr = idx*6;
-		rayCastData.d_vertexBuffer[addr] = make_float4(maxFinal.x, minFinal.y, depth, depthWorld);
-		rayCastData.d_vertexBuffer[addr+1] = make_float4(minFinal.x, minFinal.y, depth, depthWorld);
-		rayCastData.d_vertexBuffer[addr+2] = make_float4(maxFinal.x, maxFinal.y, depth, depthWorld);
-		rayCastData.d_vertexBuffer[addr+3] = make_float4(minFinal.x, minFinal.y, depth, depthWorld);
-		rayCastData.d_vertexBuffer[addr+4] = make_float4(maxFinal.x, maxFinal.y, depth, depthWorld);
-		rayCastData.d_vertexBuffer[addr+5] = make_float4(minFinal.x, maxFinal.y, depth, depthWorld);
-	}
-}
-
-extern "C" void rayIntervalSplatCUDA(const ftl::voxhash::HashData& hashData, const DepthCameraData& cameraData, const RayCastData &rayCastData, const RayCastParams &rayCastParams) 
-{
-	//printf("Ray casting...\n");
-	const dim3 gridSize(NUM_GROUPS_X, (rayCastParams.m_numOccupiedSDFBlocks + NUM_GROUPS_X - 1) / NUM_GROUPS_X, 1);
-	const dim3 blockSize(1, 1, 1);
-
-	rayIntervalSplatKernel<<<gridSize, blockSize>>>(hashData, cameraData, rayCastData, cameraData);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-}  */
diff --git a/applications/reconstruct/src/scene_rep_hash_sdf.cu b/applications/reconstruct/src/scene_rep_hash_sdf.cu
deleted file mode 100644
index 4750d3e7ed4f7aeb68875e0b5050d76efed5b715..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/scene_rep_hash_sdf.cu
+++ /dev/null
@@ -1,282 +0,0 @@
-// From: https://github.com/niessner/VoxelHashing/blob/master/DepthSensingCUDA/Source/CUDASceneRepHashSDF.cu
-
-//#include <cutil_inline.h>
-//#include <cutil_math.h>
-//#include <vector_types.h>
-//#include <cuda_runtime.h>
-
-#include <ftl/cuda_matrix_util.hpp>
-
-#include <ftl/voxel_hash.hpp>
-#include <ftl/depth_camera.hpp>
-#include <ftl/ray_cast_params.hpp>
-
-#define T_PER_BLOCK 8
-
-using ftl::voxhash::HashData;
-using ftl::voxhash::HashParams;
-using ftl::voxhash::Voxel;
-using ftl::voxhash::HashEntry;
-using ftl::voxhash::FREE_ENTRY;
-
-// TODO (Nick) Use ftl::cuda::Texture (texture objects)
-//texture<float, cudaTextureType2D, cudaReadModeElementType> depthTextureRef;
-//texture<float4, cudaTextureType2D, cudaReadModeElementType> colorTextureRef;
-
-__device__ __constant__ HashParams c_hashParams;
-__device__ __constant__ RayCastParams c_rayCastParams;
-//__device__ __constant__ DepthCameraParams c_depthCameraParams;
-__device__ __constant__ ftl::voxhash::DepthCameraCUDA c_cameras[MAX_CAMERAS];
-
-extern "C" void updateConstantHashParams(const HashParams& params) {
-
-	size_t size;
-	cudaSafeCall(cudaGetSymbolSize(&size, c_hashParams));
-	cudaSafeCall(cudaMemcpyToSymbol(c_hashParams, &params, size, 0, cudaMemcpyHostToDevice));
-	
-#ifdef DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-	}
-
-
-/*extern "C" void updateConstantRayCastParams(const RayCastParams& params) {
-	//printf("Update ray cast params\n");
-	size_t size;
-	cudaSafeCall(cudaGetSymbolSize(&size, c_rayCastParams));
-	cudaSafeCall(cudaMemcpyToSymbol(c_rayCastParams, &params, size, 0, cudaMemcpyHostToDevice));
-	
-#ifdef DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-
-}*/
-
-extern "C" void updateCUDACameraConstant(ftl::voxhash::DepthCameraCUDA *data, int count) {
-	if (count == 0 || count >= MAX_CAMERAS) return;
-	//printf("Update depth camera params\n");
-	size_t size;
-	cudaSafeCall(cudaGetSymbolSize(&size, c_cameras));
-	cudaSafeCall(cudaMemcpyToSymbol(c_cameras, data, sizeof(ftl::voxhash::DepthCameraCUDA)*count, 0, cudaMemcpyHostToDevice));
-	
-#ifdef DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-
-}
-
-/*__global__ void resetHeapKernel(HashData hashData) 
-{
-	const HashParams& hashParams = c_hashParams;
-	unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
-
-	if (idx == 0) {
-		hashData.d_heapCounter[0] = hashParams.m_numSDFBlocks - 1;	//points to the last element of the array
-	}
-	
-	if (idx < hashParams.m_numSDFBlocks) {
-
-		hashData.d_heap[idx] = hashParams.m_numSDFBlocks - idx - 1;
-		uint blockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
-		uint base_idx = idx * blockSize;
-		for (uint i = 0; i < blockSize; i++) {
-			hashData.deleteVoxel(base_idx+i);
-		}
-	}
-}*/
-
-__global__ void resetHashKernel(HashData hashData) 
-{
-	const HashParams& hashParams = c_hashParams;
-	const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
-	if (idx < hashParams.m_hashNumBuckets) {
-		hashData.deleteHashEntry(hashData.d_hash[idx]);
-		//hashData.deleteHashEntry(hashData.d_hashCompactified[idx]);
-	}
-}
-
-
-__global__ void resetHashBucketMutexKernel(HashData hashData) 
-{
-	const HashParams& hashParams = c_hashParams;
-	const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
-	if (idx < hashParams.m_hashNumBuckets) {
-		hashData.d_hashBucketMutex[idx] = FREE_ENTRY;
-	}
-}
-
-extern "C" void resetCUDA(HashData& hashData, const HashParams& hashParams)
-{
-	//{
-		//resetting the heap and SDF blocks
-		//const dim3 gridSize((hashParams.m_numSDFBlocks + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
-		//const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
-
-		//resetHeapKernel<<<gridSize, blockSize>>>(hashData);
-
-
-		//#ifdef _DEBUG
-		//	cudaSafeCall(cudaDeviceSynchronize());
-			//cutilCheckMsg(__FUNCTION__);
-		//#endif
-
-	//}
-
-	{
-		//resetting the hash
-		const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
-		const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
-
-		resetHashKernel<<<gridSize, blockSize>>>(hashData);
-
-		#ifdef _DEBUG
-			cudaSafeCall(cudaDeviceSynchronize());
-			//cutilCheckMsg(__FUNCTION__);
-		#endif
-	}
-
-	{
-		//resetting the mutex
-		const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
-		const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
-
-		resetHashBucketMutexKernel<<<gridSize, blockSize>>>(hashData);
-
-		#ifdef _DEBUG
-			cudaSafeCall(cudaDeviceSynchronize());
-			//cutilCheckMsg(__FUNCTION__);
-		#endif
-	}
-
-
-}
-
-extern "C" void resetHashBucketMutexCUDA(HashData& hashData, const HashParams& hashParams, cudaStream_t stream)
-{
-	const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
-	const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
-
-	resetHashBucketMutexKernel<<<gridSize, blockSize, 0, stream>>>(hashData);
-
-#ifdef _DEBUG
-	cudaSafeCall(cudaDeviceSynchronize());
-	//cutilCheckMsg(__FUNCTION__);
-#endif
-}
-
-
-// Note: bitMask used for Streaming out code... could be set to nullptr if not streaming out
-// Note: Allocations might need to be around fat rays since multiple voxels could correspond
-// to same depth map pixel at larger distances.
-__global__ void allocKernel(HashData hashData, HashParams hashParams, int camnum) 
-{
-	//const HashParams& hashParams = c_hashParams;
-	const ftl::voxhash::DepthCameraCUDA camera = c_cameras[camnum];
-	const DepthCameraParams &cameraParams = camera.params;
-
-	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-	if (x < cameraParams.m_imageWidth && y < cameraParams.m_imageHeight)
-	{
-		float d = tex2D<float>(camera.depth, x, y);
-		//if (d == MINF || d < cameraParams.m_sensorDepthWorldMin || d > cameraParams.m_sensorDepthWorldMax)	return;
-		if (d == MINF || d == 0.0f)	return;
-
-		if (d >= hashParams.m_maxIntegrationDistance) return;
-
-		// TODO (Nick) Use covariance to include a frustrum of influence
-		float t = hashData.getTruncation(d);
-		float minDepth = min(hashParams.m_maxIntegrationDistance, d-t);
-		float maxDepth = min(hashParams.m_maxIntegrationDistance, d+t);
-		if (minDepth >= maxDepth) return;
-
-		// Convert ray from image coords to world
-		// Does kinectDepthToSkeleton convert pixel values to coordinates using
-		// camera intrinsics? Same as what reprojectTo3D does in OpenCV?
-		float3 rayMin = cameraParams.kinectDepthToSkeleton(x, y, minDepth);
-		// Is the rigid transform then the estimated camera pose?
-		rayMin = camera.pose * rayMin;
-		//printf("Ray min: %f,%f,%f\n", rayMin.x, rayMin.y, rayMin.z);
-		float3 rayMax = cameraParams.kinectDepthToSkeleton(x, y, maxDepth);
-		rayMax = camera.pose * rayMax;
-
-		float3 rayDir = normalize(rayMax - rayMin);
-	
-		// Only ray cast from min possible depth to max depth
-		int3 idCurrentVoxel = hashData.worldToSDFBlock(rayMin);
-		int3 idEnd = hashData.worldToSDFBlock(rayMax);
-		
-		float3 step = make_float3(sign(rayDir));
-		float3 boundaryPos = hashData.SDFBlockToWorld(idCurrentVoxel+make_int3(clamp(step, 0.0, 1.0f)))-0.5f*hashParams.m_virtualVoxelSize;
-		float3 tMax = (boundaryPos-rayMin)/rayDir;
-		float3 tDelta = (step*SDF_BLOCK_SIZE*hashParams.m_virtualVoxelSize)/rayDir;
-		int3 idBound = make_int3(make_float3(idEnd)+step);
-
-		//#pragma unroll
-		//for(int c = 0; c < 3; c++) {
-		//	if (rayDir[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
-		//	if (boundaryPos[c] - rayMin[c] == 0.0f) { tMax[c] = PINF; tDelta[c] = PINF; }
-		//}
-		if (rayDir.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
-		if (boundaryPos.x - rayMin.x == 0.0f) { tMax.x = PINF; tDelta.x = PINF; }
-
-		if (rayDir.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
-		if (boundaryPos.y - rayMin.y == 0.0f) { tMax.y = PINF; tDelta.y = PINF; }
-
-		if (rayDir.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
-		if (boundaryPos.z - rayMin.z == 0.0f) { tMax.z = PINF; tDelta.z = PINF; }
-
-
-		unsigned int iter = 0; // iter < g_MaxLoopIterCount
-		unsigned int g_MaxLoopIterCount = 1024;
-#pragma unroll 1
-		while(iter < g_MaxLoopIterCount) {
-
-			//check if it's in the frustum and not checked out
-			if (hashData.isInBoundingBox(hashParams, idCurrentVoxel)) { //} && !isSDFBlockStreamedOut(idCurrentVoxel, hashData, d_bitMask)) {		
-				hashData.allocBlock(idCurrentVoxel);
-				//printf("Allocate block: %d\n",idCurrentVoxel.x);
-			}
-
-			// Traverse voxel grid
-			if(tMax.x < tMax.y && tMax.x < tMax.z)	{
-				idCurrentVoxel.x += step.x;
-				if(idCurrentVoxel.x == idBound.x) return;
-				tMax.x += tDelta.x;
-			}
-			else if(tMax.z < tMax.y) {
-				idCurrentVoxel.z += step.z;
-				if(idCurrentVoxel.z == idBound.z) return;
-				tMax.z += tDelta.z;
-			}
-			else	{
-				idCurrentVoxel.y += step.y;
-				if(idCurrentVoxel.y == idBound.y) return;
-				tMax.y += tDelta.y;
-			}
-
-			iter++;
-		}
-	}
-}
-
-extern "C" void allocCUDA(HashData& hashData, const HashParams& hashParams, int camnum, const DepthCameraParams &depthCameraParams, cudaStream_t stream) 
-{
-
-	//printf("Allocating: %d\n",depthCameraParams.m_imageWidth);
-
-	const dim3 gridSize((depthCameraParams.m_imageWidth + T_PER_BLOCK - 1)/T_PER_BLOCK, (depthCameraParams.m_imageHeight + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-	allocKernel<<<gridSize, blockSize, 0, stream>>>(hashData, hashParams, camnum);
-
-
-	#ifdef _DEBUG
-		cudaSafeCall(cudaDeviceSynchronize());
-		//cutilCheckMsg(__FUNCTION__);
-	#endif
-}
diff --git a/applications/reconstruct/src/virtual_source.cpp b/applications/reconstruct/src/virtual_source.cpp
deleted file mode 100644
index c8dc380743373c591ffcfcea6aae0f713c1068c1..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/virtual_source.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-#include <ftl/virtual_source.hpp>
-#include <ftl/depth_camera.hpp>
-#include <ftl/voxel_scene.hpp>
-#include <ftl/ray_cast_sdf.hpp>
-
-#define LOGURU_WITH_STREAMS 1
-#include <loguru.hpp>
-
-using ftl::rgbd::VirtualSource;
-
-VirtualSource::VirtualSource(ftl::rgbd::Source *host)
-		: ftl::rgbd::detail::Source(host) {
-	rays_ = ftl::create<CUDARayCastSDF>(host, "raycaster"); //new CUDARayCastSDF(host->getConfig());
-	scene_ = nullptr;
-
-	params_.fx = rays_->value("focal", 430.0f);
-	params_.fy = params_.fx;
-	params_.width = rays_->value("width", 640);
-	params_.height = rays_->value("height", 480);
-	params_.cx =  -((double)params_.width / 2);
-	params_.cy = -((double)params_.height / 2);
-	params_.maxDepth = rays_->value("max_depth", 10.0f);
-	params_.minDepth = rays_->value("min_depth", 0.1f);
-
-	capabilities_ = kCapMovable | kCapVideo | kCapStereo;
-
-	rgb_ = cv::Mat(cv::Size(params_.width,params_.height), CV_8UC3);
-	idepth_ = cv::Mat(cv::Size(params_.width,params_.height), CV_32SC1);
-	depth_ = cv::Mat(cv::Size(params_.width,params_.height), CV_32FC1);
-
-	rays_->on("focal", [this](const ftl::config::Event &e) {
-		params_.fx = rays_->value("focal", 430.0f);
-		params_.fy = params_.fx;
-	});
-
-	rays_->on("width", [this](const ftl::config::Event &e) {
-		params_.width = rays_->value("width", 640);
-	});
-
-	rays_->on("height", [this](const ftl::config::Event &e) {
-		params_.height = rays_->value("height", 480);
-	});
-}
-
-VirtualSource::~VirtualSource() {
-	if (scene_) delete scene_;
-	if (rays_) delete rays_;
-}
-
-void VirtualSource::setScene(ftl::voxhash::SceneRep *scene) {
-	scene_ = scene;
-}
-
-bool VirtualSource::grab(int n, int b) {
-	if (scene_) {
-		// Ensure this host thread is using correct GPU.
-
-		cudaSafeCall(cudaSetDevice(scene_->getCUDADevice()));
-		DepthCameraParams params;
-		params.fx = params_.fx;
-		params.fy = params_.fy;
-		params.mx = -params_.cx;
-		params.my = -params_.cy;
-		params.m_imageWidth = params_.width;
-		params.m_imageHeight = params_.height;
-		params.m_sensorDepthWorldMin = params_.minDepth;
-		params.m_sensorDepthWorldMax = params_.maxDepth;
-
-		// TODO(Nick) Use double precision pose here
-		rays_->render(scene_->getHashData(), scene_->getHashParams(), params, host_->getPose().cast<float>(), scene_->getIntegrationStream());
-
-		//unique_lock<mutex> lk(mutex_);
-		if (rays_->isIntegerDepth()) {
-			rays_->getRayCastData().download((int*)idepth_.data, (uchar3*)rgb_.data, rays_->getRayCastParams(), scene_->getIntegrationStream());
-
-			cudaSafeCall(cudaStreamSynchronize(scene_->getIntegrationStream()));
-			idepth_.convertTo(depth_, CV_32FC1, 1.0f / 100.0f);
-		} else {
-			rays_->getRayCastData().download((int*)depth_.data, (uchar3*)rgb_.data, rays_->getRayCastParams(), scene_->getIntegrationStream());
-			cudaSafeCall(cudaStreamSynchronize(scene_->getIntegrationStream()));
-		}
-	}
-
-	return true;
-}
-
-bool VirtualSource::isReady() {
-	return true;
-}
diff --git a/applications/reconstruct/src/voxel_scene.cpp b/applications/reconstruct/src/voxel_scene.cpp
deleted file mode 100644
index 09067b1f2334e0190e27022b64d374e31532f8c2..0000000000000000000000000000000000000000
--- a/applications/reconstruct/src/voxel_scene.cpp
+++ /dev/null
@@ -1,466 +0,0 @@
-#include <ftl/voxel_scene.hpp>
-#include "depth_camera_cuda.hpp"
-
-#include <opencv2/core/cuda_stream_accessor.hpp>
-
-#include <vector>
-
-using namespace ftl::voxhash;
-using ftl::rgbd::Source;
-using ftl::rgbd::Channel;
-using ftl::Configurable;
-using cv::Mat;
-using std::vector;
-
-#define 	SAFE_DELETE_ARRAY(a)   { delete [] (a); (a) = NULL; }
-
-//extern "C" void resetCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
-//extern "C" void resetHashBucketMutexCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, cudaStream_t);
-//extern "C" void allocCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, int camid, const DepthCameraParams &depthCameraParams, cudaStream_t);
-//extern "C" void fillDecisionArrayCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, const DepthCameraData& depthCameraData);
-//extern "C" void compactifyHashCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
-//extern "C" unsigned int compactifyHashAllInOneCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
-//extern "C" void integrateDepthMapCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams, const DepthCameraData& depthCameraData, const DepthCameraParams& depthCameraParams, cudaStream_t);
-//extern "C" void bindInputDepthColorTextures(const DepthCameraData& depthCameraData);
-
-
-SceneRep::SceneRep(nlohmann::json &config) : Configurable(config), m_frameCount(0), do_reset_(false) {
-	_initCUDA();
-
-	// Allocates voxel structure on GPU
-	_create(_parametersFromConfig());
-
-	on("SDFVoxelSize", [this](const ftl::config::Event &e) {
-		do_reset_ = true;
-	});
-	on("hashNumSDFBlocks", [this](const ftl::config::Event &e) {
-		do_reset_ = true;
-	});
-	on("hashNumBuckets", [this](const ftl::config::Event &e) {
-		do_reset_ = true;
-	});
-	on("hashMaxCollisionLinkedListSize", [this](const ftl::config::Event &e) {
-		do_reset_ = true;
-	});
-	on("SDFTruncation", [this](const ftl::config::Event &e) {
-		m_hashParams.m_truncation = value("SDFTruncation", 0.1f);
-	});
-	on("SDFTruncationScale", [this](const ftl::config::Event &e) {
-		m_hashParams.m_truncScale = value("SDFTruncationScale", 0.01f);
-	});
-	on("SDFMaxIntegrationDistance", [this](const ftl::config::Event &e) {
-		m_hashParams.m_maxIntegrationDistance = value("SDFMaxIntegrationDistance", 10.0f);
-	});
-	on("showRegistration", [this](const ftl::config::Event &e) {
-		reg_mode_ = value("showRegistration", false);
-	});
-
-	reg_mode_ = value("showRegistration", false);
-
-	cudaSafeCall(cudaStreamCreate(&integ_stream_));
-	//integ_stream_ = 0;
-}
-
-SceneRep::~SceneRep() {
-	_destroy();
-	cudaStreamDestroy(integ_stream_);
-}
-
-bool SceneRep::_initCUDA() {
-	// Do an initial CUDA check
-	int cuda_device_count = 0;
-	cudaSafeCall(cudaGetDeviceCount(&cuda_device_count));
-	CHECK_GE(cuda_device_count, 1) << "No CUDA devices found";
-
-	LOG(INFO) << "CUDA Devices (" << cuda_device_count << "):";
-
-	vector<cudaDeviceProp> properties(cuda_device_count);
-	for (int i=0; i<cuda_device_count; i++) {
-		cudaSafeCall(cudaGetDeviceProperties(&properties[i], i));
-		LOG(INFO) << " - " << properties[i].name;
-	}
-
-	int desired_device = value("cudaDevice", 0);
-	cuda_device_ = (desired_device < cuda_device_count) ? desired_device : cuda_device_count-1;
-	cudaSafeCall(cudaSetDevice(cuda_device_));
-
-	// TODO:(Nick) Check memory is sufficient
-	// TODO:(Nick) Find out what our compute capability should be.
-
-	LOG(INFO) << "CUDA Compute: " << properties[cuda_device_].major << "." << properties[cuda_device_].minor;
-
-	return true;
-}
-
-void SceneRep::addSource(ftl::rgbd::Source *src) {
-	auto &cam = cameras_.emplace_back();
-	cam.source = src;
-	cam.params.m_imageWidth = 0;
-	src->setChannel(Channel::Depth);
-}
-
-extern "C" void updateCUDACameraConstant(ftl::voxhash::DepthCameraCUDA *data, int count);
-
-void SceneRep::_updateCameraConstant() {
-	std::vector<ftl::voxhash::DepthCameraCUDA> cams(cameras_.size());
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		cameras_[i].gpu.data.pose = MatrixConversion::toCUDA(cameras_[i].source->getPose().cast<float>());
-		cameras_[i].gpu.data.poseInverse = MatrixConversion::toCUDA(cameras_[i].source->getPose().cast<float>().inverse());
-		cams[i] = cameras_[i].gpu.data;
-	}
-	updateCUDACameraConstant(cams.data(), cams.size());
-}
-
-int SceneRep::upload() {
-	int active = 0;
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		cameras_[i].source->grab();
-	}
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		auto &cam = cameras_[i];
-
-		if (!cam.source->isReady()) {
-			cam.params.m_imageWidth = 0;
-			// TODO(Nick) : Free gpu allocs if was ready before
-			LOG(INFO) << "Source not ready: " << cam.source->getURI();
-			continue;
-		} else {
-			auto in = cam.source;
-
-			cam.params.fx = in->parameters().fx;
-			cam.params.fy = in->parameters().fy;
-			cam.params.mx = -in->parameters().cx;
-			cam.params.my = -in->parameters().cy;
-
-			// Only now do we have camera parameters for allocations...
-			if (cam.params.m_imageWidth == 0) {
-				cam.params.m_imageWidth = in->parameters().width;
-				cam.params.m_imageHeight = in->parameters().height;
-				cam.params.m_sensorDepthWorldMax = in->parameters().maxDepth;
-				cam.params.m_sensorDepthWorldMin = in->parameters().minDepth;
-				cam.gpu.alloc(cam.params, true);
-				LOG(INFO) << "GPU Allocated camera " << i;
-			}
-		}
-
-		cam.params.flags = m_frameCount;
-	}
-
-	_updateCameraConstant();
-	//cudaSafeCall(cudaDeviceSynchronize());
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		auto &cam = cameras_[i];
-
-		// Get the RGB-Depth frame from input
-		Source *input = cam.source;
-		Mat rgb, depth;
-
-		// TODO(Nick) Direct GPU upload to save copy
-		input->getFrames(rgb,depth);
-		
-		active += 1;
-
-		if (depth.cols == 0) continue;
-
-		// Must be in RGBA for GPU
-		Mat rgbt, rgba;
-		cv::cvtColor(rgb,rgbt, cv::COLOR_BGR2Lab);
-		cv::cvtColor(rgbt,rgba, cv::COLOR_BGR2BGRA);
-
-		// Send to GPU and merge view into scene
-		//cam.gpu.updateParams(cam.params);
-		cam.gpu.updateData(depth, rgba, cam.stream);
-
-		//setLastRigidTransform(input->getPose().cast<float>());
-
-		//make the rigid transform available on the GPU
-		//m_hashData.updateParams(m_hashParams, cv::cuda::StreamAccessor::getStream(cam.stream));
-
-		//if (i > 0) cudaSafeCall(cudaStreamSynchronize(cv::cuda::StreamAccessor::getStream(cameras_[i-1].stream)));
-
-		//allocate all hash blocks which are corresponding to depth map entries
-		//if (value("voxels", false)) _alloc(i, cv::cuda::StreamAccessor::getStream(cam.stream));
-
-		// Calculate normals
-	}
-
-	// Must have finished all allocations and rendering before next integration
-	cudaSafeCall(cudaDeviceSynchronize());
-
-	return active;
-}
-
-int SceneRep::upload(ftl::rgbd::FrameSet &fs) {
-	int active = 0;
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		auto &cam = cameras_[i];
-
-		if (!cam.source->isReady()) {
-			cam.params.m_imageWidth = 0;
-			// TODO(Nick) : Free gpu allocs if was ready before
-			LOG(INFO) << "Source not ready: " << cam.source->getURI();
-			continue;
-		} else {
-			auto in = cam.source;
-
-			cam.params.fx = in->parameters().fx;
-			cam.params.fy = in->parameters().fy;
-			cam.params.mx = -in->parameters().cx;
-			cam.params.my = -in->parameters().cy;
-
-			// Only now do we have camera parameters for allocations...
-			if (cam.params.m_imageWidth == 0) {
-				cam.params.m_imageWidth = in->parameters().width;
-				cam.params.m_imageHeight = in->parameters().height;
-				cam.params.m_sensorDepthWorldMax = in->parameters().maxDepth;
-				cam.params.m_sensorDepthWorldMin = in->parameters().minDepth;
-				cam.gpu.alloc(cam.params, true);
-				LOG(INFO) << "GPU Allocated camera " << i;
-			}
-		}
-
-		cam.params.flags = m_frameCount;
-	}
-
-	_updateCameraConstant();
-	//cudaSafeCall(cudaDeviceSynchronize());
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		auto &cam = cameras_[i];
-		auto &chan1 = fs.frames[i].get<cv::Mat>(Channel::Colour);
-		auto &chan2 = fs.frames[i].get<cv::Mat>(fs.sources[i]->getChannel());
-
-		auto test = fs.frames[i].createTexture<uchar4>(Channel::Flow, ftl::rgbd::Format<uchar4>(100,100));
-
-		// Get the RGB-Depth frame from input
-		Source *input = cam.source;
-		//Mat rgb, depth;
-
-		// TODO(Nick) Direct GPU upload to save copy
-		//input->getFrames(rgb,depth);
-		
-		active += 1;
-
-		//if (depth.cols == 0) continue;
-
-		// Must be in RGBA for GPU
-		Mat rgbt, rgba;
-		cv::cvtColor(chan1,rgbt, cv::COLOR_BGR2Lab);
-		cv::cvtColor(rgbt,rgba, cv::COLOR_BGR2BGRA);
-
-		// Send to GPU and merge view into scene
-		//cam.gpu.updateParams(cam.params);
-		cam.gpu.updateData(chan2, rgba, cam.stream);
-
-		//setLastRigidTransform(input->getPose().cast<float>());
-
-		//make the rigid transform available on the GPU
-		//m_hashData.updateParams(m_hashParams, cv::cuda::StreamAccessor::getStream(cam.stream));
-
-		//if (i > 0) cudaSafeCall(cudaStreamSynchronize(cv::cuda::StreamAccessor::getStream(cameras_[i-1].stream)));
-
-		//allocate all hash blocks which are corresponding to depth map entries
-		//if (value("voxels", false)) _alloc(i, cv::cuda::StreamAccessor::getStream(cam.stream));
-
-		// Calculate normals
-	}
-
-	// Must have finished all allocations and rendering before next integration
-	cudaSafeCall(cudaDeviceSynchronize());
-
-	return active;
-}
-
-void SceneRep::integrate() {
-	/*for (size_t i=0; i<cameras_.size(); ++i) {
-		auto &cam = cameras_[i];
-
-		setLastRigidTransform(cam.source->getPose().cast<float>());
-		//m_hashData.updateParams(m_hashParams);
-
-		//generate a linear hash array with only occupied entries
-		_compactifyVisible(cam.params);
-
-		//volumetrically integrate the depth data into the depth SDFBlocks
-		//_integrateDepthMap(cam.gpu, cam.params);
-
-		//_garbageCollect();
-
-		m_numIntegratedFrames++;
-	}*/
-
-	_compactifyAllocated();
-	_integrateDepthMaps();
-}
-
-void SceneRep::garbage() {
-	//_compactifyAllocated();
-	//if (value("voxels", false)) _garbageCollect();
-
-	//cudaSafeCall(cudaStreamSynchronize(integ_stream_));
-}
-
-/*void SceneRep::integrate(const Eigen::Matrix4f& lastRigidTransform, const DepthCameraData& depthCameraData, const DepthCameraParams& depthCameraParams, unsigned int* d_bitMask) {
-		
-	setLastRigidTransform(lastRigidTransform);
-
-	//make the rigid transform available on the GPU
-	m_hashData.updateParams(m_hashParams);
-
-	//allocate all hash blocks which are corresponding to depth map entries
-	_alloc(depthCameraData, depthCameraParams, d_bitMask);
-
-	//generate a linear hash array with only occupied entries
-	_compactifyHashEntries();
-
-	//volumetrically integrate the depth data into the depth SDFBlocks
-	_integrateDepthMap(depthCameraData, depthCameraParams);
-
-	_garbageCollect(depthCameraData);
-
-	m_numIntegratedFrames++;
-}*/
-
-/*void SceneRep::setLastRigidTransformAndCompactify(const Eigen::Matrix4f& lastRigidTransform, const DepthCameraData& depthCameraData) {
-	setLastRigidTransform(lastRigidTransform);
-	_compactifyHashEntries();
-}*/
-
-/* Nick: To reduce weights between frames */
-void SceneRep::nextFrame() {
-	if (do_reset_) {
-		do_reset_ = false;
-		_destroy();
-		_create(_parametersFromConfig());
-	} else {
-		//ftl::cuda::compactifyAllocated(m_hashData, m_hashParams, integ_stream_);
-		//if (reg_mode_) ftl::cuda::clearVoxels(m_hashData, m_hashParams); 
-		//else ftl::cuda::starveVoxels(m_hashData, m_hashParams, integ_stream_);
-		m_numIntegratedFrames = 0;
-	}
-}
-
-//! resets the hash to the initial state (i.e., clears all data)
-void SceneRep::reset() {
-	m_numIntegratedFrames = 0;
-	m_hashData.updateParams(m_hashParams);
-	resetCUDA(m_hashData, m_hashParams);
-}
-
-unsigned int SceneRep::getOccupiedCount() {
-	unsigned int count;
-	cudaSafeCall(cudaMemcpy(&count, m_hashData.d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost));
-	return count+1;	//there is one more free than the address suggests (0 would be also a valid address)
-}
-
-HashParams SceneRep::_parametersFromConfig() {
-	//auto &cfg = ftl::config::resolve(config);
-	HashParams params;
-	// First camera view is set to identity pose to be at the centre of
-	// the virtual coordinate space.
-	params.m_hashNumBuckets = value("hashNumBuckets", 100000);
-	params.m_virtualVoxelSize = value("SDFVoxelSize", 0.006f);
-	params.m_maxIntegrationDistance = value("SDFMaxIntegrationDistance", 10.0f);
-	params.m_truncation = value("SDFTruncation", 0.1f);
-	params.m_truncScale = value("SDFTruncationScale", 0.01f);
-	params.m_integrationWeightSample = value("SDFIntegrationWeightSample", 10);
-	params.m_integrationWeightMax = value("SDFIntegrationWeightMax", 255);
-	params.m_spatialSmoothing = value("spatialSmoothing", 0.04f); // 4cm
-	params.m_colourSmoothing = value("colourSmoothing", 20.0f);
-	params.m_confidenceThresh = value("confidenceThreshold", 20.0f);
-	params.m_flags = 0;
-	params.m_flags |= (value("clipping", false)) ? ftl::voxhash::kFlagClipping : 0;
-	params.m_flags |= (value("mls", false)) ? ftl::voxhash::kFlagMLS : 0;
-	params.m_maxBounds = make_float3(
-		value("bbox_x_max", 2.0f),
-		value("bbox_y_max", 2.0f),
-		value("bbox_z_max", 2.0f));
-	params.m_minBounds = make_float3(
-		value("bbox_x_min", -2.0f),
-		value("bbox_y_min", -2.0f),
-		value("bbox_z_min", -2.0f));
-	return params;
-}
-
-void SceneRep::_create(const HashParams& params) {
-	m_hashParams = params;
-	m_hashData.allocate(m_hashParams);
-
-	reset();
-}
-
-void SceneRep::_destroy() {
-	m_hashData.free();
-}
-
-void SceneRep::_alloc(int camid, cudaStream_t stream) {
-	// NOTE (nick): We might want this later...
-	/*if (false) {
-		// TODO(Nick) Make this work without memcpy to host first
-		allocate until all blocks are allocated
-		unsigned int prevFree = 0; //getHeapFreeCount();
-		while (1) {
-			resetHashBucketMutexCUDA(m_hashData, m_hashParams, stream);
-			allocCUDA(m_hashData, m_hashParams, camid, cameras_[camid].params, stream);
-
-			unsigned int currFree = getHeapFreeCount();
-
-			if (prevFree != currFree) {
-				prevFree = currFree;
-			}
-			else {
-				break;
-			}
-		}
-	}
-	else {*/
-		//this version is faster, but it doesn't guarantee that all blocks are allocated (staggers alloc to the next frame)
-		//resetHashBucketMutexCUDA(m_hashData, m_hashParams, stream);
-		//allocCUDA(m_hashData, m_hashParams, camid, cameras_[camid].params, stream);
-	//}
-}
-
-
-void SceneRep::_compactifyVisible(const DepthCameraParams &camera) { //const DepthCameraData& depthCameraData) {
-	//ftl::cuda::compactifyOccupied(m_hashData, m_hashParams, integ_stream_);		//this version uses atomics over prefix sums, which has a much better performance
-	//m_hashData.updateParams(m_hashParams);	//make sure numOccupiedBlocks is updated on the GPU
-}
-
-void SceneRep::_compactifyAllocated() {
-	//ftl::cuda::compactifyAllocated(m_hashData, m_hashParams, integ_stream_);		//this version uses atomics over prefix sums, which has a much better performance
-	//std::cout << "Occ blocks = " << m_hashParams.m_numOccupiedBlocks << std::endl;
-	//m_hashData.updateParams(m_hashParams);	//make sure numOccupiedBlocks is updated on the GPU
-}
-
-/*void SceneRep::_integrateDepthMap(const DepthCameraData& depthCameraData, const DepthCameraParams& depthCameraParams) {
-	if (!reg_mode_) ftl::cuda::integrateDepthMap(m_hashData, m_hashParams, depthCameraData, depthCameraParams, integ_stream_);
-	else ftl::cuda::integrateRegistration(m_hashData, m_hashParams, depthCameraData, depthCameraParams, integ_stream_);
-}*/
-
-//extern "C" void bilateralFilterFloatMap(float* d_output, float* d_input, float sigmaD, float sigmaR, unsigned int width, unsigned int height);
-
-void SceneRep::_integrateDepthMaps() {
-	//cudaSafeCall(cudaDeviceSynchronize());
-
-	for (size_t i=0; i<cameras_.size(); ++i) {
-		if (!cameras_[i].source->isReady()) continue;
-		//ftl::cuda::clear_depth(*(cameras_[i].gpu.depth2_tex_), integ_stream_);
-		cameras_[i].gpu.computeNormals(integ_stream_);
-		ftl::cuda::clear_points(*(cameras_[i].gpu.points_tex_), integ_stream_);
-		ftl::cuda::mls_smooth(*(cameras_[i].gpu.points_tex_), m_hashParams, cameras_.size(), i, integ_stream_);
-		//ftl::cuda::int_to_float(*(cameras_[i].gpu.depth2_tex_), *(cameras_[i].gpu.depth_tex_), 1.0f / 1000.0f, integ_stream_);
-		//ftl::cuda::hole_fill(*(cameras_[i].gpu.depth2_tex_), *(cameras_[i].gpu.depth_tex_), cameras_[i].params, integ_stream_);
-		//bilateralFilterFloatMap(cameras_[i].gpu.depth_tex_->devicePtr(), cameras_[i].gpu.depth3_tex_->devicePtr(), 3, 7, cameras_[i].gpu.depth_tex_->width(), cameras_[i].gpu.depth_tex_->height());
-	}
-	//if (value("voxels", false)) ftl::cuda::integrateDepthMaps(m_hashData, m_hashParams, cameras_.size(), integ_stream_);
-}
-
-void SceneRep::_garbageCollect() {
-	//ftl::cuda::garbageCollectIdentify(m_hashData, m_hashParams, integ_stream_);
-	//resetHashBucketMutexCUDA(m_hashData, m_hashParams, integ_stream_);	//needed if linked lists are enabled -> for memeory deletion
-	//ftl::cuda::garbageCollectFree(m_hashData, m_hashParams, integ_stream_);
-}
diff --git a/applications/recorder/CMakeLists.txt b/applications/recorder/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d7b668c8e9f7dafe519a25f24499ff9c70d6074
--- /dev/null
+++ b/applications/recorder/CMakeLists.txt
@@ -0,0 +1,16 @@
+set(RECSRC
+    src/main.cpp
+    src/registration.cpp
+)
+
+add_executable(ftl-record ${RECSRC})
+
+target_include_directories(ftl-record PUBLIC
+	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+	$<INSTALL_INTERFACE:include>
+	PRIVATE src)
+
+#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(ftl-record ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet)
+
+
diff --git a/applications/recorder/src/main.cpp b/applications/recorder/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a6f8926a5afa5d02112ca943c6a48af89a17c5fa
--- /dev/null
+++ b/applications/recorder/src/main.cpp
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2019 Nicolas Pope. All rights reserved.
+ *
+ * See LICENSE.
+ */
+
+#define LOGURU_WITH_STREAMS 1
+#include <loguru.hpp>
+#include <ftl/config.h>
+#include <ftl/configuration.hpp>
+#include <ftl/rgbd.hpp>
+#include <ftl/rgbd/virtual.hpp>
+#include <ftl/rgbd/streamer.hpp>
+#include <ftl/slave.hpp>
+#include <ftl/rgbd/group.hpp>
+#include <ftl/threads.hpp>
+#include <ftl/codecs/writer.hpp>
+
+
+#include <fstream>
+#include <string>
+#include <vector>
+#include <thread>
+#include <chrono>
+
+#include <opencv2/opencv.hpp>
+#include <ftl/net/universe.hpp>
+
+#include "registration.hpp"
+
+#include <cuda_profiler_api.h>
+
+#ifdef WIN32
+#pragma comment(lib, "Rpcrt4.lib")
+#endif
+
+using ftl::net::Universe;
+using std::string;
+using std::vector;
+using ftl::rgbd::Source;
+using ftl::config::json_t;
+using ftl::codecs::Channel;
+
+using json = nlohmann::json;
+using std::this_thread::sleep_for;
+using std::chrono::milliseconds;
+//using std::mutex;
+//using std::unique_lock;
+
+//using cv::Mat;
+
+using ftl::registration::loadTransformations;
+using ftl::registration::saveTransformations;
+
+static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
+  Eigen::Affine3d rx =
+      Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+  Eigen::Affine3d ry =
+      Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+  Eigen::Affine3d rz =
+      Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+  return rz * rx * ry;
+}
+
+static void writeSourceProperties(ftl::codecs::Writer &writer, int id, ftl::rgbd::Source *src) {
+	ftl::codecs::StreamPacket spkt;
+	ftl::codecs::Packet pkt;
+
+	spkt.timestamp = 0;
+	spkt.streamID = id;
+	spkt.channel = Channel::Calibration;
+	spkt.channel_count = 1;
+	pkt.codec = ftl::codecs::codec_t::CALIBRATION;
+	pkt.definition = ftl::codecs::definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = std::move(std::vector<uint8_t>((uint8_t*)&src->parameters(), (uint8_t*)&src->parameters() + sizeof(ftl::rgbd::Camera)));
+
+	writer.write(spkt, pkt);
+
+	spkt.channel = Channel::Pose;
+	pkt.codec = ftl::codecs::codec_t::POSE;
+	pkt.definition = ftl::codecs::definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = std::move(std::vector<uint8_t>((uint8_t*)src->getPose().data(), (uint8_t*)src->getPose().data() + 4*4*sizeof(double)));
+
+	writer.write(spkt, pkt);
+}
+
+static void run(ftl::Configurable *root) {
+	Universe *net = ftl::create<Universe>(root, "net");
+	ftl::ctrl::Slave slave(net, root);
+
+	// Controls
+	auto *controls = ftl::create<ftl::Configurable>(root, "controls");
+	
+	net->start();
+	net->waitConnections();
+	
+	// Create a vector of all input RGB-Depth sources
+	auto sources = ftl::createArray<Source>(root, "sources", net);
+
+	if (sources.size() == 0) {
+		LOG(ERROR) << "No sources configured!";
+		return;
+	}
+
+	// Create scene transform, intended for axis aligning the walls and floor
+	Eigen::Matrix4d transform;
+	if (root->getConfig()["transform"].is_object()) {
+		auto &c = root->getConfig()["transform"];
+		float rx = c.value("pitch", 0.0f);
+		float ry = c.value("yaw", 0.0f);
+		float rz = c.value("roll", 0.0f);
+		float x = c.value("x", 0.0f);
+		float y = c.value("y", 0.0f);
+		float z = c.value("z", 0.0f);
+
+		Eigen::Affine3d r = create_rotation_matrix(rx, ry, rz);
+		Eigen::Translation3d trans(Eigen::Vector3d(x,y,z));
+		Eigen::Affine3d t(trans);
+		transform = t.matrix() * r.matrix();
+		LOG(INFO) << "Set transform: " << transform;
+	} else {
+		transform.setIdentity();
+	}
+
+	// Must find pose for each source...
+	if (sources.size() > 1) {
+		std::map<std::string, Eigen::Matrix4d> transformations;
+
+		if (loadTransformations(string(FTL_LOCAL_CONFIG_ROOT) + "/registration.json", transformations)) {
+			LOG(INFO) << "Loaded camera trasformations from file";
+		}
+		else {
+			LOG(ERROR) << "Error loading camera transformations from file";
+		}
+
+		for (auto &input : sources) {
+			string uri = input->getURI();
+			auto T = transformations.find(uri);
+			if (T == transformations.end()) {
+				LOG(ERROR) << "Camera pose for " + uri + " not found in transformations";
+				//LOG(WARNING) << "Using only first configured source";
+				// TODO: use target source if configured and found
+				//sources = { sources[0] };
+				//sources[0]->setPose(Eigen::Matrix4d::Identity());
+				//break;
+				input->setPose(transform * input->getPose());
+				continue;
+			}
+			input->setPose(transform * T->second);
+		}
+	}
+
+	ftl::rgbd::FrameSet scene_A;  // Output of align process
+	ftl::rgbd::FrameSet scene_B;  // Input of render process
+
+	ftl::rgbd::Streamer *stream = ftl::create<ftl::rgbd::Streamer>(root, "stream", net);
+	ftl::rgbd::Group *group = new ftl::rgbd::Group;
+
+	for (size_t i=0; i<sources.size(); i++) {
+		Source *in = sources[i];
+		in->setChannel(Channel::Right);
+		group->addSource(in);
+	}
+
+	// ---- Recording code -----------------------------------------------------
+
+	std::ofstream fileout;
+	ftl::codecs::Writer writer(fileout);
+	auto recorder = [&writer,&group](ftl::rgbd::Source *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		ftl::codecs::StreamPacket s = spkt;
+		// Patch stream ID to match order in group
+		s.streamID = group->streamID(src);
+
+
+        //LOG(INFO) << "Record packet: " << (int)s.streamID << "," << s.timestamp;
+		writer.write(s, pkt);
+	};
+    group->addRawCallback(std::function(recorder));
+
+	root->set("record", false);
+
+	// Allow stream recording
+	root->on("record", [&group,&fileout,&writer,&recorder](const ftl::config::Event &e) {
+		if (e.entity->value("record", false)) {
+			char timestamp[18];
+			std::time_t t=std::time(NULL);
+			std::strftime(timestamp, sizeof(timestamp), "%F-%H%M%S", std::localtime(&t));
+			fileout.open(std::string(timestamp) + ".ftl");
+
+			writer.begin();
+            LOG(INFO) << "Writer begin";
+
+			// TODO: Write pose+calibration+config packets
+			auto sources = group->sources();
+			for (int i=0; i<sources.size(); ++i) {
+				writeSourceProperties(writer, i, sources[i]);
+			}
+		} else {
+			//group->removeRawCallback(recorder);
+            LOG(INFO) << "Writer end";
+			writer.end();
+			fileout.close();
+		}
+	});
+
+	// -------------------------------------------------------------------------
+
+	stream->setLatency(6);  // FIXME: This depends on source!?
+	stream->add(group);
+	stream->run();
+
+	bool busy = false;
+
+	group->setLatency(4);
+	group->setName("ReconGroup");
+	group->sync([](ftl::rgbd::FrameSet &fs) -> bool {
+		return true;
+	});
+
+	LOG(INFO) << "Shutting down...";
+	ftl::timer::stop();
+	slave.stop();
+	net->shutdown();
+	ftl::pool.stop();
+
+	cudaProfilerStop();
+
+	LOG(INFO) << "Deleting...";
+
+	delete stream;
+	delete net;
+	delete group;
+
+	ftl::config::cleanup();  // Remove any last configurable objects.
+	LOG(INFO) << "Done.";
+}
+
+int main(int argc, char **argv) {
+	run(ftl::configure(argc, argv, "reconstruction_default"));
+}
diff --git a/applications/recorder/src/registration.cpp b/applications/recorder/src/registration.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a86f2b3d1c6c5ca8c172a303ad979bf741de67de
--- /dev/null
+++ b/applications/recorder/src/registration.cpp
@@ -0,0 +1,69 @@
+#include "registration.hpp"
+#include <fstream>
+#define LOGURU_WITH_STREAMS 1
+#include <loguru.hpp>
+
+
+namespace ftl {
+namespace registration {
+
+using ftl::rgbd::Camera;
+using ftl::rgbd::Source;
+
+using std::string;
+using std::vector;
+using std::pair;
+using std::map;
+using std::optional;
+
+using cv::Mat;
+using Eigen::Matrix4f;
+using Eigen::Matrix4d;
+
+void from_json(nlohmann::json &json, map<string, Matrix4d> &transformations) {
+	for (auto it = json.begin(); it != json.end(); ++it) {
+		Eigen::Matrix4d m;
+		auto data = m.data();
+		for(size_t i = 0; i < 16; i++) { data[i] = it.value()[i]; }
+		transformations[it.key()] = m;
+	}
+}
+
+void to_json(nlohmann::json &json, map<string, Matrix4d> &transformations) {
+	for (auto &item : transformations) {
+		auto val = nlohmann::json::array();
+		for(size_t i = 0; i < 16; i++) { val.push_back((float) item.second.data()[i]); }
+		json[item.first] = val;
+	}
+}
+
+bool loadTransformations(const string &path, map<string, Matrix4d> &data) {
+	std::ifstream file(path);
+	if (!file.is_open()) {
+		LOG(ERROR) << "Error loading transformations from file " << path;
+		return false;
+	}
+	
+	nlohmann::json json_registration;
+	file >> json_registration;
+	from_json(json_registration, data);
+	return true;
+}
+
+bool saveTransformations(const string &path, map<string, Matrix4d> &data) {
+	nlohmann::json data_json;
+	to_json(data_json, data);
+	std::ofstream file(path);
+
+	if (!file.is_open()) {
+		LOG(ERROR) << "Error writing transformations to file " << path;
+		return false;
+	}
+
+	file << std::setw(4) << data_json;
+	return true;
+}
+
+
+} // namespace registration
+} // namespace ftl
diff --git a/applications/recorder/src/registration.hpp b/applications/recorder/src/registration.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..1af4de8a5e3cbc8b64b291e4e115165e197cf28a
--- /dev/null
+++ b/applications/recorder/src/registration.hpp
@@ -0,0 +1,22 @@
+#ifndef _FTL_RECONSTRUCT_REGISTRATION_HPP_
+#define _FTL_RECONSTRUCT_REGISTRATION_HPP_
+
+#include <ftl/config.h>
+#include <ftl/configurable.hpp>
+#include <ftl/rgbd.hpp>
+#include <opencv2/opencv.hpp>
+
+
+namespace ftl {
+namespace registration {
+
+void to_json(nlohmann::json &json, std::map<std::string, Eigen::Matrix4d> &transformations);
+void from_json(nlohmann::json &json, std::map<std::string, Eigen::Matrix4d> &transformations);
+
+bool loadTransformations(const std::string &path, std::map<std::string, Eigen::Matrix4d> &data);
+bool saveTransformations(const std::string &path, std::map<std::string, Eigen::Matrix4d> &data);
+
+}
+}
+
+#endif  // _FTL_RECONSTRUCT_REGISTRATION_HPP_
\ No newline at end of file
diff --git a/components/codecs/CMakeLists.txt b/components/codecs/CMakeLists.txt
index f951f94a6c10127c989862d154dbe6cee896812a..63e311b247f4c7eb1462e0fe9ec089c282e00218 100644
--- a/components/codecs/CMakeLists.txt
+++ b/components/codecs/CMakeLists.txt
@@ -5,6 +5,9 @@ set(CODECSRC
 	src/opencv_encoder.cpp
 	src/opencv_decoder.cpp
 	src/generate.cpp
+	src/writer.cpp
+	src/reader.cpp
+	src/channels.cpp
 )
 
 if (HAVE_NVPIPE)
diff --git a/components/codecs/include/ftl/codecs/bitrates.hpp b/components/codecs/include/ftl/codecs/bitrates.hpp
index 19572daa5600fd7b78106e951307e92e17f51e33..976e039fc07b3dd15e7f4ede219ed06c27cff0fe 100644
--- a/components/codecs/include/ftl/codecs/bitrates.hpp
+++ b/components/codecs/include/ftl/codecs/bitrates.hpp
@@ -14,7 +14,15 @@ enum struct codec_t : uint8_t {
 	JPG = 0,
 	PNG,
     H264,
-    HEVC  // H265
+    HEVC,  // H265
+
+	// TODO: Add audio codecs
+	WAV,
+
+	JSON = 100,		// A JSON string
+	CALIBRATION,	// Camera parameters object
+	POSE,			// 4x4 eigen matrix
+	RAW				// Some unknown binary format (msgpack?)
 };
 
 /**
@@ -28,7 +36,11 @@ enum struct definition_t : uint8_t {
 	SD576 = 4,
 	SD480 = 5,
 	LD360 = 6,
-	Any = 7
+	Any = 7,
+
+	HTC_VIVE = 8
+
+	// TODO: Add audio definitions
 };
 
 /**
@@ -56,7 +68,7 @@ enum struct bitrate_t {
  * the best quality and kPreset9 is the worst. The use of presets is useful for
  * adaptive bitrate scenarios where the numbers are increased or decreased.
  */
-typedef uint8_t preset_t;
+typedef int8_t preset_t;
 static const preset_t kPreset0 = 0;
 static const preset_t kPreset1 = 1;
 static const preset_t kPreset2 = 2;
@@ -71,6 +83,8 @@ static const preset_t kPresetBest = 0;
 static const preset_t kPresetWorst = 9;
 static const preset_t kPresetLQThreshold = 4;
 
+static const preset_t kPresetHTCVive = -1;
+
 /**
  * Represents the details of each preset codec configuration.
  */
diff --git a/components/rgbd-sources/include/ftl/rgbd/channels.hpp b/components/codecs/include/ftl/codecs/channels.hpp
similarity index 67%
rename from components/rgbd-sources/include/ftl/rgbd/channels.hpp
rename to components/codecs/include/ftl/codecs/channels.hpp
index 9bf731a5319fa47c501a91e09f1e2acc48c5a4a8..d1a0e265b0548adc1fc510947f12d408ed3b2840 100644
--- a/components/rgbd-sources/include/ftl/rgbd/channels.hpp
+++ b/components/codecs/include/ftl/codecs/channels.hpp
@@ -5,30 +5,47 @@
 #include <msgpack.hpp>
 
 namespace ftl {
-namespace rgbd {
+namespace codecs {
 
 enum struct Channel : int {
-    None = -1,
-    Colour = 0,         // 8UC3 or 8UC4
-    Left = 0,
-    Depth = 1,          // 32S or 32F
-    Right = 2,          // 8UC3 or 8UC4
-    Colour2 = 2,
-    Disparity = 3,
-    Depth2 = 3,
-    Deviation = 4,
-    Normals = 5,        // 32FC4
-    Points = 6,         // 32FC4
-    Confidence = 7,     // 32F
-    Contribution = 7,   // 32F
-    EnergyVector,       // 32FC4
-    Flow,               // 32F
-    Energy,             // 32F
-    LeftGray,
-    RightGray,
-    Overlay1
+    None			= -1,
+    Colour			= 0,	// 8UC3 or 8UC4
+    Left			= 0,
+    Depth			= 1,	// 32S or 32F
+    Right			= 2,	// 8UC3 or 8UC4
+    Colour2			= 2,
+    Disparity		= 3,
+    Depth2			= 3,
+    Deviation		= 4,
+    Normals			= 5,	// 32FC4
+    Points			= 6,	// 32FC4
+    Confidence		= 7,	// 32F
+    Contribution	= 7,	// 32F
+    EnergyVector	= 8,	// 32FC4
+    Flow			= 9,	// 32F
+    Energy			= 10,	// 32F
+	Mask			= 11,	// 32U
+	Density			= 12,	// 32F
+    LeftGray		= 13,	// Deprecated
+    RightGray		= 14,	// Deprecated
+    Overlay1		= 15,	// Unused
+
+	AudioLeft		= 32,
+	AudioRight		= 33,
+
+	Configuration	= 64,	// JSON Data
+	Calibration		= 65,	// Camera Parameters Object
+	Pose			= 66,	// Eigen::Matrix4d
+	Data			= 67	// Custom data, any codec.
 };
 
+inline bool isVideo(Channel c) { return (int)c < 32; };
+inline bool isAudio(Channel c) { return (int)c >= 32 && (int)c < 64; };
+inline bool isData(Channel c) { return (int)c >= 64; };
+
+std::string name(Channel c);
+int type(Channel c);
+
 class Channels {
     public:
 
@@ -37,8 +54,8 @@ class Channels {
 		iterator(const Channels &c, unsigned int ix) : channels_(c), ix_(ix) { }
 		iterator operator++();
 		iterator operator++(int junk);
-		inline ftl::rgbd::Channel operator*() { return static_cast<Channel>(static_cast<int>(ix_)); }
-		//ftl::rgbd::Channel operator->() { return ptr_; }
+		inline ftl::codecs::Channel operator*() { return static_cast<Channel>(static_cast<int>(ix_)); }
+		//ftl::codecs::Channel operator->() { return ptr_; }
 		inline bool operator==(const iterator& rhs) { return ix_ == rhs.ix_; }
 		inline bool operator!=(const iterator& rhs) { return ix_ != rhs.ix_; }
 		private:
@@ -100,9 +117,12 @@ inline Channels Channels::All() {
 static const Channels kNoChannels;
 static const Channels kAllChannels(0xFFFFFFFFu);
 
-inline bool isFloatChannel(ftl::rgbd::Channel chan) {
+inline bool isFloatChannel(ftl::codecs::Channel chan) {
 	switch (chan) {
 	case Channel::Depth		:
+    //case Channel::Normals   :
+	case Channel::Confidence:
+	case Channel::Density:
 	case Channel::Energy	: return true;
 	default					: return false;
 	}
@@ -111,14 +131,14 @@ inline bool isFloatChannel(ftl::rgbd::Channel chan) {
 }
 }
 
-MSGPACK_ADD_ENUM(ftl::rgbd::Channel);
+MSGPACK_ADD_ENUM(ftl::codecs::Channel);
 
-inline ftl::rgbd::Channels operator|(ftl::rgbd::Channel a, ftl::rgbd::Channel b) {
-    return ftl::rgbd::Channels(a) | b;
+inline ftl::codecs::Channels operator|(ftl::codecs::Channel a, ftl::codecs::Channel b) {
+    return ftl::codecs::Channels(a) | b;
 }
 
-inline ftl::rgbd::Channels operator+(ftl::rgbd::Channel a, ftl::rgbd::Channel b) {
-    return ftl::rgbd::Channels(a) | b;
+inline ftl::codecs::Channels operator+(ftl::codecs::Channel a, ftl::codecs::Channel b) {
+    return ftl::codecs::Channels(a) | b;
 }
 
 #endif  // _FTL_RGBD_CHANNELS_HPP_
diff --git a/components/codecs/include/ftl/codecs/hevc.hpp b/components/codecs/include/ftl/codecs/hevc.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f658635d6f239b4aa7a21331f60f6936c517ba93
--- /dev/null
+++ b/components/codecs/include/ftl/codecs/hevc.hpp
@@ -0,0 +1,112 @@
+#ifndef _FTL_CODECS_HEVC_HPP_
+#define _FTL_CODECS_HEVC_HPP_
+
+namespace ftl {
+namespace codecs {
+
+/**
+ * H.265 / HEVC codec utility functions.
+ */
+namespace hevc {
+
+/**
+ * HEVC Network Abstraction Layer Unit types.
+ */
+enum class NALType : int {
+	CODED_SLICE_TRAIL_N = 0,
+    CODED_SLICE_TRAIL_R = 1,
+
+    CODED_SLICE_TSA_N = 2,
+    CODED_SLICE_TSA_R = 3,
+
+    CODED_SLICE_STSA_N = 4,
+    CODED_SLICE_STSA_R = 5,
+
+    CODED_SLICE_RADL_N = 6,
+    CODED_SLICE_RADL_R = 7,
+
+    CODED_SLICE_RASL_N = 8,
+    CODED_SLICE_RASL_R = 9,
+
+    RESERVED_VCL_N10 = 10,
+    RESERVED_VCL_R11 = 11,
+    RESERVED_VCL_N12 = 12,
+    RESERVED_VCL_R13 = 13,
+    RESERVED_VCL_N14 = 14,
+    RESERVED_VCL_R15 = 15,
+
+    CODED_SLICE_BLA_W_LP = 16,
+    CODED_SLICE_BLA_W_RADL = 17,
+    CODED_SLICE_BLA_N_LP = 18,
+    CODED_SLICE_IDR_W_RADL = 19,
+    CODED_SLICE_IDR_N_LP = 20,
+    CODED_SLICE_CRA = 21,
+    RESERVED_IRAP_VCL22 = 22,
+    RESERVED_IRAP_VCL23 = 23,
+
+    RESERVED_VCL24 = 24,
+    RESERVED_VCL25 = 25,
+    RESERVED_VCL26 = 26,
+    RESERVED_VCL27 = 27,
+    RESERVED_VCL28 = 28,
+    RESERVED_VCL29 = 29,
+    RESERVED_VCL30 = 30,
+    RESERVED_VCL31 = 31,
+
+    VPS = 32,
+    SPS = 33,
+    PPS = 34,
+    ACCESS_UNIT_DELIMITER = 35,
+    EOS = 36,
+    EOB = 37,
+    FILLER_DATA = 38,
+    PREFIX_SEI = 39,
+    SUFFIX_SEI = 40,
+
+    RESERVED_NVCL41 = 41,
+    RESERVED_NVCL42 = 42,
+    RESERVED_NVCL43 = 43,
+    RESERVED_NVCL44 = 44,
+    RESERVED_NVCL45 = 45,
+    RESERVED_NVCL46 = 46,
+    RESERVED_NVCL47 = 47,
+    UNSPECIFIED_48 = 48,
+    UNSPECIFIED_49 = 49,
+    UNSPECIFIED_50 = 50,
+    UNSPECIFIED_51 = 51,
+    UNSPECIFIED_52 = 52,
+    UNSPECIFIED_53 = 53,
+    UNSPECIFIED_54 = 54,
+    UNSPECIFIED_55 = 55,
+    UNSPECIFIED_56 = 56,
+    UNSPECIFIED_57 = 57,
+    UNSPECIFIED_58 = 58,
+    UNSPECIFIED_59 = 59,
+    UNSPECIFIED_60 = 60,
+    UNSPECIFIED_61 = 61,
+    UNSPECIFIED_62 = 62,
+    UNSPECIFIED_63 = 63,
+    INVALID = 64
+};
+
+/**
+ * Extract the NAL unit type from the first NAL header.
+ * With NvPipe, the 5th byte contains the NAL Unit header.
+ */
+inline NALType getNALType(const std::vector<uint8_t> &data) {
+	return static_cast<NALType>((data[4] >> 1) & 0x3F);
+}
+
+/**
+ * Check the HEVC bitstream for an I-Frame. With NvPipe, all I-Frames start
+ * with a VPS NAL unit so just check for this.
+ */
+inline bool isIFrame(const std::vector<uint8_t> &data) {
+	return getNALType(data) == NALType::VPS;
+}
+
+}
+}
+}
+
+#endif  // _FTL_CODECS_HEVC_HPP_
diff --git a/components/codecs/include/ftl/codecs/nvpipe_decoder.hpp b/components/codecs/include/ftl/codecs/nvpipe_decoder.hpp
index c13b26ec0c3ab72198f357bdb1f1ad8fae8d1f5e..75807f05ee45c66d7a8b231c5d755190754f63df 100644
--- a/components/codecs/include/ftl/codecs/nvpipe_decoder.hpp
+++ b/components/codecs/include/ftl/codecs/nvpipe_decoder.hpp
@@ -23,6 +23,7 @@ class NvPipeDecoder : public ftl::codecs::Decoder {
 	bool is_float_channel_;
 	ftl::codecs::definition_t last_definition_;
 	MUTEX mutex_;
+	bool seen_iframe_;
 };
 
 }
diff --git a/components/codecs/include/ftl/codecs/packet.hpp b/components/codecs/include/ftl/codecs/packet.hpp
index 98e46a60122e6813fd22b0c0be0f09e40fbc96ce..edddd205a728e66943e6362b0d24b865272de48e 100644
--- a/components/codecs/include/ftl/codecs/packet.hpp
+++ b/components/codecs/include/ftl/codecs/packet.hpp
@@ -4,12 +4,21 @@
 #include <cstdint>
 #include <vector>
 #include <ftl/codecs/bitrates.hpp>
+#include <ftl/codecs/channels.hpp>
 
 #include <msgpack.hpp>
 
 namespace ftl {
 namespace codecs {
 
+/**
+ * First bytes of our file format.
+ */
+struct Header {
+	const char magic[4] = {'F','T','L','F'};
+	uint8_t version = 1;
+};
+
 /**
  * A single network packet for the compressed video stream. It includes the raw
  * data along with any block metadata required to reconstruct. The underlying
@@ -21,9 +30,10 @@ struct Packet {
 	ftl::codecs::definition_t definition;
 	uint8_t block_total;	// Packets expected per frame
 	uint8_t block_number; 	// This packets number within a frame
+	uint8_t flags;			// Codec dependent flags (eg. I-Frame or P-Frame)
 	std::vector<uint8_t> data;
 
-	MSGPACK_DEFINE(codec, definition, block_total, block_number, data);
+	MSGPACK_DEFINE(codec, definition, block_total, block_number, flags, data);
 };
 
 /**
@@ -33,9 +43,11 @@ struct Packet {
  */
 struct StreamPacket {
 	int64_t timestamp;
-	uint8_t channel;  // first bit = channel, second bit indicates second channel being sent
+	uint8_t streamID;  		// Source number... 255 = broadcast stream
+	uint8_t channel_count;	// Number of channels to expect for this frame to complete (usually 1 or 2)
+	ftl::codecs::Channel channel;		// Actual channel of this current set of packets
 
-	MSGPACK_DEFINE(timestamp, channel);
+	MSGPACK_DEFINE(timestamp, streamID, channel_count, channel);
 };
 
 }
diff --git a/components/codecs/include/ftl/codecs/reader.hpp b/components/codecs/include/ftl/codecs/reader.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..cdc50cad30bc07fcc7793b24fb4daf1b308b03a8
--- /dev/null
+++ b/components/codecs/include/ftl/codecs/reader.hpp
@@ -0,0 +1,61 @@
+#ifndef _FTL_CODECS_READER_HPP_
+#define _FTL_CODECS_READER_HPP_
+
+#include <iostream>
+#include <msgpack.hpp>
+#include <inttypes.h>
+#include <functional>
+
+#include <ftl/codecs/packet.hpp>
+#include <ftl/threads.hpp>
+
+namespace ftl {
+namespace codecs {
+
+class Reader {
+	public:
+	explicit Reader(std::istream &);
+	~Reader();
+
+	/**
+	 * Read packets up to and including requested timestamp. A provided callback
+	 * is called for each packet read, in order stored in file. Returns true if
+	 * there are still more packets available beyond specified timestamp, false
+	 * otherwise (end-of-file). Timestamps are in local (clock adjusted) time
+	 * and the timestamps stored in the file are aligned to the time when open
+	 * was called.
+	 */
+	bool read(int64_t ts, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &);
+
+	/**
+	 * An alternative version of read where packet events are generated for
+	 * specific streams, allowing different handlers for different streams.
+	 * This allows demuxing and is used by player sources. Each source can call
+	 * this read, only the first one will generate the data packets.
+	 */
+	bool read(int64_t ts);
+
+	void onPacket(int streamID, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &);
+
+	bool begin();
+	bool end();
+
+	inline int64_t getStartTime() const { return timestart_; };
+
+	private:
+	std::istream *stream_;
+	msgpack::unpacker buffer_;
+	std::list<std::tuple<StreamPacket,Packet>> data_;
+	bool has_data_;
+	int64_t timestart_;
+	bool playing_;
+
+	MUTEX mtx_;
+
+	std::vector<std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)>> handlers_;
+};
+
+}
+}
+
+#endif  // _FTL_CODECS_READER_HPP_
diff --git a/components/codecs/include/ftl/codecs/writer.hpp b/components/codecs/include/ftl/codecs/writer.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..044624ba837f17c83e4a22a16787ec86edd4ad67
--- /dev/null
+++ b/components/codecs/include/ftl/codecs/writer.hpp
@@ -0,0 +1,32 @@
+#ifndef _FTL_CODECS_WRITER_HPP_
+#define _FTL_CODECS_WRITER_HPP_
+
+#include <iostream>
+#include <msgpack.hpp>
+//#include <Eigen/Eigen>
+
+#include <ftl/codecs/packet.hpp>
+
+namespace ftl {
+namespace codecs {
+
+class Writer {
+	public:
+	explicit Writer(std::ostream &);
+	~Writer();
+
+	bool begin();
+	bool write(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &);
+	bool end();
+
+	private:
+	std::ostream *stream_;
+	msgpack::sbuffer buffer_;
+	int64_t timestart_;
+	bool active_;
+};
+
+}
+}
+
+#endif  // _FTL_CODECS_WRITER_HPP_
diff --git a/components/codecs/src/bitrates.cpp b/components/codecs/src/bitrates.cpp
index 519c86712487a8be9285f28392342e6ca6c16713..035e850d1ff20f5e113db2f7004b85c8bbea3040 100644
--- a/components/codecs/src/bitrates.cpp
+++ b/components/codecs/src/bitrates.cpp
@@ -8,6 +8,10 @@ using ftl::codecs::preset_t;
 using ftl::codecs::definition_t;
 using ftl::codecs::codec_t;
 
+static const CodecPreset special_presets[] = {
+	definition_t::HTC_VIVE, definition_t::HTC_VIVE, bitrate_t::High, bitrate_t::High
+};
+
 static const CodecPreset presets[] = {
 	definition_t::HD1080, definition_t::HD1080, bitrate_t::High, bitrate_t::High,
 	definition_t::HD1080, definition_t::HD720, bitrate_t::Standard, bitrate_t::Standard,
@@ -35,7 +39,9 @@ static const Resolution resolutions[] = {
 	1280, 720,		// HD720
 	1024, 576,		// SD576
 	854, 480,		// SD480
-	640, 360		// LD360
+	640, 360,		// LD360
+	0, 0,			// ANY
+	1852, 2056		// HTC_VIVE
 };
 
 int ftl::codecs::getWidth(definition_t d) {
@@ -47,6 +53,7 @@ int ftl::codecs::getHeight(definition_t d) {
 }
 
 const CodecPreset &ftl::codecs::getPreset(preset_t p) {
+	if (p < 0 && p >= -1) return special_presets[std::abs(p+1)];
     if (p > kPresetWorst) return presets[kPresetWorst];
     if (p < kPresetBest) return presets[kPresetBest];
     return presets[p];
diff --git a/components/codecs/src/channels.cpp b/components/codecs/src/channels.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..25b06b533b5c95d16bd60365ccbcc4373e09bd45
--- /dev/null
+++ b/components/codecs/src/channels.cpp
@@ -0,0 +1,93 @@
+#include <ftl/codecs/channels.hpp>
+
+#include <opencv2/opencv.hpp>
+
+struct ChannelInfo {
+	const char *name;
+	int type;
+};
+
+static ChannelInfo info[] = {
+    "Colour", CV_8UC3,
+    "Depth", CV_32F,
+    "Right", CV_8UC3,
+    "DepthRight", CV_32F,
+    "Deviation", CV_32F,
+    "Normals", CV_32FC4,
+    "Points", CV_32FC4,
+    "Confidence", CV_32F,
+    "EnergyVector", CV_32FC4,
+    "Flow", CV_32F,
+    "Energy", CV_32F,
+	"Mask", CV_32S,
+	"Density", CV_32F,
+    "LeftGray", CV_8U,
+    "RightGray", CV_8U,
+    "Overlay1", CV_8UC3,
+
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+
+	"AudioLeft", 0,
+	"AudioRight", 0,
+
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+	"NoName", 0,
+
+	"Configuration", 0,
+	"Calibration", 0,
+	"Pose", 0,
+	"Data", 0
+};
+
+std::string ftl::codecs::name(Channel c) {
+	if (c == Channel::None) return "None";
+	else return info[(int)c].name;
+}
+
+int ftl::codecs::type(Channel c)  {
+	if (c == Channel::None) return 0;
+	else return info[(int)c].type;
+}
diff --git a/components/codecs/src/encoder.cpp b/components/codecs/src/encoder.cpp
index 3ebe40cf91ad75cec92937b013d3215de4104004..fdacc89596668937f8f465995f52f0955bbded01 100644
--- a/components/codecs/src/encoder.cpp
+++ b/components/codecs/src/encoder.cpp
@@ -73,5 +73,6 @@ bool Encoder::encode(const cv::Mat &in, preset_t preset,
 	const auto &settings = ftl::codecs::getPreset(preset);
 	const definition_t definition = (in.type() == CV_32F) ? settings.depth_res : settings.colour_res;
 	const bitrate_t bitrate = (in.type() == CV_32F) ? settings.depth_qual : settings.colour_qual;
+	LOG(INFO) << "Encode definition: " << (int)definition;
 	return encode(in, definition, bitrate, cb);
 }
diff --git a/components/codecs/src/nvpipe_decoder.cpp b/components/codecs/src/nvpipe_decoder.cpp
index 0dda5884cc7bf156e7cb134795e6f0ff2c180130..74519a8f2f3b1b5c8e4eb13ea23ff89778402825 100644
--- a/components/codecs/src/nvpipe_decoder.cpp
+++ b/components/codecs/src/nvpipe_decoder.cpp
@@ -3,12 +3,16 @@
 #include <loguru.hpp>
 
 #include <ftl/cuda_util.hpp>
+#include <ftl/codecs/hevc.hpp>
 //#include <cuda_runtime.h>
 
+#include <opencv2/core/cuda/common.hpp>
+
 using ftl::codecs::NvPipeDecoder;
 
 NvPipeDecoder::NvPipeDecoder() {
 	nv_decoder_ = nullptr;
+	seen_iframe_ = false;
 }
 
 NvPipeDecoder::~NvPipeDecoder() {
@@ -17,11 +21,36 @@ NvPipeDecoder::~NvPipeDecoder() {
 	}
 }
 
+void cropAndScaleUp(cv::Mat &in, cv::Mat &out) {
+	CHECK(in.type() == out.type());
+
+	auto isize = in.size();
+	auto osize = out.size();
+	cv::Mat tmp;
+	
+	if (isize != osize) {
+		double x_scale = ((double) isize.width) / osize.width;
+		double y_scale = ((double) isize.height) / osize.height;
+		double x_scalei = 1.0 / x_scale;
+		double y_scalei = 1.0 / y_scale;
+		cv::Size sz_crop;
+
+		// assume downscaled image
+		if (x_scalei > y_scalei) {
+			sz_crop = cv::Size(isize.width, isize.height * x_scale);
+		} else {
+			sz_crop = cv::Size(isize.width * y_scale, isize.height);
+		}
+
+		tmp = in(cv::Rect(cv::Point2i(0, 0), sz_crop));
+		cv::resize(tmp, out, osize);
+	}
+}
+
 bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::Mat &out) {
 	cudaSetDevice(0);
 	UNIQUE_LOCK(mutex_,lk);
 	if (pkt.codec != codec_t::HEVC) return false;
-
 	bool is_float_frame = out.type() == CV_32F;
 
 	// Is the previous decoder still valid for current resolution and type?
@@ -33,6 +62,8 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::Mat &out) {
 	is_float_channel_ = is_float_frame;
 	last_definition_ = pkt.definition;
 
+	//LOG(INFO) << "DECODE RESOLUTION: (" << (int)pkt.definition << ") " << ftl::codecs::getWidth(pkt.definition) << "x" << ftl::codecs::getHeight(pkt.definition);
+
 	// Build a decoder instance of the correct kind
 	if (nv_decoder_ == nullptr) {
 		nv_decoder_ = NvPipe_CreateDecoder(
@@ -44,13 +75,22 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::Mat &out) {
 			//LOG(INFO) << "Bitrate=" << (int)bitrate << " width=" << ABRController::getColourWidth(bitrate);
 			LOG(FATAL) << "Could not create decoder: " << NvPipe_GetError(NULL);
 		} else {
-			LOG(INFO) << "Decoder created";
+			DLOG(INFO) << "Decoder created";
 		}
-	}
 
+		seen_iframe_ = false;
+	}
+	
 	// TODO: (Nick) Move to member variable to prevent re-creation
 	cv::Mat tmp(cv::Size(ftl::codecs::getWidth(pkt.definition),ftl::codecs::getHeight(pkt.definition)), (is_float_frame) ? CV_16U : CV_8UC4);
 
+	if (pkt.codec == ftl::codecs::codec_t::HEVC) {
+		// Obtain NAL unit type
+		if (ftl::codecs::hevc::isIFrame(pkt.data)) seen_iframe_ = true;
+	}
+
+	if (!seen_iframe_) return false;
+
 	int rc = NvPipe_Decode(nv_decoder_, pkt.data.data(), pkt.data.size(), tmp.data, tmp.cols, tmp.rows);
 	if (rc == 0) LOG(ERROR) << "NvPipe decode error: " << NvPipe_GetError(nv_decoder_);
 
@@ -59,8 +99,11 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::Mat &out) {
 		if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
 			tmp.convertTo(out, CV_32FC1, 1.0f/1000.0f);
 		} else {
-			tmp.convertTo(tmp, CV_32FC1, 1.0f/1000.0f);
+			cv::cvtColor(tmp, tmp, cv::COLOR_BGRA2BGR);
 			cv::resize(tmp, out, out.size());
+			//cv::Mat tmp2;
+			//tmp.convertTo(tmp2, CV_32FC1, 1.0f/1000.0f);
+			//cropAndScaleUp(tmp2, out);
 		}
 	} else {
 		// Is the received frame the same size as requested output?
@@ -69,6 +112,9 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::Mat &out) {
 		} else {
 			cv::cvtColor(tmp, tmp, cv::COLOR_BGRA2BGR);
 			cv::resize(tmp, out, out.size());
+			//cv::Mat tmp2;
+			//cv::cvtColor(tmp, tmp2, cv::COLOR_BGRA2BGR);
+			//cropAndScaleUp(tmp2, out);
 		}
 	}
 
diff --git a/components/codecs/src/nvpipe_encoder.cpp b/components/codecs/src/nvpipe_encoder.cpp
index 42374bedecf95a749e842a3a2ed22b5226d6d390..80bde1f9fac0dc63fc68db845b7e53084f6c59ec 100644
--- a/components/codecs/src/nvpipe_encoder.cpp
+++ b/components/codecs/src/nvpipe_encoder.cpp
@@ -4,6 +4,8 @@
 #include <ftl/codecs/bitrates.hpp>
 #include <ftl/cuda_util.hpp>
 
+#include <opencv2/core/cuda/common.hpp>
+
 using ftl::codecs::NvPipeEncoder;
 using ftl::codecs::bitrate_t;
 using ftl::codecs::codec_t;
@@ -14,14 +16,14 @@ using ftl::codecs::Packet;
 
 NvPipeEncoder::NvPipeEncoder(definition_t maxdef,
 			definition_t mindef) : Encoder(maxdef, mindef, ftl::codecs::device_t::Hardware) {
-    nvenc_ = nullptr;
-    current_definition_ = definition_t::HD1080;
-    is_float_channel_ = false;
+	nvenc_ = nullptr;
+	current_definition_ = definition_t::HD1080;
+	is_float_channel_ = false;
 	was_reset_ = false;
 }
 
 NvPipeEncoder::~NvPipeEncoder() {
-    if (nvenc_) NvPipe_Destroy(nvenc_);
+	if (nvenc_) NvPipe_Destroy(nvenc_);
 }
 
 void NvPipeEncoder::reset() {
@@ -41,83 +43,134 @@ definition_t NvPipeEncoder::_verifiedDefinition(definition_t def, const cv::Mat
 	return def;
 }
 
+void scaleDownAndPad(cv::Mat &in, cv::Mat &out) {
+	const auto isize = in.size();
+	const auto osize = out.size();
+	cv::Mat tmp;
+	
+	if (isize != osize) {
+		double x_scale = ((double) isize.width) / osize.width;
+		double y_scale = ((double) isize.height) / osize.height;
+		double x_scalei = 1.0 / x_scale;
+		double y_scalei = 1.0 / y_scale;
+
+		if (x_scale > 1.0 || y_scale > 1.0) {
+			if (x_scale > y_scale) {
+				cv::resize(in, tmp, cv::Size(osize.width, osize.height * x_scalei));
+			} else {
+				cv::resize(in, tmp, cv::Size(osize.width * y_scalei, osize.height));
+			}
+		}
+		else { tmp = in; }
+		
+		if (tmp.size().width < osize.width || tmp.size().height < osize.height) {
+			tmp.copyTo(out(cv::Rect(cv::Point2i(0, 0), tmp.size())));
+		}
+		else { out = tmp; }
+	}
+}
+
 bool NvPipeEncoder::encode(const cv::Mat &in, definition_t odefinition, bitrate_t bitrate, const std::function<void(const ftl::codecs::Packet&)> &cb) {
-    cudaSetDevice(0);
-	auto definition = _verifiedDefinition(odefinition, in);
+	cudaSetDevice(0);
+	auto definition = odefinition; //_verifiedDefinition(odefinition, in);
+
+	auto width = ftl::codecs::getWidth(definition);
+	auto height = ftl::codecs::getHeight(definition);
+
+	cv::Mat tmp;
+	if (width != in.cols || height != in.rows) {
+		LOG(WARNING) << "Mismatch resolution with encoding resolution";
+		if (in.type() == CV_32F) {
+			cv::resize(in, tmp, cv::Size(width,height), 0.0, 0.0, cv::INTER_NEAREST);
+		} else {
+			cv::resize(in, tmp, cv::Size(width,height));
+		}
+	} else {
+		tmp = in;
+	}
 
 	//LOG(INFO) << "Definition: " << ftl::codecs::getWidth(definition) << "x" << ftl::codecs::getHeight(definition);
 
-    if (!_createEncoder(in, definition, bitrate)) return false;
+	if (in.empty()) {
+		LOG(ERROR) << "Missing data for Nvidia encoder";
+		return false;
+	}
+	if (!_createEncoder(tmp, definition, bitrate)) return false;
 
-    //LOG(INFO) << "NvPipe Encode: " << int(definition) << " " << in.cols;
+	//LOG(INFO) << "NvPipe Encode: " << int(definition) << " " << in.cols;
 
-    cv::Mat tmp;
-    if (in.type() == CV_32F) {
-		in.convertTo(tmp, CV_16UC1, 1000);
-    } else if (in.type() == CV_8UC3) {
-        cv::cvtColor(in, tmp, cv::COLOR_BGR2BGRA);
+	//cv::Mat tmp;
+	if (tmp.type() == CV_32F) {
+		tmp.convertTo(tmp, CV_16UC1, 1000);
+	} else if (tmp.type() == CV_8UC3) {
+		cv::cvtColor(tmp, tmp, cv::COLOR_BGR2BGRA);
 	} else {
-		tmp = in;
+		//in.copyTo(tmp);
 	}
 
+	// scale/pad to fit output format
+	//cv::Mat tmp2 = cv::Mat::zeros(getHeight(odefinition), getWidth(odefinition), tmp.type());
+	//scaleDownAndPad(tmp, tmp2);
+	//std::swap(tmp, tmp2);
+
 	Packet pkt;
 	pkt.codec = codec_t::HEVC;
 	pkt.definition = definition;
 	pkt.block_total = 1;
 	pkt.block_number = 0;
 
-    pkt.data.resize(ftl::codecs::kVideoBufferSize);
-    uint64_t cs = NvPipe_Encode(
-        nvenc_,
-        tmp.data,
-        tmp.step,
-        pkt.data.data(),
-        ftl::codecs::kVideoBufferSize,
-        in.cols,
-        in.rows,
-        was_reset_		// Force IFrame!
-    );
-    pkt.data.resize(cs);
+	pkt.data.resize(ftl::codecs::kVideoBufferSize);
+	uint64_t cs = NvPipe_Encode(
+		nvenc_,
+		tmp.data,
+		tmp.step,
+		pkt.data.data(),
+		ftl::codecs::kVideoBufferSize,
+		tmp.cols,
+		tmp.rows,
+		was_reset_		// Force IFrame!
+	);
+	pkt.data.resize(cs);
 	was_reset_ = false;
 
-    if (cs == 0) {
-        LOG(ERROR) << "Could not encode video frame: " << NvPipe_GetError(nvenc_);
-        return false;
-    } else {
+	if (cs == 0) {
+		LOG(ERROR) << "Could not encode video frame: " << NvPipe_GetError(nvenc_);
+		return false;
+	} else {
 		cb(pkt);
-        return true;
-    }
+		return true;
+	}
 }
 
 bool NvPipeEncoder::_encoderMatch(const cv::Mat &in, definition_t def) {
-    return ((in.type() == CV_32F && is_float_channel_) ||
-        ((in.type() == CV_8UC3 || in.type() == CV_8UC4) && !is_float_channel_)) && current_definition_ == def;
+	return ((in.type() == CV_32F && is_float_channel_) ||
+		((in.type() == CV_8UC3 || in.type() == CV_8UC4) && !is_float_channel_)) && current_definition_ == def;
 }
 
 bool NvPipeEncoder::_createEncoder(const cv::Mat &in, definition_t def, bitrate_t rate) {
-    if (_encoderMatch(in, def) && nvenc_) return true;
-
-    if (in.type() == CV_32F) is_float_channel_ = true;
-    else is_float_channel_ = false;
-    current_definition_ = def;
-
-    if (nvenc_) NvPipe_Destroy(nvenc_);
-    const int fps = 1000/ftl::timer::getInterval();
-    nvenc_ = NvPipe_CreateEncoder(
-        (is_float_channel_) ? NVPIPE_UINT16 : NVPIPE_RGBA32,
-        NVPIPE_HEVC,
-        (is_float_channel_) ? NVPIPE_LOSSLESS : NVPIPE_LOSSY,
-        16*1000*1000,
-        fps,				// FPS
-        ftl::codecs::getWidth(def),	// Output Width
-        ftl::codecs::getHeight(def)	// Output Height
-    );
-
-    if (!nvenc_) {
-        LOG(ERROR) << "Could not create video encoder: " << NvPipe_GetError(NULL);
-        return false;
-    } else {
-        LOG(INFO) << "NvPipe encoder created";
-        return true;
-    }
+	if (_encoderMatch(in, def) && nvenc_) return true;
+
+	if (in.type() == CV_32F) is_float_channel_ = true;
+	else is_float_channel_ = false;
+	current_definition_ = def;
+
+	if (nvenc_) NvPipe_Destroy(nvenc_);
+	const int fps = 1000/ftl::timer::getInterval();
+	nvenc_ = NvPipe_CreateEncoder(
+		(is_float_channel_) ? NVPIPE_UINT16 : NVPIPE_RGBA32,
+		NVPIPE_HEVC,
+		(is_float_channel_) ? NVPIPE_LOSSLESS : NVPIPE_LOSSY,
+		16*1000*1000,
+		fps,				// FPS
+		ftl::codecs::getWidth(def),	// Output Width
+		ftl::codecs::getHeight(def)	// Output Height
+	);
+
+	if (!nvenc_) {
+		LOG(ERROR) << "Could not create video encoder: " << NvPipe_GetError(NULL);
+		return false;
+	} else {
+		LOG(INFO) << "NvPipe encoder created";
+		return true;
+	}
 }
diff --git a/components/codecs/src/reader.cpp b/components/codecs/src/reader.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..41d0697f5c23192a405ac9d13286cb38047a7ec5
--- /dev/null
+++ b/components/codecs/src/reader.cpp
@@ -0,0 +1,121 @@
+#include <loguru.hpp>
+#include <ftl/codecs/reader.hpp>
+#include <ftl/timer.hpp>
+
+#include <tuple>
+
+using ftl::codecs::Reader;
+using ftl::codecs::StreamPacket;
+using ftl::codecs::Packet;
+using std::get;
+
+Reader::Reader(std::istream &s) : stream_(&s), has_data_(false), playing_(false) {
+
+}
+
+Reader::~Reader() {
+
+}
+
+bool Reader::begin() {
+	ftl::codecs::Header h;
+	(*stream_).read((char*)&h, sizeof(h));
+	if (h.magic[0] != 'F' || h.magic[1] != 'T' || h.magic[2] != 'L' || h.magic[3] != 'F') return false;
+
+	// Capture current time to adjust timestamps
+	timestart_ = (ftl::timer::get_time() / ftl::timer::getInterval()) * ftl::timer::getInterval();
+	playing_ = true;
+
+	return true;
+}
+
+bool Reader::read(int64_t ts, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &f) {
+	//UNIQUE_LOCK(mtx_, lk);
+	std::unique_lock<std::mutex> lk(mtx_, std::defer_lock);
+	if (!lk.try_lock()) return true;
+
+	// Check buffer first for frames already read
+	for (auto i = data_.begin(); i != data_.end();) {
+		if (get<0>(*i).timestamp <= ts) {
+			f(get<0>(*i), get<1>(*i));
+			i = data_.erase(i);
+		} else {
+			++i;
+		}
+	}
+
+	bool partial = false;
+	int64_t extended_ts = ts + 200;  // Buffer 200ms ahead
+
+	while (playing_ && stream_->good() || buffer_.nonparsed_size() > 0) {
+		if (buffer_.nonparsed_size() == 0 || (partial && buffer_.nonparsed_size() < 10000000)) {
+			buffer_.reserve_buffer(10000000);
+			stream_->read(buffer_.buffer(), buffer_.buffer_capacity());
+			//if (stream_->bad()) return false;
+
+			int bytes = stream_->gcount();
+			if (bytes == 0) break;
+			buffer_.buffer_consumed(bytes);
+			partial = false;
+		}
+
+		msgpack::object_handle msg;
+		if (!buffer_.next(msg)) {
+			partial = true;
+			continue;
+		}
+
+		//std::tuple<StreamPacket,Packet> data;
+		msgpack::object obj = msg.get();
+		try {
+			obj.convert(data_.emplace_back());
+		} catch (std::exception &e) {
+			LOG(INFO) << "Corrupt message: " << buffer_.nonparsed_size();
+			//partial = true;
+			//continue;
+			return false;
+		}
+
+		auto &data = data_.back();
+
+		// Adjust timestamp
+		get<0>(data).timestamp += timestart_;
+
+		// TODO: Need to read ahead a few frames because there may be a
+		// smaller timestamp after this one... requires a buffer. Ideally this
+		// should be resolved during the write process.
+		if (get<0>(data).timestamp <= ts) {
+			f(get<0>(data),get<1>(data));
+			data_.pop_back();
+		} else if (get<0>(data).timestamp > extended_ts) {
+			//data_ = data;
+			//has_data_ = true;
+			return true;
+		}
+	}
+
+	return data_.size() > 0;
+}
+
+bool Reader::read(int64_t ts) {
+	return read(ts, [this](const ftl::codecs::StreamPacket &spkt, ftl::codecs::Packet &pkt) {
+		if (handlers_.size() > spkt.streamID && (bool)handlers_[spkt.streamID]) {
+			handlers_[spkt.streamID](spkt, pkt);
+		} else if (spkt.streamID == 255) {
+			// Broadcast stream, send packets to every source handler.
+			for (auto &h : handlers_) {
+				h(spkt, pkt);
+			}
+		}
+	});
+}
+
+void Reader::onPacket(int streamID, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &f) {
+	if (streamID >= handlers_.size()) handlers_.resize(streamID+1);
+	handlers_[streamID] = f;
+}
+
+bool Reader::end() {
+	playing_ = false;
+	return true;
+}
diff --git a/components/codecs/src/writer.cpp b/components/codecs/src/writer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6561549f6f2e681b7678b328f4357762eae468a1
--- /dev/null
+++ b/components/codecs/src/writer.cpp
@@ -0,0 +1,43 @@
+#include <ftl/codecs/writer.hpp>
+#include <ftl/timer.hpp>
+#include <loguru.hpp>
+
+#include <tuple>
+
+using ftl::codecs::Writer;
+
+Writer::Writer(std::ostream &s) : stream_(&s), active_(false) {}
+
+Writer::~Writer() {
+
+}
+
+bool Writer::begin() {
+	ftl::codecs::Header h;
+	h.version = 0;
+	(*stream_).write((const char*)&h, sizeof(h));
+
+	// Capture current time to adjust timestamps
+	timestart_ = ftl::timer::get_time();
+	active_ = true;
+	return true;
+}
+
+bool Writer::end() {
+	active_ = false;
+	return true;
+}
+
+bool Writer::write(const ftl::codecs::StreamPacket &s, const ftl::codecs::Packet &p) {
+	if (!active_) return false;
+	ftl::codecs::StreamPacket s2 = s;
+	// Adjust timestamp relative to start of file.
+	s2.timestamp -= timestart_;
+
+	auto data = std::make_tuple(s2,p);
+	msgpack::sbuffer buffer;
+	msgpack::pack(buffer, data);
+	(*stream_).write(buffer.data(), buffer.size());
+	//buffer_.clear();
+	return true;
+}
diff --git a/components/codecs/test/CMakeLists.txt b/components/codecs/test/CMakeLists.txt
index 534336fed6ce5135199105f0784fa260204f64de..74035c2280755ea358e2a53fce471523782cbf0c 100644
--- a/components/codecs/test/CMakeLists.txt
+++ b/components/codecs/test/CMakeLists.txt
@@ -30,3 +30,28 @@ target_link_libraries(nvpipe_codec_unit
 
 
 add_test(NvPipeCodecUnitTest nvpipe_codec_unit)
+
+### Reader Writer Unit ################################################################
+add_executable(rw_unit
+	./tests.cpp
+	../src/writer.cpp
+	../src/reader.cpp
+	./readwrite_test.cpp
+)
+target_include_directories(rw_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(rw_unit
+	Threads::Threads ${OS_LIBS} ${OpenCV_LIBS} ${CUDA_LIBRARIES} ftlcommon Eigen3::Eigen)
+
+
+add_test(RWUnitTest rw_unit)
+
+### Channel Unit ###############################################################
+add_executable(channel_unit
+	./tests.cpp
+	./channel_unit.cpp
+)
+target_include_directories(channel_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(channel_unit
+	ftlcommon)
+
+add_test(ChannelUnitTest channel_unit)
diff --git a/components/rgbd-sources/test/channel_unit.cpp b/components/codecs/test/channel_unit.cpp
similarity index 95%
rename from components/rgbd-sources/test/channel_unit.cpp
rename to components/codecs/test/channel_unit.cpp
index 25171678540f1970088d84e1e842865a4249455e..bc966c63033f07680f1936e971166ca06acadde9 100644
--- a/components/rgbd-sources/test/channel_unit.cpp
+++ b/components/codecs/test/channel_unit.cpp
@@ -1,8 +1,8 @@
 #include "catch.hpp"
-#include <ftl/rgbd/channels.hpp>
+#include <ftl/codecs/channels.hpp>
 
-using ftl::rgbd::Channel;
-using ftl::rgbd::Channels;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 
 TEST_CASE("channel casting", "") {
 	SECTION("cast channel to channels") {
diff --git a/components/codecs/test/readwrite_test.cpp b/components/codecs/test/readwrite_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ea8781b1b3b3f9204a26cd2fca527603ee855596
--- /dev/null
+++ b/components/codecs/test/readwrite_test.cpp
@@ -0,0 +1,264 @@
+#include "catch.hpp"
+
+#include <ftl/codecs/writer.hpp>
+#include <ftl/codecs/reader.hpp>
+#include <ftl/timer.hpp>
+
+#include <sstream>
+
+using ftl::codecs::Writer;
+using ftl::codecs::Reader;
+using ftl::codecs::StreamPacket;
+using ftl::codecs::Packet;
+using ftl::codecs::codec_t;
+using ftl::codecs::definition_t;
+using ftl::codecs::Channel;
+
+TEST_CASE( "Write and read - Single frame" ) {
+	std::stringstream s;
+	Writer w(s);
+
+	StreamPacket spkt;
+	Packet pkt;
+
+	spkt.channel = Channel::Colour;
+	spkt.timestamp = ftl::timer::get_time();
+	spkt.streamID = 0;
+
+	pkt.codec = codec_t::JSON;
+	pkt.definition = definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = {44,44,44};
+
+	w.begin();
+	w.write(spkt, pkt);
+	w.end();
+
+	REQUIRE( s.str().size() > 0 );
+
+	int n = 0;
+
+	Reader r(s);
+	r.begin();
+	bool res = r.read(ftl::timer::get_time()+10, [&n](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n;
+		REQUIRE(rpkt.codec == codec_t::JSON);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == 44);
+		REQUIRE(rspkt.channel == Channel::Colour);
+	});
+	r.end();
+
+	REQUIRE( n == 1 );
+	REQUIRE( !res );
+}
+
+TEST_CASE( "Write and read - Multiple frames" ) {
+	std::stringstream s;
+	Writer w(s);
+
+	StreamPacket spkt;
+	Packet pkt;
+
+	spkt.channel = Channel::Colour;
+	spkt.timestamp = ftl::timer::get_time();
+	spkt.streamID = 0;
+
+	pkt.codec = codec_t::JSON;
+	pkt.definition = definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = {44,44,44};
+
+	w.begin();
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {55,55,55};
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {66,66,66};
+	w.write(spkt, pkt);
+	w.end();
+
+	REQUIRE( s.str().size() > 0 );
+
+	int n = 0;
+
+	Reader r(s);
+	r.begin();
+	bool res = r.read(ftl::timer::get_time()+100, [&n](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n;
+		REQUIRE(rpkt.codec == codec_t::JSON);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == ((n == 1) ? 44 : (n == 2) ? 55 : 66));
+		REQUIRE(rspkt.channel == Channel::Colour);
+	});
+	r.end();
+
+	REQUIRE( n == 3 );
+	REQUIRE( !res );
+}
+
+TEST_CASE( "Write and read - Multiple streams" ) {
+	std::stringstream s;
+	Writer w(s);
+
+	StreamPacket spkt;
+	Packet pkt;
+
+	spkt.channel = Channel::Colour;
+	spkt.timestamp = ftl::timer::get_time();
+	spkt.streamID = 0;
+
+	pkt.codec = codec_t::JSON;
+	pkt.definition = definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = {44,44,44};
+
+	w.begin();
+	w.write(spkt, pkt);
+	spkt.streamID = 1;
+	pkt.data = {55,55,55};
+	w.write(spkt, pkt);
+	w.end();
+
+	REQUIRE( s.str().size() > 0 );
+
+	int n1 = 0;
+	int n2 = 0;
+
+	Reader r(s);
+
+	r.onPacket(0, [&n1](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n1;
+		REQUIRE(rspkt.streamID == 0);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == 44);
+	});
+
+	r.onPacket(1, [&n2](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n2;
+		REQUIRE(rspkt.streamID == 1);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == 55);
+	});
+
+	r.begin();
+	bool res = r.read(ftl::timer::get_time()+100);
+	r.end();
+
+	REQUIRE( n1 == 1 );
+	REQUIRE( n2 == 1 );
+	REQUIRE( !res );
+}
+
+TEST_CASE( "Write and read - Multiple frames with limit" ) {
+	std::stringstream s;
+	Writer w(s);
+
+	StreamPacket spkt;
+	Packet pkt;
+
+	spkt.channel = Channel::Colour;
+	spkt.timestamp = ftl::timer::get_time();
+	spkt.streamID = 0;
+
+	pkt.codec = codec_t::JSON;
+	pkt.definition = definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = {44,44,44};
+
+	w.begin();
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {55,55,55};
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {66,66,66};
+	w.write(spkt, pkt);
+	w.end();
+
+	REQUIRE( s.str().size() > 0 );
+
+	int n = 0;
+
+	Reader r(s);
+	r.begin();
+	bool res = r.read(ftl::timer::get_time()+50, [&n](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n;
+		REQUIRE(rpkt.codec == codec_t::JSON);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == ((n == 1) ? 44 : (n == 2) ? 55 : 66));
+		REQUIRE(rspkt.channel == Channel::Colour);
+	});
+	r.end();
+
+	REQUIRE( n == 2 );
+	REQUIRE( res );
+}
+
+TEST_CASE( "Write and read - Multiple reads" ) {
+	std::stringstream s;
+	Writer w(s);
+
+	StreamPacket spkt;
+	Packet pkt;
+
+	spkt.channel = Channel::Colour;
+	spkt.timestamp = ftl::timer::get_time();
+	spkt.streamID = 0;
+
+	pkt.codec = codec_t::JSON;
+	pkt.definition = definition_t::Any;
+	pkt.block_number = 0;
+	pkt.block_total = 1;
+	pkt.flags = 0;
+	pkt.data = {44,44,44};
+
+	w.begin();
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {55,55,55};
+	w.write(spkt, pkt);
+	spkt.timestamp += 50;
+	pkt.data = {66,66,66};
+	w.write(spkt, pkt);
+	w.end();
+
+	REQUIRE( s.str().size() > 0 );
+
+	int n = 0;
+
+	Reader r(s);
+	r.begin();
+	bool res = r.read(ftl::timer::get_time()+50, [&n](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n;
+		REQUIRE(rpkt.codec == codec_t::JSON);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == ((n == 1) ? 44 : (n == 2) ? 55 : 66));
+		REQUIRE(rspkt.channel == Channel::Colour);
+	});
+
+	REQUIRE( n == 2 );
+	REQUIRE( res );
+
+	n = 0;
+	res = r.read(ftl::timer::get_time()+100, [&n](const StreamPacket &rspkt, const Packet &rpkt) {
+		++n;
+		REQUIRE(rpkt.codec == codec_t::JSON);
+		REQUIRE(rpkt.data.size() == 3);
+		REQUIRE(rpkt.data[0] == 66 );
+		REQUIRE(rspkt.channel == Channel::Colour);
+	});
+	r.end();
+
+	REQUIRE( n == 1 );
+	REQUIRE( !res );
+}
diff --git a/components/common/cpp/include/ftl/configurable.hpp b/components/common/cpp/include/ftl/configurable.hpp
index 94a080eaed480fa2e57b113e9c8c3d7bc04388bd..6593119c794205e0bf901814437f79974bb81e00 100644
--- a/components/common/cpp/include/ftl/configurable.hpp
+++ b/components/common/cpp/include/ftl/configurable.hpp
@@ -39,7 +39,7 @@ class Configurable {
 	public:
 	Configurable();
 	explicit Configurable(nlohmann::json &config);
-	virtual ~Configurable() {}
+	virtual ~Configurable();
 
 	/**
 	 * Force the JSON object to have specific properties with a specific type.
diff --git a/components/common/cpp/include/ftl/configuration.hpp b/components/common/cpp/include/ftl/configuration.hpp
index c060b70382d0174d351d836d5476fa5a1f29db61..94fcc7dfa6b57e9bfd59fbcf36c5be80e0b82638 100644
--- a/components/common/cpp/include/ftl/configuration.hpp
+++ b/components/common/cpp/include/ftl/configuration.hpp
@@ -37,6 +37,10 @@ Configurable *configure(int argc, char **argv, const std::string &root);
 
 Configurable *configure(json_t &);
 
+void cleanup();
+
+void removeConfigurable(Configurable *cfg);
+
 /**
  * Change a configuration value based upon a URI. Return true if changed,
  * false if it was not able to change.
diff --git a/components/common/cpp/include/ftl/exception.hpp b/components/common/cpp/include/ftl/exception.hpp
index 6719158bba5f5f53abfcdeb0fb39a5b5dda0d5f2..921e53493eda023a35af6f4c53b43ca2e26125d9 100644
--- a/components/common/cpp/include/ftl/exception.hpp
+++ b/components/common/cpp/include/ftl/exception.hpp
@@ -1,18 +1,49 @@
 #ifndef _FTL_EXCEPTION_HPP_
 #define _FTL_EXCEPTION_HPP_
 
+#include <sstream>
+
 namespace ftl {
+class Formatter {
+	public:
+	Formatter() {}
+    ~Formatter() {}
+
+    template <typename Type>
+    inline Formatter & operator << (const Type & value)
+    {
+        stream_ << value;
+        return *this;
+    }
+
+    inline std::string str() const         { return stream_.str(); }
+    inline operator std::string () const   { return stream_.str(); }
+
+    enum ConvertToString 
+    {
+        to_str
+    };
+    inline std::string operator >> (ConvertToString) { return stream_.str(); }
+
+private:
+    std::stringstream stream_;
+
+    Formatter(const Formatter &);
+    Formatter & operator = (Formatter &);
+};
+
 class exception : public std::exception
 {
 	public:
-	explicit exception(const char *msg) : msg_(msg) {};
+	explicit exception(const char *msg) : msg_(msg) {}
+	explicit exception(const Formatter &msg) : msg_(msg.str()) {}
 
 	const char * what () const throw () {
-    	return msg_;
+    	return msg_.c_str();
     }
 
 	private:
-	const char *msg_;
+	std::string msg_;
 };
 }
 
diff --git a/components/common/cpp/include/ftl/traits.hpp b/components/common/cpp/include/ftl/traits.hpp
index 6ac54e8e66f6a12aa13e39e1228f5cf17ca69312..71668359d1b0bfa036c29fb9f6d4f73ee0d2d5a7 100644
--- a/components/common/cpp/include/ftl/traits.hpp
+++ b/components/common/cpp/include/ftl/traits.hpp
@@ -13,30 +13,30 @@ struct AlwaysFalse : std::false_type {};
 template <typename T> struct OpenCVType {
 	static_assert(AlwaysFalse<T>::value, "Not a valid format type");
 };
-template <> struct OpenCVType<uchar> { static const int value = CV_8UC1; };
-template <> struct OpenCVType<uchar2> { static const int value = CV_8UC2; };
-template <> struct OpenCVType<uchar3> { static const int value = CV_8UC3; };
-template <> struct OpenCVType<uchar4> { static const int value = CV_8UC4; };
-template <> struct OpenCVType<char> { static const int value = CV_8SC1; };
-template <> struct OpenCVType<char2> { static const int value = CV_8SC2; };
-template <> struct OpenCVType<char3> { static const int value = CV_8SC3; };
-template <> struct OpenCVType<char4> { static const int value = CV_8SC4; };
-template <> struct OpenCVType<ushort> { static const int value = CV_16UC1; };
-template <> struct OpenCVType<ushort2> { static const int value = CV_16UC2; };
-template <> struct OpenCVType<ushort3> { static const int value = CV_16UC3; };
-template <> struct OpenCVType<ushort4> { static const int value = CV_16UC4; };
-template <> struct OpenCVType<short> { static const int value = CV_16SC1; };
-template <> struct OpenCVType<short2> { static const int value = CV_16SC2; };
-template <> struct OpenCVType<short3> { static const int value = CV_16SC3; };
-template <> struct OpenCVType<short4> { static const int value = CV_16SC4; };
-template <> struct OpenCVType<int> { static const int value = CV_32SC1; };
-template <> struct OpenCVType<int2> { static const int value = CV_32SC2; };
-template <> struct OpenCVType<int3> { static const int value = CV_32SC3; };
-template <> struct OpenCVType<int4> { static const int value = CV_32SC4; };
-template <> struct OpenCVType<float> { static const int value = CV_32FC1; };
-template <> struct OpenCVType<float2> { static const int value = CV_32FC2; };
-template <> struct OpenCVType<float3> { static const int value = CV_32FC3; };
-template <> struct OpenCVType<float4> { static const int value = CV_32FC4; };
+template <> struct OpenCVType<uchar> { static constexpr int value = CV_8UC1; };
+template <> struct OpenCVType<uchar2> { static constexpr int value = CV_8UC2; };
+template <> struct OpenCVType<uchar3> { static constexpr int value = CV_8UC3; };
+template <> struct OpenCVType<uchar4> { static constexpr int value = CV_8UC4; };
+template <> struct OpenCVType<char> { static constexpr int value = CV_8SC1; };
+template <> struct OpenCVType<char2> { static constexpr int value = CV_8SC2; };
+template <> struct OpenCVType<char3> { static constexpr int value = CV_8SC3; };
+template <> struct OpenCVType<char4> { static constexpr int value = CV_8SC4; };
+template <> struct OpenCVType<ushort> { static constexpr int value = CV_16UC1; };
+template <> struct OpenCVType<ushort2> { static constexpr int value = CV_16UC2; };
+template <> struct OpenCVType<ushort3> { static constexpr int value = CV_16UC3; };
+template <> struct OpenCVType<ushort4> { static constexpr int value = CV_16UC4; };
+template <> struct OpenCVType<short> { static constexpr int value = CV_16SC1; };
+template <> struct OpenCVType<short2> { static constexpr int value = CV_16SC2; };
+template <> struct OpenCVType<short3> { static constexpr int value = CV_16SC3; };
+template <> struct OpenCVType<short4> { static constexpr int value = CV_16SC4; };
+template <> struct OpenCVType<int> { static constexpr int value = CV_32SC1; };
+template <> struct OpenCVType<int2> { static constexpr int value = CV_32SC2; };
+template <> struct OpenCVType<int3> { static constexpr int value = CV_32SC3; };
+template <> struct OpenCVType<int4> { static constexpr int value = CV_32SC4; };
+template <> struct OpenCVType<float> { static constexpr int value = CV_32FC1; };
+template <> struct OpenCVType<float2> { static constexpr int value = CV_32FC2; };
+template <> struct OpenCVType<float3> { static constexpr int value = CV_32FC3; };
+template <> struct OpenCVType<float4> { static constexpr int value = CV_32FC4; };
 
 }
 }
diff --git a/components/common/cpp/src/configurable.cpp b/components/common/cpp/src/configurable.cpp
index 7cea5adeb61162f83a62c5724e99b41186b6e8ff..f47e12195e00d6db0bce1ee65ec70339e062448f 100644
--- a/components/common/cpp/src/configurable.cpp
+++ b/components/common/cpp/src/configurable.cpp
@@ -21,6 +21,10 @@ Configurable::Configurable(nlohmann::json &config) : config_(&config) {
 	ftl::config::registerConfigurable(this);
 }
 
+Configurable::~Configurable() {
+	ftl::config::removeConfigurable(this);
+}
+
 void Configurable::required(const char *f, const std::vector<std::tuple<std::string, std::string, std::string>> &r) {
 	bool diderror = false;
 	for (auto i : r) {
diff --git a/components/common/cpp/src/configuration.cpp b/components/common/cpp/src/configuration.cpp
index 8a3849e8ee8799119f3d15700dc277e1dece7654..bd9d9feec20538cebc210cafba72a98f651b243c 100644
--- a/components/common/cpp/src/configuration.cpp
+++ b/components/common/cpp/src/configuration.cpp
@@ -185,7 +185,15 @@ static void _indexConfig(json_t &cfg) {
 }
 
 ftl::Configurable *ftl::config::find(const std::string &uri) {
-	auto ix = config_instance.find(uri);
+	std::string actual_uri = uri;
+	if (uri[0] == '/') {
+		if (uri.size() == 1) {
+			return rootCFG;
+		} else {
+			actual_uri = rootCFG->getID() + uri;
+		}
+	}
+	auto ix = config_instance.find(actual_uri);
 	if (ix == config_instance.end()) return nullptr;
 	else return (*ix).second;
 }
@@ -470,6 +478,24 @@ Configurable *ftl::config::configure(ftl::config::json_t &cfg) {
 	return rootcfg;
 }
 
+static bool doing_cleanup = false;
+void ftl::config::cleanup() {
+	doing_cleanup = true;
+	for (auto f : config_instance) {
+		delete f.second;
+	}
+	config_instance.clear();
+}
+
+void ftl::config::removeConfigurable(Configurable *cfg) {
+	if (doing_cleanup) return;
+
+	auto i = config_instance.find(cfg->getID());
+	if (i != config_instance.end()) {
+		config_instance.erase(i);
+	}
+}
+
 
 Configurable *ftl::config::configure(int argc, char **argv, const std::string &root) {
 	loguru::g_preamble_date = false;
diff --git a/components/renderers/cpp/CMakeLists.txt b/components/renderers/cpp/CMakeLists.txt
index b575721587262e2e468d6cb48cf8c44c6771e6fc..8fc7996efcbad8502bcc3dce0fef4fee8ffd327d 100644
--- a/components/renderers/cpp/CMakeLists.txt
+++ b/components/renderers/cpp/CMakeLists.txt
@@ -2,6 +2,8 @@ add_library(ftlrender
 	src/splat_render.cpp
 	src/splatter.cu
 	src/points.cu
+	src/normals.cu
+	src/mask.cu
 )
 
 # These cause errors in CI build and are being removed from PCL in newer versions
diff --git a/components/renderers/cpp/include/ftl/cuda/intersections.hpp b/components/renderers/cpp/include/ftl/cuda/intersections.hpp
index 9cfdbc2544d9c1bd32c9f5e12a0a161f45c50d54..9008d68c66ac258d1dca651f7cc0e4a3230bdbda 100644
--- a/components/renderers/cpp/include/ftl/cuda/intersections.hpp
+++ b/components/renderers/cpp/include/ftl/cuda/intersections.hpp
@@ -49,8 +49,7 @@ __device__ inline bool intersectDisk(const float3 &n, const float3 &p0, float ra
  * @param l Normalised ray direction in camera space
  * @return Radius from centre of disk where intersection occurred.
  */
-__device__ inline float intersectDistance(const float3 &n, const float3 &p0, const float3 &l0, const float3 &l) { 
-    float t = 0; 
+__device__ inline float intersectDistance(const float3 &n, const float3 &p0, const float3 &l0, const float3 &l, float &t) { 
     if (intersectPlane(n, p0, l0, l, t)) { 
         const float3 p = l0 + l * t; 
         const float3 v = p - p0; 
diff --git a/components/renderers/cpp/include/ftl/cuda/mask.hpp b/components/renderers/cpp/include/ftl/cuda/mask.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..3f49f3bfc65e67186cc8caf321743ae2800805e7
--- /dev/null
+++ b/components/renderers/cpp/include/ftl/cuda/mask.hpp
@@ -0,0 +1,43 @@
+#ifndef _FTL_CUDA_MASK_HPP_
+#define _FTL_CUDA_MASK_HPP_
+
+namespace ftl {
+namespace cuda {
+
+/**
+ * Wrap an int mask value used to flag individual depth pixels.
+ */
+class Mask {
+	public:
+	__device__ inline Mask() : v_(0) {}
+	__device__ explicit inline Mask(int v) : v_(v) {}
+	#ifdef __CUDACC__
+	__device__ inline Mask(const ftl::cuda::TextureObject<int> &m, int x, int y) : v_(m.tex2D(x,y)) {}
+	#endif
+	__device__ inline operator int() const { return v_; }
+
+    __device__ inline bool is(int m) const { return v_ & m; }
+
+	__device__ inline bool isFilled() const { return v_ & kMask_Filled; }
+	__device__ inline bool isDiscontinuity() const { return v_ & kMask_Discontinuity; }
+	__device__ inline bool hasCorrespondence() const { return v_ & kMask_Correspondence; }
+	__device__ inline bool isBad() const { return v_ & kMask_Bad; }
+
+	__device__ inline void isFilled(bool v) { v_ = (v) ? v_ | kMask_Filled : v_ & (~kMask_Filled); }
+	__device__ inline void isDiscontinuity(bool v) { v_ = (v) ? v_ | kMask_Discontinuity : v_ & (~kMask_Discontinuity); }
+	__device__ inline void hasCorrespondence(bool v) { v_ = (v) ? v_ | kMask_Correspondence : v_ & (~kMask_Correspondence); }
+	__device__ inline void isBad(bool v) { v_ = (v) ? v_ | kMask_Bad : v_ & (~kMask_Bad); }
+
+    static constexpr int kMask_Filled = 0x0001;
+	static constexpr int kMask_Discontinuity = 0x0002;
+	static constexpr int kMask_Correspondence = 0x0004;
+	static constexpr int kMask_Bad = 0x0008;
+
+	private:
+	int v_;
+};
+
+}
+}
+
+#endif
diff --git a/components/renderers/cpp/include/ftl/cuda/normals.hpp b/components/renderers/cpp/include/ftl/cuda/normals.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..da2247723206cc9a1167ffbb0bc659ec847208c2
--- /dev/null
+++ b/components/renderers/cpp/include/ftl/cuda/normals.hpp
@@ -0,0 +1,44 @@
+#ifndef _FTL_CUDA_NORMALS_HPP_
+#define _FTL_CUDA_NORMALS_HPP_
+
+#include <ftl/cuda_common.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/cuda_matrix_util.hpp>
+
+namespace ftl {
+namespace cuda {
+
+void normals(ftl::cuda::TextureObject<float4> &output,
+        ftl::cuda::TextureObject<float4> &temp,
+        ftl::cuda::TextureObject<float4> &input,
+		int radius,
+		float smoothing,
+        const ftl::rgbd::Camera &camera,
+        const float3x3 &pose, cudaStream_t stream);
+
+void normals(ftl::cuda::TextureObject<float4> &output,
+        ftl::cuda::TextureObject<float4> &temp,
+        ftl::cuda::TextureObject<int> &input,  // Integer depth values
+		int radius,
+		float smoothing,
+        const ftl::rgbd::Camera &camera,
+        const float3x3 &pose_inv, const float3x3 &pose, cudaStream_t stream);
+
+void normal_visualise(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<uchar4> &output,
+        const float3 &light, const uchar4 &diffuse, const uchar4 &ambient,
+        cudaStream_t stream);
+
+void normal_filter(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<float4> &points,
+        const ftl::rgbd::Camera &camera, const float4x4 &pose,
+        float thresh, cudaStream_t stream);
+
+void transform_normals(ftl::cuda::TextureObject<float4> &norm,
+        const float3x3 &pose,
+        cudaStream_t stream);
+
+}
+}
+
+#endif  // _FTL_CUDA_NORMALS_HPP_
diff --git a/components/renderers/cpp/include/ftl/cuda/points.hpp b/components/renderers/cpp/include/ftl/cuda/points.hpp
index deffe32777789e2b58a96aef2106975ad37e0cdd..3b31a0be159fb4111b480d969267fa037811d63e 100644
--- a/components/renderers/cpp/include/ftl/cuda/points.hpp
+++ b/components/renderers/cpp/include/ftl/cuda/points.hpp
@@ -8,7 +8,28 @@
 namespace ftl {
 namespace cuda {
 
-void point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, const float4x4 &pose, cudaStream_t stream);
+struct ClipSpace {
+	float4x4 origin;
+	float3 size;
+};
+
+void point_cloud(ftl::cuda::TextureObject<float4> &output,
+		ftl::cuda::TextureObject<float> &depth,
+		const ftl::rgbd::Camera &params,
+		const float4x4 &pose, uint discon, cudaStream_t stream);
+
+void clipping(ftl::cuda::TextureObject<float4> &points,
+		const ClipSpace &clip, cudaStream_t stream);
+
+void clipping(ftl::cuda::TextureObject<float> &depth,
+		const ftl::rgbd::Camera &camera,
+		const ClipSpace &clip, cudaStream_t stream);
+
+void point_cloud(ftl::cuda::TextureObject<float> &output, ftl::cuda::TextureObject<float4> &points, const ftl::rgbd::Camera &params, const float4x4 &poseinv, cudaStream_t stream);
+
+void world_to_cam(ftl::cuda::TextureObject<float4> &points, const float4x4 &poseinv, cudaStream_t stream);
+
+void cam_to_world(ftl::cuda::TextureObject<float4> &points, const float4x4 &pose, cudaStream_t stream);
 
 }
 }
diff --git a/components/renderers/cpp/include/ftl/cuda/warp.hpp b/components/renderers/cpp/include/ftl/cuda/warp.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..9164b0eeeb8b3ef606aef4930f55b38a1afacdc4
--- /dev/null
+++ b/components/renderers/cpp/include/ftl/cuda/warp.hpp
@@ -0,0 +1,48 @@
+#ifndef _FTL_CUDA_WARP_HPP_
+#define _FTL_CUDA_WARP_HPP_
+
+#ifndef WARP_SIZE
+#define WARP_SIZE 32
+#endif
+
+#define FULL_MASK 0xffffffff
+
+namespace ftl {
+namespace cuda {
+
+__device__ inline float warpMin(float e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e = min(e, other);
+	}
+	return e;
+}
+
+__device__ inline float warpMax(float e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e = max(e, other);
+	}
+	return e;
+}
+
+__device__ inline float warpSum(float e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e += other;
+	}
+	return e;
+}
+
+__device__ inline int warpSum(int e) {
+	for (int i = WARP_SIZE/2; i > 0; i /= 2) {
+		const int other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
+		e += other;
+	}
+	return e;
+}
+
+}
+}
+
+#endif  // _FTL_CUDA_WARP_HPP_
diff --git a/components/renderers/cpp/include/ftl/cuda/weighting.hpp b/components/renderers/cpp/include/ftl/cuda/weighting.hpp
index 9498d0508605087306db2658b2a1ae1943cde536..15d3dbcec387f97d8ffe60690bdb5c1fda2c098c 100644
--- a/components/renderers/cpp/include/ftl/cuda/weighting.hpp
+++ b/components/renderers/cpp/include/ftl/cuda/weighting.hpp
@@ -4,19 +4,41 @@
 namespace ftl {
 namespace cuda {
 
+__device__ inline float weighting(float r, float h) {
+	if (r >= h) return 0.0f;
+	float rh = r / h;
+	rh = 1.0f - rh*rh;
+	return rh*rh*rh*rh;
+}
+
 /*
  * Guennebaud, G.; Gross, M. Algebraic point set surfaces. ACMTransactions on Graphics Vol. 26, No. 3, Article No. 23, 2007.
  * Used in: FusionMLS: Highly dynamic 3D reconstruction with consumer-grade RGB-D cameras
  *     r = distance between points
  *     h = smoothing parameter in meters (default 4cm)
  */
-__device__ inline float spatialWeighting(float r, float h) {
+__device__ inline float spatialWeighting(const float3 &a, const float3 &b, float h) {
+	const float r = length(a-b);
 	if (r >= h) return 0.0f;
 	float rh = r / h;
 	rh = 1.0f - rh*rh;
 	return rh*rh*rh*rh;
 }
 
+/*
+ * Colour weighting as suggested in:
+ * C. Kuster et al. Spatio-Temporal Geometry Fusion for Multiple Hybrid Cameras using Moving Least Squares Surfaces. 2014.
+ * c = colour distance
+ */
+__device__ inline float colourWeighting(uchar4 a, uchar4 b, float h) {
+	const float3 delta = make_float3((float)a.x - (float)b.x, (float)a.y - (float)b.y, (float)a.z - (float)b.z);
+	const float c = length(delta);
+	if (c >= h) return 0.0f;
+	float ch = c / h;
+	ch = 1.0f - ch*ch;
+	return ch*ch*ch*ch;
+}
+
 }
 }
 
diff --git a/components/renderers/cpp/include/ftl/render/renderer.hpp b/components/renderers/cpp/include/ftl/render/renderer.hpp
index 1871b9f9f2a8e1fda0766e1c2e74d2169f47f3fa..432be6839de24e94448afbaf407260ea44c5a508 100644
--- a/components/renderers/cpp/include/ftl/render/renderer.hpp
+++ b/components/renderers/cpp/include/ftl/render/renderer.hpp
@@ -26,7 +26,7 @@ class Renderer : public ftl::Configurable {
      * the virtual camera object passed, and writes the result into the
      * virtual camera.
      */
-    virtual bool render(ftl::rgbd::VirtualSource *, ftl::rgbd::Frame &, cudaStream_t)=0;
+    virtual bool render(ftl::rgbd::VirtualSource *, ftl::rgbd::Frame &)=0;
 };
 
 }
diff --git a/components/renderers/cpp/include/ftl/render/splat_params.hpp b/components/renderers/cpp/include/ftl/render/splat_params.hpp
index 4f9c8882b161d7774388e8d9fff7337cb1d6e685..0509ee37f0d85163fde2b462ad0871f8a824070b 100644
--- a/components/renderers/cpp/include/ftl/render/splat_params.hpp
+++ b/components/renderers/cpp/include/ftl/render/splat_params.hpp
@@ -8,10 +8,8 @@
 namespace ftl {
 namespace render {
 
-static const uint kShowBlockBorders = 0x00000001;  // Deprecated: from voxels system
-static const uint kNoSplatting = 0x00000002;
-static const uint kNoUpsampling = 0x00000004;
-static const uint kNoTexturing = 0x00000008;
+static const uint kShowDisconMask = 0x00000001;
+static const uint kNormalWeightColours = 0x00000002;
 
 struct __align__(16) SplatParams {
 	float4x4 m_viewMatrix;
diff --git a/components/renderers/cpp/include/ftl/render/splat_render.hpp b/components/renderers/cpp/include/ftl/render/splat_render.hpp
index 2cbb82a8183a3a6269a3888134a57144c93050ca..3b36e8ec98dd37aaf40b0e3a7e12cad6b3b5a14e 100644
--- a/components/renderers/cpp/include/ftl/render/splat_render.hpp
+++ b/components/renderers/cpp/include/ftl/render/splat_render.hpp
@@ -4,6 +4,7 @@
 #include <ftl/render/renderer.hpp>
 #include <ftl/rgbd/frameset.hpp>
 #include <ftl/render/splat_params.hpp>
+#include <ftl/cuda/points.hpp>
 
 namespace ftl {
 namespace render {
@@ -21,11 +22,11 @@ class Splatter : public ftl::render::Renderer {
 	explicit Splatter(nlohmann::json &config, ftl::rgbd::FrameSet *fs);
 	~Splatter();
 
-	bool render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream=0) override;
+	bool render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out) override;
 	//void setOutputDevice(int);
 
 	protected:
-	void renderChannel(ftl::render::SplatParams &params, ftl::rgbd::Frame &out, const ftl::rgbd::Channel &channel, cudaStream_t stream);
+	void _renderChannel(ftl::rgbd::Frame &out, ftl::codecs::Channel channel_in, ftl::codecs::Channel channel_out, cudaStream_t stream);
 
 	private:
 	int device_;
@@ -39,7 +40,25 @@ class Splatter : public ftl::render::Renderer {
 	//SplatParams params_;
 
 	ftl::rgbd::Frame temp_;
+	ftl::rgbd::Frame accum_;
 	ftl::rgbd::FrameSet *scene_;
+	ftl::cuda::ClipSpace clip_;
+	bool clipping_;
+	float norm_filter_;
+	bool backcull_;
+	cv::Scalar background_;
+	bool splat_;
+	float3 light_dir_;
+	uchar4 light_diffuse_;
+	uchar4 light_ambient_;
+	ftl::render::SplatParams params_;
+	cudaStream_t stream_;
+	float3 light_pos_;
+
+	template <typename T>
+	void __blendChannel(ftl::rgbd::Frame &, ftl::codecs::Channel in, ftl::codecs::Channel out, cudaStream_t);
+	void _blendChannel(ftl::rgbd::Frame &, ftl::codecs::Channel in, ftl::codecs::Channel out, cudaStream_t);
+	void _dibr(cudaStream_t);
 };
 
 }
diff --git a/components/renderers/cpp/src/mask.cu b/components/renderers/cpp/src/mask.cu
new file mode 100644
index 0000000000000000000000000000000000000000..6e9621c8064359021de616c8662f3b4227ba1806
--- /dev/null
+++ b/components/renderers/cpp/src/mask.cu
@@ -0,0 +1,39 @@
+#include "splatter_cuda.hpp"
+#include <ftl/cuda/mask.hpp>
+
+using ftl::cuda::TextureObject;
+using ftl::cuda::Mask;
+using ftl::render::SplatParams;
+
+#define T_PER_BLOCK 16
+
+__global__ void show_mask_kernel(
+        TextureObject<uchar4> colour,
+        TextureObject<int> mask,
+        int id, uchar4 style) {
+        
+	const int x = (blockIdx.x*blockDim.x + threadIdx.x);
+	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x >= 0 && x < colour.width() && y >=0 && y < colour.height()) {
+        Mask m(mask.tex2D(x,y));
+
+        if (m.is(id)) {
+            colour(x,y) = style;
+        }
+    }
+}
+
+
+void ftl::cuda::show_mask(
+        TextureObject<uchar4> &colour,
+        TextureObject<int> &mask,
+        int id, uchar4 style, cudaStream_t stream) {
+	const dim3 gridSize((colour.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    show_mask_kernel<<<gridSize, blockSize, 0, stream>>>(
+        colour, mask, id, style
+    );
+    cudaSafeCall( cudaGetLastError() );
+}
diff --git a/components/renderers/cpp/src/normals.cu b/components/renderers/cpp/src/normals.cu
new file mode 100644
index 0000000000000000000000000000000000000000..14357d02ed00f212577e463051d315e6e5a90078
--- /dev/null
+++ b/components/renderers/cpp/src/normals.cu
@@ -0,0 +1,353 @@
+#include <ftl/cuda/normals.hpp>
+#include <ftl/cuda/weighting.hpp>
+
+#define T_PER_BLOCK 16
+#define MINF __int_as_float(0xff800000)
+
+__global__ void computeNormals_kernel(ftl::cuda::TextureObject<float4> output,
+        ftl::cuda::TextureObject<float4> input) {
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if(x >= input.width() || y >= input.height()) return;
+
+	output(x,y) = make_float4(0, 0, 0, 0);
+
+	if(x > 0 && x < input.width()-1 && y > 0 && y < input.height()-1) {
+		const float3 CC = make_float3(input.tex2D((int)x+0, (int)y+0)); //[(y+0)*width+(x+0)];
+		const float3 PC = make_float3(input.tex2D((int)x+0, (int)y+1)); //[(y+1)*width+(x+0)];
+		const float3 CP = make_float3(input.tex2D((int)x+1, (int)y+0)); //[(y+0)*width+(x+1)];
+		const float3 MC = make_float3(input.tex2D((int)x+0, (int)y-1)); //[(y-1)*width+(x+0)];
+		const float3 CM = make_float3(input.tex2D((int)x-1, (int)y+0)); //[(y+0)*width+(x-1)];
+
+		if(CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF) {
+			const float3 n = cross(PC-MC, CP-CM);
+			const float  l = length(n);
+
+			if(l > 0.0f) {
+				output(x,y) = make_float4(n/-l, 1.0f);
+			}
+		}
+	}
+}
+
+__device__ inline bool isValid(const ftl::rgbd::Camera &camera, const float3 &d) {
+	return d.z >= camera.minDepth && d.z <= camera.maxDepth;
+}
+
+__global__ void computeNormals_kernel(ftl::cuda::TextureObject<float4> output,
+		ftl::cuda::TextureObject<int> input, ftl::rgbd::Camera camera, float3x3 pose) {
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if(x >= input.width() || y >= input.height()) return;
+
+	output(x,y) = make_float4(0, 0, 0, 0);
+
+	if(x > 0 && x < input.width()-1 && y > 0 && y < input.height()-1) {
+		const float3 CC = camera.screenToCam(x+0, y+0, (float)input.tex2D((int)x+0, (int)y+0) / 1000.0f);
+		const float3 PC = camera.screenToCam(x+0, y+1, (float)input.tex2D((int)x+0, (int)y+1) / 1000.0f);
+		const float3 CP = camera.screenToCam(x+1, y+0, (float)input.tex2D((int)x+1, (int)y+0) / 1000.0f);
+		const float3 MC = camera.screenToCam(x+0, y-1, (float)input.tex2D((int)x+0, (int)y-1) / 1000.0f);
+		const float3 CM = camera.screenToCam(x-1, y+0, (float)input.tex2D((int)x-1, (int)y+0) / 1000.0f);
+
+		//if(CC.z <  && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF) {
+		if (isValid(camera,CC) && isValid(camera,PC) && isValid(camera,CP) && isValid(camera,MC) && isValid(camera,CM)) {
+			const float3 n = cross(PC-MC, CP-CM);
+			const float  l = length(n);
+
+			if(l > 0.0f) {
+				output(x,y) = make_float4((n/-l), 1.0f);
+			}
+		}
+	}
+}
+
+template <int RADIUS>
+__global__ void smooth_normals_kernel(ftl::cuda::TextureObject<float4> norms,
+        ftl::cuda::TextureObject<float4> output,
+        ftl::cuda::TextureObject<float4> points,
+        ftl::rgbd::Camera camera, float3x3 pose, float smoothing) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= points.width() || y >= points.height()) return;
+
+    const float3 p0 = make_float3(points.tex2D((int)x,(int)y));
+    float3 nsum = make_float3(0.0f);
+    float contrib = 0.0f;
+
+    output(x,y) = make_float4(0.0f,0.0f,0.0f,0.0f);
+
+    if (p0.x == MINF) return;
+
+    for (int v=-RADIUS; v<=RADIUS; ++v) {
+        for (int u=-RADIUS; u<=RADIUS; ++u) {
+            const float3 p = make_float3(points.tex2D((int)x+u,(int)y+v));
+            if (p.x == MINF) continue;
+            const float s = ftl::cuda::spatialWeighting(p0, p, smoothing);
+            //const float s = 1.0f;
+
+            if (s > 0.0f) {
+                const float4 n = norms.tex2D((int)x+u,(int)y+v);
+                if (n.w > 0.0f) {
+                    nsum += make_float3(n) * s;
+                    contrib += s;
+                }
+            }
+        }
+    }
+
+    // Compute dot product of normal with camera to obtain measure of how
+    // well this point faces the source camera, a measure of confidence
+    float3 ray = pose * camera.screenToCam(x, y, 1.0f);
+    ray = ray / length(ray);
+    nsum /= contrib;
+    nsum /= length(nsum);
+
+    output(x,y) = (contrib > 0.0f) ? make_float4(nsum, dot(nsum, ray)) : make_float4(0.0f);
+}
+
+template <int RADIUS>
+__global__ void smooth_normals_kernel(ftl::cuda::TextureObject<float4> norms,
+        ftl::cuda::TextureObject<float4> output,
+        ftl::cuda::TextureObject<int> depth,
+        ftl::rgbd::Camera camera, float3x3 pose, float smoothing) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= depth.width() || y >= depth.height()) return;
+
+    const float3 p0 = camera.screenToCam(x,y, (float)depth.tex2D((int)x,(int)y) / 1000.0f);
+    float3 nsum = make_float3(0.0f);
+    float contrib = 0.0f;
+
+    output(x,y) = make_float4(0.0f,0.0f,0.0f,0.0f);
+
+    if (p0.z < camera.minDepth || p0.z > camera.maxDepth) return;
+
+    for (int v=-RADIUS; v<=RADIUS; ++v) {
+        for (int u=-RADIUS; u<=RADIUS; ++u) {
+            const float3 p = camera.screenToCam(x+u,y+v, (float)depth.tex2D((int)x+u,(int)y+v) / 1000.0f);
+            if (p.z < camera.minDepth || p.z > camera.maxDepth) continue;
+            const float s = ftl::cuda::spatialWeighting(p0, p, smoothing);
+            //const float s = 1.0f;
+
+            //if (s > 0.0f) {
+                const float4 n = norms.tex2D((int)x+u,(int)y+v);
+                if (n.w > 0.0f) {
+                    nsum += make_float3(n) * s;
+                    contrib += s;
+                }
+            //}
+        }
+    }
+
+    // Compute dot product of normal with camera to obtain measure of how
+    // well this point faces the source camera, a measure of confidence
+    float3 ray = camera.screenToCam(x, y, 1.0f);
+    ray = ray / length(ray);
+    nsum /= contrib;
+    nsum /= length(nsum);
+
+    output(x,y) = (contrib > 0.0f) ? make_float4(pose*nsum, 1.0f) : make_float4(0.0f);
+}
+
+template <>
+__global__ void smooth_normals_kernel<0>(ftl::cuda::TextureObject<float4> norms,
+        ftl::cuda::TextureObject<float4> output,
+        ftl::cuda::TextureObject<int> depth,
+        ftl::rgbd::Camera camera, float3x3 pose, float smoothing) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= depth.width() || y >= depth.height()) return;
+
+    output(x,y) = make_float4(0.0f,0.0f,0.0f,0.0f);
+
+    const float3 p0 = camera.screenToCam(x,y, (float)depth.tex2D((int)x,(int)y) / 1000.0f);
+
+    if (p0.z < camera.minDepth || p0.z > camera.maxDepth) return;
+
+    // Compute dot product of normal with camera to obtain measure of how
+    // well this point faces the source camera, a measure of confidence
+    //float3 ray = camera.screenToCam(x, y, 1.0f);
+    //ray = ray / length(ray);
+    //nsum /= contrib;
+    //nsum /= length(nsum);
+
+    const float4 n = norms.tex2D((int)x,(int)y);
+    output(x,y) = n;
+}
+
+void ftl::cuda::normals(ftl::cuda::TextureObject<float4> &output,
+        ftl::cuda::TextureObject<float4> &temp,
+		ftl::cuda::TextureObject<float4> &input,
+		int radius,
+		float smoothing,
+        const ftl::rgbd::Camera &camera,
+        const float3x3 &pose,cudaStream_t stream) {
+	const dim3 gridSize((input.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (input.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	computeNormals_kernel<<<gridSize, blockSize, 0, stream>>>(temp, input);
+    cudaSafeCall( cudaGetLastError() );
+
+	switch (radius) {
+	case 9: smooth_normals_kernel<9><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	case 7: smooth_normals_kernel<7><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	case 5: smooth_normals_kernel<5><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+    case 3: smooth_normals_kernel<3><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+    case 2: smooth_normals_kernel<2><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+    case 1: smooth_normals_kernel<1><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+    case 0: smooth_normals_kernel<0><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	}
+    cudaSafeCall( cudaGetLastError() );
+
+#ifdef _DEBUG
+	cudaSafeCall(cudaDeviceSynchronize());
+	//cutilCheckMsg(__FUNCTION__);
+#endif
+}
+
+void ftl::cuda::normals(ftl::cuda::TextureObject<float4> &output,
+		ftl::cuda::TextureObject<float4> &temp,
+		ftl::cuda::TextureObject<int> &input,
+		int radius,
+		float smoothing,
+		const ftl::rgbd::Camera &camera,
+		const float3x3 &pose_inv, const float3x3 &pose,cudaStream_t stream) {
+	const dim3 gridSize((input.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (input.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	computeNormals_kernel<<<gridSize, blockSize, 0, stream>>>(temp, input, camera, pose);
+	cudaSafeCall( cudaGetLastError() );
+
+	switch (radius) {
+	case 7: smooth_normals_kernel<7><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	case 5: smooth_normals_kernel<5><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	case 3: smooth_normals_kernel<3><<<gridSize, blockSize, 0, stream>>>(temp, output, input, camera, pose, smoothing); break;
+	}
+	cudaSafeCall( cudaGetLastError() );
+
+	#ifdef _DEBUG
+	cudaSafeCall(cudaDeviceSynchronize());
+	//cutilCheckMsg(__FUNCTION__);
+	#endif
+}
+
+//==============================================================================
+
+__global__ void vis_normals_kernel(ftl::cuda::TextureObject<float4> norm,
+        ftl::cuda::TextureObject<uchar4> output,
+        float3 direction, uchar4 diffuse, uchar4 ambient) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= norm.width() || y >= norm.height()) return;
+
+    output(x,y) = make_uchar4(0,0,0,0);
+    float3 ray = direction;
+    ray = ray / length(ray);
+    float3 n = make_float3(norm.tex2D((int)x,(int)y));
+    float l = length(n);
+    if (l == 0) return;
+    n /= l;
+
+    const float d = max(dot(ray, n), 0.0f);
+    output(x,y) = make_uchar4(
+		min(255.0f, diffuse.x*d + ambient.x),
+		min(255.0f, diffuse.y*d + ambient.y),
+		min(255.0f, diffuse.z*d + ambient.z), 255);
+}
+
+void ftl::cuda::normal_visualise(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<uchar4> &output,
+        const float3 &light, const uchar4 &diffuse, const uchar4 &ambient,
+        cudaStream_t stream) {
+
+    const dim3 gridSize((norm.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (norm.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    vis_normals_kernel<<<gridSize, blockSize, 0, stream>>>(norm, output, light, diffuse, ambient);
+
+    cudaSafeCall( cudaGetLastError() );
+#ifdef _DEBUG
+    cudaSafeCall(cudaDeviceSynchronize());
+    //cutilCheckMsg(__FUNCTION__);
+#endif
+}
+
+//==============================================================================
+
+__global__ void filter_normals_kernel(ftl::cuda::TextureObject<float4> norm,
+        ftl::cuda::TextureObject<float4> output,
+        ftl::rgbd::Camera camera, float4x4 pose, float thresh) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= norm.width() || y >= norm.height()) return;
+
+    float3 ray = pose.getFloat3x3() * camera.screenToCam(x,y,1.0f);
+    ray = ray / length(ray);
+    float3 n = make_float3(norm.tex2D((int)x,(int)y));
+    float l = length(n);
+    if (l == 0) {
+        output(x,y) = make_float4(MINF);
+        return;
+    }
+    n /= l;
+
+    const float d = dot(ray, n);
+    if (d <= thresh) {
+        output(x,y) = make_float4(MINF);
+    }
+}
+
+void ftl::cuda::normal_filter(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<float4> &output,
+        const ftl::rgbd::Camera &camera, const float4x4 &pose,
+        float thresh,
+        cudaStream_t stream) {
+
+    const dim3 gridSize((norm.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (norm.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    filter_normals_kernel<<<gridSize, blockSize, 0, stream>>>(norm, output, camera, pose, thresh);
+
+    cudaSafeCall( cudaGetLastError() );
+    #ifdef _DEBUG
+    cudaSafeCall(cudaDeviceSynchronize());
+    //cutilCheckMsg(__FUNCTION__);
+    #endif
+}
+
+//==============================================================================
+
+__global__ void transform_normals_kernel(ftl::cuda::TextureObject<float4> norm,
+        float3x3 pose) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= norm.width() || y >= norm.height()) return;
+
+    float3 normal = pose * make_float3(norm.tex2D((int)x,(int)y));
+    normal /= length(normal);
+    norm(x,y) = make_float4(normal, 0.0f);
+}
+
+void ftl::cuda::transform_normals(ftl::cuda::TextureObject<float4> &norm,
+        const float3x3 &pose,
+        cudaStream_t stream) {
+
+    const dim3 gridSize((norm.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (norm.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    transform_normals_kernel<<<gridSize, blockSize, 0, stream>>>(norm, pose);
+
+    cudaSafeCall( cudaGetLastError() );
+    #ifdef _DEBUG
+    cudaSafeCall(cudaDeviceSynchronize());
+    //cutilCheckMsg(__FUNCTION__);
+    #endif
+}
diff --git a/components/renderers/cpp/src/points.cu b/components/renderers/cpp/src/points.cu
index 39764e4c8aba523caf2758262d9f41f8782ac9dc..1b8dbf84ed7b1cc58e4a19c28928075d495759cc 100644
--- a/components/renderers/cpp/src/points.cu
+++ b/components/renderers/cpp/src/points.cu
@@ -2,27 +2,123 @@
 
 #define T_PER_BLOCK 8
 
+template <int RADIUS>
 __global__ void point_cloud_kernel(ftl::cuda::TextureObject<float4> output, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params, float4x4 pose)
 {
 	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
 	if (x < params.width && y < params.height) {
+		output(x,y) = make_float4(MINF, MINF, MINF, MINF);
+
+		const float d = depth.tex2D((int)x, (int)y);
+
+		// Calculate depth between 0.0 and 1.0
+		float p = (d - params.minDepth) / (params.maxDepth - params.minDepth);
+
+		if (d >= params.minDepth && d <= params.maxDepth) {
+			/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
+			// Is there a discontinuity nearby?
+			for (int u=-RADIUS; u<=RADIUS; ++u) {
+				for (int v=-RADIUS; v<=RADIUS; ++v) {
+					// If yes, the flag using w = -1
+					if (fabs(depth.tex2D((int)x+u, (int)y+v) - d) > 0.1f) p = -1.0f;
+				}
+			}
+
+			output(x,y) = make_float4(pose * params.screenToCam(x, y, d), p);
+		}
+	}
+}
+
+template <>
+__global__ void point_cloud_kernel<0>(ftl::cuda::TextureObject<float4> output, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params, float4x4 pose)
+{
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x < params.width && y < params.height) {
+		output(x,y) = make_float4(MINF, MINF, MINF, MINF);
+
 		float d = depth.tex2D((int)x, (int)y);
 
-		output(x,y) = (d >= params.minDepth && d <= params.maxDepth) ?
-			make_float4(pose * params.screenToCam(x, y, d), 0.0f) :
-			make_float4(MINF, MINF, MINF, MINF);
+		if (d >= params.minDepth && d <= params.maxDepth) {
+			output(x,y) = make_float4(pose * params.screenToCam(x, y, d), d);
+		}
 	}
 }
 
-void ftl::cuda::point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, const float4x4 &pose, cudaStream_t stream) {
+void ftl::cuda::point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, const float4x4 &pose, uint discon, cudaStream_t stream) {
 	const dim3 gridSize((params.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (params.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
 	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-	point_cloud_kernel<<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose);
+	switch (discon) {
+	case 4 :	point_cloud_kernel<4><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+	case 3 :	point_cloud_kernel<3><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+	case 2 :	point_cloud_kernel<2><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+	case 1 :	point_cloud_kernel<1><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+	default:	point_cloud_kernel<0><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose);
+	}
+	cudaSafeCall( cudaGetLastError() );
 
 #ifdef _DEBUG
 	cudaSafeCall(cudaDeviceSynchronize());
 #endif
 }
+
+//==============================================================================
+
+__device__ bool isClipped(const float4 &p, const ftl::cuda::ClipSpace &clip) {
+	const float3 tp = clip.origin * make_float3(p);
+	return fabs(tp.x) > clip.size.x || fabs(tp.y) > clip.size.y || fabs(tp.z) > clip.size.z;
+}
+
+__global__ void clipping_kernel(ftl::cuda::TextureObject<float4> points, ftl::cuda::ClipSpace clip)
+{
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x < points.width() && y < points.height()) {
+		float4 p = points(x,y);
+
+		if (isClipped(p, clip)) {
+			points(x,y) = make_float4(MINF, MINF, MINF, MINF);
+		}
+	}
+}
+
+__global__ void clipping_kernel(ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera camera, ftl::cuda::ClipSpace clip)
+{
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x < depth.width() && y < depth.height()) {
+		float d = depth(x,y);
+		float4 p = make_float4(camera.screenToCam(x,y,d), 0.0f);
+
+		if (isClipped(p, clip)) {
+			depth(x,y) = MINF;
+		}
+	}
+}
+
+void ftl::cuda::clipping(ftl::cuda::TextureObject<float4> &points,
+		const ClipSpace &clip, cudaStream_t stream) {
+
+	const dim3 gridSize((points.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (points.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	clipping_kernel<<<gridSize, blockSize, 0, stream>>>(points, clip);
+	cudaSafeCall( cudaGetLastError() );
+}
+
+void ftl::cuda::clipping(ftl::cuda::TextureObject<float> &depth,
+	const ftl::rgbd::Camera &camera,
+	const ClipSpace &clip, cudaStream_t stream) {
+
+const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+clipping_kernel<<<gridSize, blockSize, 0, stream>>>(depth, camera, clip);
+cudaSafeCall( cudaGetLastError() );
+}
diff --git a/components/renderers/cpp/src/splat_render.cpp b/components/renderers/cpp/src/splat_render.cpp
index 1b39ccebf69e589372ab2944cb907bb03d1dbdd8..30c47dc1179018a552d8b083245eac71ba56dae2 100644
--- a/components/renderers/cpp/src/splat_render.cpp
+++ b/components/renderers/cpp/src/splat_render.cpp
@@ -2,36 +2,206 @@
 #include <ftl/utility/matrix_conversion.hpp>
 #include "splatter_cuda.hpp"
 #include <ftl/cuda/points.hpp>
+#include <ftl/cuda/normals.hpp>
+#include <ftl/cuda/mask.hpp>
 
 #include <opencv2/core/cuda_stream_accessor.hpp>
 
+#include <string>
+
 using ftl::render::Splatter;
-using ftl::rgbd::Channel;
-using ftl::rgbd::Channels;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 using ftl::rgbd::Format;
 using cv::cuda::GpuMat;
+using std::stoul;
+using ftl::cuda::Mask;
+
+static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
+  Eigen::Affine3d rx =
+      Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+  Eigen::Affine3d ry =
+      Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+  Eigen::Affine3d rz =
+      Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+  return rz * rx * ry;
+}
+
+/*
+ * Parse a CSS style colour string into a scalar.
+ */
+static cv::Scalar parseCVColour(const std::string &colour) {
+	std::string c = colour;
+	if (c[0] == '#') {
+		c.erase(0, 1);
+		unsigned long value = stoul(c.c_str(), nullptr, 16);
+		return cv::Scalar(
+			(value >> 0) & 0xff,
+			(value >> 8) & 0xff,
+			(value >> 16) & 0xff,
+			(value >> 24) & 0xff
+		);
+	}
+
+	return cv::Scalar(0,0,0,0);
+}
+
+/*
+ * Parse a CSS style colour string into a scalar.
+ */
+static uchar4 parseCUDAColour(const std::string &colour) {
+	std::string c = colour;
+	if (c[0] == '#') {
+		c.erase(0, 1);
+		unsigned long value = stoul(c.c_str(), nullptr, 16);
+		return make_uchar4(
+			(value >> 0) & 0xff,
+			(value >> 8) & 0xff,
+			(value >> 16) & 0xff,
+			(value >> 24) & 0xff
+		);
+	}
+
+	return make_uchar4(0,0,0,0);
+}
 
 Splatter::Splatter(nlohmann::json &config, ftl::rgbd::FrameSet *fs) : ftl::render::Renderer(config), scene_(fs) {
+	if (config["clipping"].is_object()) {
+		auto &c = config["clipping"];
+		float rx = c.value("pitch", 0.0f);
+		float ry = c.value("yaw", 0.0f);
+		float rz = c.value("roll", 0.0f);
+		float x = c.value("x", 0.0f);
+		float y = c.value("y", 0.0f);
+		float z = c.value("z", 0.0f);
+		float width = c.value("width", 1.0f);
+		float height = c.value("height", 1.0f);
+		float depth = c.value("depth", 1.0f);
+
+		Eigen::Affine3f r = create_rotation_matrix(rx, ry, rz).cast<float>();
+		Eigen::Translation3f trans(Eigen::Vector3f(x,y,z));
+		Eigen::Affine3f t(trans);
+
+		clip_.origin = MatrixConversion::toCUDA(r.matrix() * t.matrix());
+		clip_.size = make_float3(width, height, depth);
+		clipping_ = value("clipping_enabled", true);
+	} else {
+		clipping_ = false;
+	}
 
+	on("clipping_enabled", [this](const ftl::config::Event &e) {
+		clipping_ = value("clipping_enabled", true);
+	});
+
+	norm_filter_ = value("normal_filter", -1.0f);
+	on("normal_filter", [this](const ftl::config::Event &e) {
+		norm_filter_ = value("normal_filter", -1.0f);
+	});
+
+	backcull_ = value("back_cull", true);
+	on("back_cull", [this](const ftl::config::Event &e) {
+		backcull_ = value("back_cull", true);
+	});
+
+	splat_ = value("splatting", true);
+	on("splatting", [this](const ftl::config::Event &e) {
+		splat_ = value("splatting", true);
+	});
+
+	background_ = parseCVColour(value("background", std::string("#4c4c4c")));
+	on("background", [this](const ftl::config::Event &e) {
+		background_ = parseCVColour(value("background", std::string("#4c4c4c")));
+	});
+
+	light_diffuse_ = parseCUDAColour(value("diffuse", std::string("#e0e0e0")));
+	on("diffuse", [this](const ftl::config::Event &e) {
+		light_diffuse_ = parseCUDAColour(value("diffuse", std::string("#e0e0e0")));
+	});
+
+	light_ambient_ = parseCUDAColour(value("ambient", std::string("#0e0e0e")));
+	on("ambient", [this](const ftl::config::Event &e) {
+		light_ambient_ = parseCUDAColour(value("ambient", std::string("#0e0e0e")));
+	});
+
+	light_pos_ = make_float3(value("light_x", 0.3f), value("light_y", 0.2f), value("light_z", 1.0f));
+	on("light_x", [this](const ftl::config::Event &e) { light_pos_.x = value("light_x", 0.3f); });
+	on("light_y", [this](const ftl::config::Event &e) { light_pos_.y = value("light_y", 0.2f); });
+	on("light_z", [this](const ftl::config::Event &e) { light_pos_.z = value("light_z", 1.0f); });
+
+	cudaSafeCall(cudaStreamCreate(&stream_));
 }
 
 Splatter::~Splatter() {
 
 }
 
-void Splatter::renderChannel(
-					ftl::render::SplatParams &params, ftl::rgbd::Frame &out,
-					const Channel &channel, cudaStream_t stream)
-{
+template <typename T>
+struct AccumSelector {
+	typedef float4 type;
+	static constexpr Channel channel = Channel::Colour;
+	//static constexpr cv::Scalar value = cv::Scalar(0.0f,0.0f,0.0f,0.0f);
+};
+
+template <>
+struct AccumSelector<float> {
+	typedef float type;
+	static constexpr Channel channel = Channel::Colour2;
+	//static constexpr cv::Scalar value = cv::Scalar(0.0f);
+};
+
+template <typename T>
+void Splatter::__blendChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel in, ftl::codecs::Channel out, cudaStream_t stream) {
 	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
-	temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
-	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
-	temp_.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
+	temp_.create<GpuMat>(
+		AccumSelector<T>::channel,
+		Format<typename AccumSelector<T>::type>(params_.camera.width, params_.camera.height)
+	).setTo(cv::Scalar(0.0f), cvstream);
 	temp_.get<GpuMat>(Channel::Contribution).setTo(cv::Scalar(0.0f), cvstream);
 
-	bool is_float = ftl::rgbd::isFloatChannel(channel);
+	temp_.createTexture<float>(Channel::Contribution);
+
+	for (auto &f : scene_->frames) {
+		if (f.get<GpuMat>(in).type() == CV_8UC3) {
+			// Convert to 4 channel colour
+			auto &col = f.get<GpuMat>(in);
+			GpuMat tmp(col.size(), CV_8UC4);
+			cv::cuda::swap(col, tmp);
+			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
+		}
+
+		ftl::cuda::dibr_attribute(
+			f.createTexture<T>(in),
+			f.createTexture<float4>(Channel::Points),
+			temp_.getTexture<int>(Channel::Depth2),
+			temp_.createTexture<typename AccumSelector<T>::type>(AccumSelector<T>::channel),
+			temp_.getTexture<float>(Channel::Contribution),
+			params_, stream
+		);
+	}
+
+	ftl::cuda::dibr_normalise(
+		temp_.getTexture<typename AccumSelector<T>::type>(AccumSelector<T>::channel),
+		output.createTexture<T>(out),
+		temp_.getTexture<float>(Channel::Contribution),
+		stream
+	);
+}
+
+void Splatter::_blendChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel in, ftl::codecs::Channel out, cudaStream_t stream) {
+	int type = output.get<GpuMat>(out).type(); // == CV_32F; //ftl::rgbd::isFloatChannel(channel);
 	
-	// Render each camera into virtual view
+	switch (type) {
+	case CV_32F		: __blendChannel<float>(output, in, out, stream); break;
+	case CV_32FC4	: __blendChannel<float4>(output, in, out, stream); break;
+	case CV_8UC4	: __blendChannel<uchar4>(output, in, out, stream); break;
+	default			: LOG(ERROR) << "Invalid output channel format";
+	}
+}
+
+void Splatter::_dibr(cudaStream_t stream) {
+	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
 		auto &f = scene_->frames[i];
 		auto *s = scene_->sources[i];
@@ -41,178 +211,285 @@ void Splatter::renderChannel(
 			continue;
 		}
 
-		// Needs to create points channel first?
-		if (!f.hasChannel(Channel::Points)) {
-			//LOG(INFO) << "Creating points... " << s->parameters().width;
-			
-			auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
-			auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
-			ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);
-
-			//LOG(INFO) << "POINTS Added";
-		}
-
 		ftl::cuda::dibr_merge(
 			f.createTexture<float4>(Channel::Points),
-			temp_.getTexture<int>(Channel::Depth),
-			params, stream
+			f.createTexture<float4>(Channel::Normals),
+			temp_.createTexture<int>(Channel::Depth2),
+			params_, backcull_, stream
 		);
 
 		//LOG(INFO) << "DIBR DONE";
 	}
+}
+
+void Splatter::_renderChannel(
+		ftl::rgbd::Frame &out,
+		Channel channel_in, Channel channel_out, cudaStream_t stream)
+{
+	if (channel_out == Channel::None || channel_in == Channel::None) return;
+	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+
+	if (scene_->frames.size() < 1) return;
+	bool is_float = out.get<GpuMat>(channel_out).type() == CV_32F; //ftl::rgbd::isFloatChannel(channel);
+	bool is_4chan = out.get<GpuMat>(channel_out).type() == CV_32FC4;
 
-	// TODO: Add the depth splatting step..
 
 	temp_.createTexture<float4>(Channel::Colour);
 	temp_.createTexture<float>(Channel::Contribution);
 
-	// Accumulate attribute contributions for each pixel
-	for (auto &f : scene_->frames) {
-		// Convert colour from BGR to BGRA if needed
-		if (f.get<GpuMat>(Channel::Colour).type() == CV_8UC3) {
-			// Convert to 4 channel colour
-			auto &col = f.get<GpuMat>(Channel::Colour);
-			GpuMat tmp(col.size(), CV_8UC4);
-			cv::cuda::swap(col, tmp);
-			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
-		}
-	
-		if (is_float) {
-			ftl::cuda::dibr_attribute(
-				f.createTexture<float>(channel),
-				f.createTexture<float4>(Channel::Points),
-				temp_.getTexture<int>(Channel::Depth),
-				temp_.getTexture<float4>(Channel::Colour),
-				temp_.getTexture<float>(Channel::Contribution),
-				params, stream
+	// Generate initial normals for the splats
+	accum_.create<GpuMat>(Channel::Normals, Format<float4>(params_.camera.width, params_.camera.height));
+	_blendChannel(accum_, Channel::Normals, Channel::Normals, stream);
+	// Put normals in camera space here...
+	ftl::cuda::transform_normals(accum_.getTexture<float4>(Channel::Normals), params_.m_viewMatrix.getFloat3x3(), stream);
+
+	// Estimate point density
+	accum_.create<GpuMat>(Channel::Density, Format<float>(params_.camera.width, params_.camera.height));
+	_blendChannel(accum_, Channel::Depth, Channel::Density, stream);
+
+	// FIXME: Using colour 2 in this way seems broken since it is already used
+	if (is_4chan) {
+		accum_.create<GpuMat>(channel_out, Format<float4>(params_.camera.width, params_.camera.height));
+		accum_.get<GpuMat>(channel_out).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
+	} else if (is_float) {
+		accum_.create<GpuMat>(channel_out, Format<float>(params_.camera.width, params_.camera.height));
+		accum_.get<GpuMat>(channel_out).setTo(cv::Scalar(0.0f), cvstream);
+	} else {
+		accum_.create<GpuMat>(channel_out, Format<uchar4>(params_.camera.width, params_.camera.height));
+		accum_.get<GpuMat>(channel_out).setTo(cv::Scalar(0,0,0,0), cvstream);
+	}
+
+	//if (splat_) {
+		_blendChannel(accum_, channel_in, channel_out, stream);
+	//} else {
+	//	_blendChannel(out, channel, channel, stream);
+	//}
+
+	// Now splat the points
+	if (splat_) {
+		if (is_4chan) {
+			ftl::cuda::splat(
+				accum_.getTexture<float4>(Channel::Normals),
+				accum_.getTexture<float>(Channel::Density),
+				accum_.getTexture<float4>(channel_out),
+				temp_.getTexture<int>(Channel::Depth2),
+				out.createTexture<float>(Channel::Depth),
+				out.createTexture<float4>(channel_out),
+				params_, stream
 			);
-		} else if (channel == Channel::Colour || channel == Channel::Right) {
-			ftl::cuda::dibr_attribute(
-				f.createTexture<uchar4>(Channel::Colour),
-				f.createTexture<float4>(Channel::Points),
-				temp_.getTexture<int>(Channel::Depth),
-				temp_.getTexture<float4>(Channel::Colour),
-				temp_.getTexture<float>(Channel::Contribution),
-				params, stream
+		} else if (is_float) {
+			ftl::cuda::splat(
+				accum_.getTexture<float4>(Channel::Normals),
+				accum_.getTexture<float>(Channel::Density),
+				accum_.getTexture<float>(channel_out),
+				temp_.getTexture<int>(Channel::Depth2),
+				out.createTexture<float>(Channel::Depth),
+				out.createTexture<float>(channel_out),
+				params_, stream
 			);
 		} else {
-			ftl::cuda::dibr_attribute(
-				f.createTexture<uchar4>(channel),
-				f.createTexture<float4>(Channel::Points),
-				temp_.getTexture<int>(Channel::Depth),
-				temp_.getTexture<float4>(Channel::Colour),
-				temp_.getTexture<float>(Channel::Contribution),
-				params, stream
+			ftl::cuda::splat(
+				accum_.getTexture<float4>(Channel::Normals),
+				accum_.getTexture<float>(Channel::Density),
+				accum_.getTexture<uchar4>(channel_out),
+				temp_.getTexture<int>(Channel::Depth2),
+				out.createTexture<float>(Channel::Depth),
+				out.createTexture<uchar4>(channel_out),
+				params_, stream
 			);
 		}
-	}
-
-	if (is_float) {
-		// Normalise attribute contributions
-		ftl::cuda::dibr_normalise(
-			temp_.createTexture<float4>(Channel::Colour),
-			out.createTexture<float>(channel),
-			temp_.createTexture<float>(Channel::Contribution),
-			stream
-		);
 	} else {
-		// Normalise attribute contributions
-		ftl::cuda::dibr_normalise(
-			temp_.createTexture<float4>(Channel::Colour),
-			out.createTexture<uchar4>(channel),
-			temp_.createTexture<float>(Channel::Contribution),
-			stream
-		);
+		// Swap accum frames directly to output.
+		accum_.swapTo(Channels(channel_out), out);
 	}
 }
 
-bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream) {
+bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out) {
 	SHARED_LOCK(scene_->mtx, lk);
 	if (!src->isReady()) return false;
 
-	const auto &camera = src->parameters();
+	scene_->upload(Channel::Colour + Channel::Depth, stream_);
 
+	const auto &camera = src->parameters();
 	//cudaSafeCall(cudaSetDevice(scene_->getCUDADevice()));
 
 	// Create all the required channels
+	
 	out.create<GpuMat>(Channel::Depth, Format<float>(camera.width, camera.height));
 	out.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height));
 
-	// FIXME: Use source resolutions, not virtual resolution
+
+	if (scene_->frames.size() == 0) return false;
+	auto &g = scene_->frames[0].get<GpuMat>(Channel::Colour);
+
 	temp_.create<GpuMat>(Channel::Colour, Format<float4>(camera.width, camera.height));
-	temp_.create<GpuMat>(Channel::Colour2, Format<uchar4>(camera.width, camera.height));
 	temp_.create<GpuMat>(Channel::Contribution, Format<float>(camera.width, camera.height));
 	temp_.create<GpuMat>(Channel::Depth, Format<int>(camera.width, camera.height));
 	temp_.create<GpuMat>(Channel::Depth2, Format<int>(camera.width, camera.height));
-	temp_.create<GpuMat>(Channel::Normals, Format<float4>(camera.width, camera.height));
+	temp_.create<GpuMat>(Channel::Normals, Format<float4>(g.cols, g.rows));
 
-	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
-
-	// Create buffers if they don't exist
-	/*if ((unsigned int)depth1_.width() != camera.width || (unsigned int)depth1_.height() != camera.height) {
-		depth1_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
-	}
-	if ((unsigned int)depth3_.width() != camera.width || (unsigned int)depth3_.height() != camera.height) {
-		depth3_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
-	}
-	if ((unsigned int)colour1_.width() != camera.width || (unsigned int)colour1_.height() != camera.height) {
-		colour1_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
-	}
-	if ((unsigned int)colour_tmp_.width() != camera.width || (unsigned int)colour_tmp_.height() != camera.height) {
-		colour_tmp_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
-	}
-	if ((unsigned int)normal1_.width() != camera.width || (unsigned int)normal1_.height() != camera.height) {
-		normal1_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
-	}
-	if ((unsigned int)depth2_.width() != camera.width || (unsigned int)depth2_.height() != camera.height) {
-		depth2_ = ftl::cuda::TextureObject<float>(camera.width, camera.height);
-	}
-	if ((unsigned int)colour2_.width() != camera.width || (unsigned int)colour2_.height() != camera.height) {
-		colour2_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
-	}*/
+	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream_);
 
 	// Parameters object to pass to CUDA describing the camera
-	SplatParams params;
+	SplatParams &params = params_;
 	params.m_flags = 0;
-	if (src->value("splatting", true) == false) params.m_flags |= ftl::render::kNoSplatting;
-	if (src->value("upsampling", true) == false) params.m_flags |= ftl::render::kNoUpsampling;
-	if (src->value("texturing", true) == false) params.m_flags |= ftl::render::kNoTexturing;
+	//if () params.m_flags |= ftl::render::kShowDisconMask;
+	if (value("normal_weight_colours", true)) params.m_flags |= ftl::render::kNormalWeightColours;
 	params.m_viewMatrix = MatrixConversion::toCUDA(src->getPose().cast<float>().inverse());
 	params.m_viewMatrixInverse = MatrixConversion::toCUDA(src->getPose().cast<float>());
 	params.camera = camera;
-
 	// Clear all channels to 0 or max depth
 
 	out.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
-	out.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(76,76,76), cvstream);
+	out.get<GpuMat>(Channel::Colour).setTo(background_, cvstream);
 
 	//LOG(INFO) << "Render ready: " << camera.width << "," << camera.height;
 
+	bool show_discon = value("show_discontinuity_mask", false);
+	bool show_fill = value("show_filled", false);
+
 	temp_.createTexture<int>(Channel::Depth);
+	//temp_.get<GpuMat>(Channel::Normals).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
+
+	// First make sure each input has normals
+	temp_.createTexture<float4>(Channel::Normals);
+	for (int i=0; i<scene_->frames.size(); ++i) {
+		auto &f = scene_->frames[i];
+		auto s = scene_->sources[i];
+
+		if (f.hasChannel(Channel::Mask)) {
+			if (show_discon) {
+				ftl::cuda::show_mask(f.getTexture<uchar4>(Channel::Colour), f.getTexture<int>(Channel::Mask), Mask::kMask_Discontinuity, make_uchar4(0,0,255,255), stream_);
+			}
+			if (show_fill) {
+				ftl::cuda::show_mask(f.getTexture<uchar4>(Channel::Colour), f.getTexture<int>(Channel::Mask), Mask::kMask_Filled, make_uchar4(0,255,0,255), stream_);
+			}
+		}
+
+		// Needs to create points channel first?
+		if (!f.hasChannel(Channel::Points)) {
+			//LOG(INFO) << "Creating points... " << s->parameters().width;
+			
+			auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
+			auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
+			ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, 0, stream_);
+
+			//LOG(INFO) << "POINTS Added";
+		}
+
+		// Clip first?
+		if (clipping_) {
+			ftl::cuda::clipping(f.createTexture<float4>(Channel::Points), clip_, stream_);
+		}
+
+		if (!f.hasChannel(Channel::Normals)) {
+			Eigen::Matrix4f matrix =  s->getPose().cast<float>().transpose();
+			auto pose = MatrixConversion::toCUDA(matrix);
+
+			auto &g = f.get<GpuMat>(Channel::Colour);
+			ftl::cuda::normals(f.createTexture<float4>(Channel::Normals, Format<float4>(g.cols, g.rows)),
+				temp_.getTexture<float4>(Channel::Normals),
+				f.getTexture<float4>(Channel::Points),
+				1, 0.02f,
+				s->parameters(), pose.getFloat3x3(), stream_);
+
+			if (norm_filter_ > -0.1f) {
+				ftl::cuda::normal_filter(f.getTexture<float4>(Channel::Normals), f.getTexture<float4>(Channel::Points), s->parameters(), pose, norm_filter_, stream_);
+			}
+		}
+	}
 
-	renderChannel(params, out, Channel::Colour, stream);
-	
 	Channel chan = src->getChannel();
+
+	int aligned_source = value("aligned_source",-1);
+	if (aligned_source >= 0 && aligned_source < scene_->frames.size()) {
+		// FIXME: Output may not be same resolution as source!
+		cudaSafeCall(cudaStreamSynchronize(stream_));
+		scene_->frames[aligned_source].copyTo(Channel::Depth + Channel::Colour, out);
+
+		if (chan == Channel::Normals) {
+			// Convert normal to single float value
+			temp_.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height)).setTo(cv::Scalar(0,0,0,0), cvstream);
+			ftl::cuda::normal_visualise(scene_->frames[aligned_source].getTexture<float4>(Channel::Normals), temp_.createTexture<uchar4>(Channel::Colour),
+					light_pos_,
+					light_diffuse_,
+					light_ambient_, stream_);
+
+			// Put in output as single float
+			cv::cuda::swap(temp_.get<GpuMat>(Channel::Colour), out.create<GpuMat>(Channel::Normals));
+			out.resetTexture(Channel::Normals);
+		}
+
+		return true;
+	}
+
+	_dibr(stream_);
+	_renderChannel(out, Channel::Colour, Channel::Colour, stream_);
+	
 	if (chan == Channel::Depth)
 	{
-		temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
+		//temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
+	} else if (chan == Channel::Normals) {
+		out.create<GpuMat>(Channel::Normals, Format<float4>(camera.width, camera.height));
+
+		// Render normal attribute
+		_renderChannel(out, Channel::Normals, Channel::Normals, stream_);
+
+		// Convert normal to single float value
+		temp_.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height)).setTo(cv::Scalar(0,0,0,0), cvstream);
+		ftl::cuda::normal_visualise(out.getTexture<float4>(Channel::Normals), temp_.createTexture<uchar4>(Channel::Colour),
+				light_pos_,
+				light_diffuse_,
+				light_ambient_, stream_);
+
+		// Put in output as single float
+		cv::cuda::swap(temp_.get<GpuMat>(Channel::Colour), out.create<GpuMat>(Channel::Normals));
+		out.resetTexture(Channel::Normals);
 	}
-	else if (chan == Channel::Contribution)
-	{
-		cv::cuda::swap(temp_.get<GpuMat>(Channel::Contribution), out.create<GpuMat>(Channel::Contribution));
+	//else if (chan == Channel::Contribution)
+	//{
+	//	cv::cuda::swap(temp_.get<GpuMat>(Channel::Contribution), out.create<GpuMat>(Channel::Contribution));
+	//}
+	else if (chan == Channel::Density) {
+		out.create<GpuMat>(chan, Format<float>(camera.width, camera.height));
+		out.get<GpuMat>(chan).setTo(cv::Scalar(0.0f), cvstream);
+		_renderChannel(out, Channel::Depth, Channel::Density, stream_);
 	}
 	else if (chan == Channel::Right)
 	{
-		Eigen::Affine3f transform(Eigen::Translation3f(camera.baseline,0.0f,0.0f));
-		Eigen::Matrix4f matrix =  src->getPose().cast<float>() * transform.matrix();
+		float baseline = camera.baseline;
+		
+		//Eigen::Translation3f translation(baseline, 0.0f, 0.0f);
+		//Eigen::Affine3f transform(translation);
+		//Eigen::Matrix4f matrix = transform.matrix() * src->getPose().cast<float>();
+
+		Eigen::Matrix4f transform = Eigen::Matrix4f::Identity();
+		transform(0, 3) = baseline;
+		Eigen::Matrix4f matrix = transform.inverse() * src->getPose().cast<float>();
+		
 		params.m_viewMatrix = MatrixConversion::toCUDA(matrix.inverse());
 		params.m_viewMatrixInverse = MatrixConversion::toCUDA(matrix);
+
+		params.camera = src->parameters(Channel::Right);
 		
 		out.create<GpuMat>(Channel::Right, Format<uchar4>(camera.width, camera.height));
-		out.get<GpuMat>(Channel::Right).setTo(cv::Scalar(76,76,76), cvstream);
-		renderChannel(params, out, Channel::Right, stream);
+		out.get<GpuMat>(Channel::Right).setTo(background_, cvstream);
+
+		_dibr(stream_); // Need to re-dibr due to pose change
+		_renderChannel(out, Channel::Left, Channel::Right, stream_);
+
+	} else if (chan != Channel::None) {
+		if (ftl::codecs::isFloatChannel(chan)) {
+			out.create<GpuMat>(chan, Format<float>(camera.width, camera.height));
+			out.get<GpuMat>(chan).setTo(cv::Scalar(0.0f), cvstream);
+		} else {
+			out.create<GpuMat>(chan, Format<uchar4>(camera.width, camera.height));
+			out.get<GpuMat>(chan).setTo(background_, cvstream);
+		}
+		_renderChannel(out, chan, chan, stream_);
 	}
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
 	return true;
 }
 
diff --git a/components/renderers/cpp/src/splatter.cu b/components/renderers/cpp/src/splatter.cu
index 3b1ae4b47ef0fe6b29b15d1fa20fdc9a0fd0b9bb..3a9270e542ee58e836410e0de598cbd4ab9e269c 100644
--- a/components/renderers/cpp/src/splatter.cu
+++ b/components/renderers/cpp/src/splatter.cu
@@ -4,6 +4,8 @@
 #include <ftl/cuda_common.hpp>
 
 #include <ftl/cuda/weighting.hpp>
+#include <ftl/cuda/intersections.hpp>
+#include <ftl/cuda/warp.hpp>
 
 #define T_PER_BLOCK 8
 #define UPSAMPLE_FACTOR 1.8f
@@ -13,22 +15,51 @@
 #define MAX_ITERATIONS 32  // Note: Must be multiple of 32
 #define SPATIAL_SMOOTHING 0.005f
 
+#define ENERGY_THRESHOLD 0.1f
+#define SMOOTHING_MULTIPLIER_A 10.0f	// For surface search
+#define SMOOTHING_MULTIPLIER_B 4.0f		// For z contribution
+#define SMOOTHING_MULTIPLIER_C 2.0f		// For colour contribution
+
+#define ACCUM_DIAMETER 8
+
 using ftl::cuda::TextureObject;
 using ftl::render::SplatParams;
+using ftl::cuda::warpMin;
+using ftl::cuda::warpSum;
 
 /*
  * Pass 1: Directly render each camera into virtual view but with no upsampling
  * for sparse points.
  */
- __global__ void dibr_merge_kernel(TextureObject<float4> points, TextureObject<int> depth, SplatParams params) {
+ template <bool CULLING>
+ __global__ void dibr_merge_kernel(TextureObject<float4> points,
+		TextureObject<float4> normals,
+		TextureObject<int> depth, SplatParams params) {
 	const int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-	const float3 worldPos = make_float3(points.tex2D(x, y));
-	if (worldPos.x == MINF) return;
+	const float4 worldPos = points.tex2D(x, y);
+	if (worldPos.x == MINF || (!(params.m_flags & ftl::render::kShowDisconMask) && worldPos.w < 0.0f)) return;
+
+	// Compile time enable/disable of culling back facing points
+	if (CULLING) {
+		float3 ray = params.m_viewMatrixInverse.getFloat3x3() * params.camera.screenToCam(x,y,1.0f);
+        ray = ray / length(ray);
+        float4 n4 = normals.tex2D((int)x,(int)y);
+        if (n4.w == 0.0f) return;
+		float3 n = make_float3(n4);
+		float l = length(n);
+		if (l == 0) {
+			return;
+		}
+		n /= l;
+
+		const float facing = dot(ray, n);
+		if (facing <= 0.0f) return;
+	}
 
     // Find the virtual screen position of current point
-	const float3 camPos = params.m_viewMatrix * worldPos;
+	const float3 camPos = params.m_viewMatrix * make_float3(worldPos);
 	if (camPos.z < params.camera.minDepth) return;
 	if (camPos.z > params.camera.maxDepth) return;
 
@@ -43,11 +74,12 @@ using ftl::render::SplatParams;
 	}
 }
 
-void ftl::cuda::dibr_merge(TextureObject<float4> &points, TextureObject<int> &depth, SplatParams params, cudaStream_t stream) {
+void ftl::cuda::dibr_merge(TextureObject<float4> &points, TextureObject<float4> &normals, TextureObject<int> &depth, SplatParams params, bool culling, cudaStream_t stream) {
     const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
     const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-    dibr_merge_kernel<<<gridSize, blockSize, 0, stream>>>(points, depth, params);
+	if (culling) dibr_merge_kernel<true><<<gridSize, blockSize, 0, stream>>>(points, normals, depth, params);
+	else dibr_merge_kernel<false><<<gridSize, blockSize, 0, stream>>>(points, normals, depth, params);
     cudaSafeCall( cudaGetLastError() );
 }
 
@@ -57,248 +89,400 @@ __device__ inline float4 make_float4(const uchar4 &c) {
     return make_float4(c.x,c.y,c.z,c.w);
 }
 
+__device__ inline float4 make_float4(const float4 &v) {
+	return v;
+}
 
-#define ENERGY_THRESHOLD 0.1f
-#define SMOOTHING_MULTIPLIER_A 10.0f	// For surface search
-#define SMOOTHING_MULTIPLIER_B 4.0f		// For z contribution
-#define SMOOTHING_MULTIPLIER_C 4.0f		// For colour contribution
+template <typename T>
+__device__ inline T make();
 
-/*
- * Pass 2: Accumulate attribute contributions if the points pass a visibility test.
- */
-__global__ void dibr_attribute_contrib_kernel(
-        TextureObject<uchar4> colour_in,    // Original colour image
-        TextureObject<float4> points,       // Original 3D points
-        TextureObject<int> depth_in,        // Virtual depth map
-        TextureObject<float4> colour_out,   // Accumulated output
-        //TextureObject<float4> normal_out,
-        TextureObject<float> contrib_out,
-        SplatParams params) {
-        
-	//const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
+template <>
+__device__ inline uchar4 make() {
+	return make_uchar4(0,0,0,0);
+}
 
-	const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
-	//const int warp = tid / WARP_SIZE;
-	const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
-	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+template <>
+__device__ inline float4 make() {
+	return make_float4(0.0f,0.0f,0.0f,0.0f);
+}
 
-	const float3 worldPos = make_float3(points.tex2D(x, y));
-	//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
-	if (worldPos.x == MINF) return;
-    //const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
+template <>
+__device__ inline float make() {
+	return 0.0f;
+}
 
-	const float3 camPos = params.m_viewMatrix * worldPos;
-	if (camPos.z < params.camera.minDepth) return;
-	if (camPos.z > params.camera.maxDepth) return;
-	const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
+template <typename T>
+__device__ inline T make(const float4 &);
 
-    const int upsample = 8; //min(UPSAMPLE_MAX, int((5.0f*r) * params.camera.fx / camPos.z));
+template <>
+__device__ inline uchar4 make(const float4 &v) {
+	return make_uchar4((int)v.x, (int)v.y, (int)v.z, (int)v.w);
+}
 
-	// Not on screen so stop now...
-	if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
-            
-    // Is this point near the actual surface and therefore a contributor?
-    const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
-    //if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
+template <>
+__device__ inline float4 make(const float4 &v) {
+	return v;
+}
 
-    // TODO:(Nick) Should just one thread load these to shared mem?
-    const float4 colour = make_float4(colour_in.tex2D(x, y));
-    //const float4 normal = tex2D<float4>(camera.normal, x, y);
+template <>
+__device__ inline float make(const float4 &v) {
+	return v.x;
+}
 
-	// Each thread in warp takes an upsample point and updates corresponding depth buffer.
-	const int lane = tid % WARP_SIZE;
-	for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
-		const float u = (i % upsample) - (upsample / 2);
-		const float v = (i / upsample) - (upsample / 2);
+template <typename T>
+__device__ inline T make(const uchar4 &v);
 
-        // Use the depth buffer to determine this pixels 3D position in camera space
-        const float d = ((float)depth_in.tex2D(screenPos.x+u, screenPos.y+v)/1000.0f);
-		const float3 nearest = params.camera.screenToCam((int)(screenPos.x+u),(int)(screenPos.y+v),d);
-
-        // What is contribution of our current point at this pixel?
-        const float weight = ftl::cuda::spatialWeighting(length(nearest - camPos), SMOOTHING_MULTIPLIER_C*(nearest.z/params.camera.fx));
-        if (screenPos.x+u < colour_out.width() && screenPos.y+v < colour_out.height() && weight > 0.0f) {  // TODO: Use confidence threshold here
-            const float4 wcolour = colour * weight;
-			//const float4 wnormal = normal * weight;
-			
-			//printf("Z %f\n", d);
-
-            // Add this points contribution to the pixel buffer
-            atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v), wcolour.x);
-            atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+1, wcolour.y);
-            atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+2, wcolour.z);
-            atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v)+3, wcolour.w);
-            //atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v), wnormal.x);
-            //atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+1, wnormal.y);
-            //atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+2, wnormal.z);
-            //atomicAdd((float*)&normal_out(screenPos.x+u, screenPos.y+v)+3, wnormal.w);
-            atomicAdd(&contrib_out(screenPos.x+u, screenPos.y+v), weight);
-        }
-	}
+template <>
+__device__ inline float4 make(const uchar4 &v) {
+	return make_float4((float)v.x, (float)v.y, (float)v.z, (float)v.w);
 }
 
-__global__ void dibr_attribute_contrib_kernel(
-    TextureObject<float> colour_in,    // Original colour image
-    TextureObject<float4> points,       // Original 3D points
-    TextureObject<int> depth_in,        // Virtual depth map
-    TextureObject<float4> colour_out,   // Accumulated output
-    //TextureObject<float4> normal_out,
-    TextureObject<float> contrib_out,
-    SplatParams params) {
-    
+template <typename T>
+__device__ inline T make(float v);
+
+template <>
+__device__ inline float make(float v) {
+	return v;
+}
+
+/*
+ * Pass 1b: Expand splats to full size and merge
+ */
+ template <int SEARCH_RADIUS, typename T>
+ __global__ void splat_kernel(
+        //TextureObject<float4> points,       // Original 3D points
+        TextureObject<float4> normals,
+        TextureObject<float> density,
+        TextureObject<T> in,
+        TextureObject<int> depth_in,        // Virtual depth map
+        TextureObject<float> depth_out,   // Accumulated output
+        TextureObject<T> out,
+        //ftl::rgbd::Camera camera,
+        //float4x4 pose_inv,
+        SplatParams params) {
+        
     //const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
 
-    const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
+    //const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
     //const int warp = tid / WARP_SIZE;
-    const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
+    const int x = blockIdx.x*blockDim.x + threadIdx.x;
     const int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-    const float3 worldPos = make_float3(points.tex2D(x, y));
-    //const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
-    if (worldPos.x == MINF) return;
-    //const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
+    if (x < 0 || y < 0 || x >= depth_in.width() || y >= depth_in.height()) return;
 
-    const float3 camPos = params.m_viewMatrix * worldPos;
-    if (camPos.z < params.camera.minDepth) return;
-    if (camPos.z > params.camera.maxDepth) return;
-    const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
+    //const float3 origin = params.m_viewMatrixInverse * make_float3(0.0f);
+    float3 ray = params.camera.screenToCam(x,y,1.0f);
+    ray = ray / length(ray);
+    const float scale = ray.z;
+    //ray = params.m_viewMatrixInverse.getFloat3x3() * ray;
 
-    const int upsample = 8; //min(UPSAMPLE_MAX, int((5.0f*r) * params.camera.fx / camPos.z));
+    //float depth = 0.0f;
+    //float contrib = 0.0f;
+    float depth = 1000.0f;
+    //float pdepth = 1000.0f;
 
-    // Not on screen so stop now...
-    if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
-            
-    // Is this point near the actual surface and therefore a contributor?
-    const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
-    //if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
+    struct Result {
+        float weight;
+        float depth;
+    };
 
-    // TODO:(Nick) Should just one thread load these to shared mem?
-    const float colour = (colour_in.tex2D(x, y));
-    //const float4 normal = tex2D<float4>(camera.normal, x, y);
+    Result results[2*SEARCH_RADIUS+1][2*SEARCH_RADIUS+1];
 
     // Each thread in warp takes an upsample point and updates corresponding depth buffer.
-    const int lane = tid % WARP_SIZE;
-    for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
-        const float u = (i % upsample) - (upsample / 2);
-        const float v = (i / upsample) - (upsample / 2);
+    //const int lane = tid % WARP_SIZE;
+    //for (int i=lane; i<SEARCH_DIAMETER*SEARCH_DIAMETER; i+=WARP_SIZE) {
+    //    const float u = (i % SEARCH_DIAMETER) - (SEARCH_DIAMETER / 2);
+    //    const float v = (i / SEARCH_DIAMETER) - (SEARCH_DIAMETER / 2);
+    for (int v=-SEARCH_RADIUS; v<=SEARCH_RADIUS; ++v) {
+    for (int u=-SEARCH_RADIUS; u<=SEARCH_RADIUS; ++u) {
+
+        results[v+SEARCH_RADIUS][u+SEARCH_RADIUS] = {0.0f, 1000.0f};
 
         // Use the depth buffer to determine this pixels 3D position in camera space
-        const float d = ((float)depth_in.tex2D(screenPos.x+u, screenPos.y+v)/1000.0f);
-        const float3 nearest = params.camera.screenToCam((int)(screenPos.x+u),(int)(screenPos.y+v),d);
-
-        // What is contribution of our current point at this pixel?
-        const float weight = ftl::cuda::spatialWeighting(length(nearest - camPos), SMOOTHING_MULTIPLIER_C*(nearest.z/params.camera.fx));
-        if (screenPos.x+u < colour_out.width() && screenPos.y+v < colour_out.height() && weight > 0.0f) {  // TODO: Use confidence threshold here
-            const float wcolour = colour * weight;
-            //const float4 wnormal = normal * weight;
-            
-            //printf("Z %f\n", d);
+        const float d = ((float)depth_in.tex2D(x+u, y+v)/1000.0f);
 
-            // Add this points contribution to the pixel buffer
-            atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v), wcolour);
-            atomicAdd(&contrib_out(screenPos.x+u, screenPos.y+v), weight);
-        }
+        const float3 n = make_float3(normals.tex2D((int)(x)+u, (int)(y)+v));
+        const float dens = density.tex2D((int)(x)+u, (int)(y)+v);
+
+        if (d < params.camera.minDepth || d > params.camera.maxDepth) continue;
+
+        const float3 camPos = params.camera.screenToCam((int)(x)+u,(int)(y)+v,d);
+        //const float3 camPos2 = params.camera.screenToCam((int)(x),(int)(y),d);
+        //const float3 worldPos = params.m_viewMatrixInverse * camPos;
+
+
+        
+		//if (length(make_float3(n)) == 0.0f) printf("BAD NORMAL\n");
+
+        // Does the ray intersect plane of splat?
+        float t = 1000.0f;
+        const float r = ftl::cuda::intersectDistance(n, camPos, make_float3(0.0f), ray, t);
+        //if (r != PINF) { //} && fabs(t-camPos.z) < 0.01f) {
+            // Adjust from normalised ray back to original meters units
+            t *= scale;
+            float weight = ftl::cuda::weighting(r, dens/params.camera.fx); // (1.0f/params.camera.fx) / (t/params.camera.fx)
+
+            /* Buehler C. et al. 2001. Unstructured Lumigraph Rendering. */
+            /* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
+            // This is the simple naive colour weighting. It might be good
+            // enough for our purposes if the alignment step prevents ghosting
+            // TODO: Use depth and perhaps the neighbourhood consistency in:
+            //     Kuster C. et al. 2011. FreeCam: A hybrid camera system for interactive free-viewpoint video
+            //if (params.m_flags & ftl::render::kNormalWeightColours) weight *= n.w * n.w;
+            //if (params.m_flags & ftl::render::kDepthWeightColours) weight *= ???
+
+            if (weight <= 0.0f) continue;
+
+            depth = min(depth, t);
+            results[v+SEARCH_RADIUS][u+SEARCH_RADIUS] = {weight, t};
+        //}
+    }
+    }
+
+    //depth = warpMin(depth);
+    //pdepth = warpMin(pdepth);
+
+    float adepth = 0.0f;
+    float contrib = 0.0f;
+    float4 attr = make_float4(0.0f);
+
+    // Loop over results array
+    for (int v=-SEARCH_RADIUS; v<=SEARCH_RADIUS; ++v) {
+    for (int u=-SEARCH_RADIUS; u<=SEARCH_RADIUS; ++u) {
+        auto &result = results[v+SEARCH_RADIUS][u+SEARCH_RADIUS];
+        float s = ftl::cuda::weighting(fabs(result.depth - depth), 0.04f);
+        //if (result.depth - depth < 0.04f) {
+            adepth += result.depth * result.weight * s;
+            attr += make_float4(in.tex2D((int)x+u, (int)y+v)) * result.weight * s;
+            contrib += result.weight * s;
+        //}
+    }
+    }
+
+    // Sum all attributes and contributions
+    //adepth = warpSum(adepth);
+    //attr.x = warpSum(attr.x);
+    //attr.y = warpSum(attr.y);
+    //attr.z = warpSum(attr.z);
+    //contrib = warpSum(contrib);
+
+    if (contrib > 0.0f) {
+        depth_out(x,y) = adepth / contrib;
+        out(x,y) = make<T>(attr / contrib);
     }
 }
 
-void ftl::cuda::dibr_attribute(
-        TextureObject<uchar4> &colour_in,    // Original colour image
-        TextureObject<float4> &points,       // Original 3D points
+template <typename T>
+void ftl::cuda::splat(
+        TextureObject<float4> &normals,
+        TextureObject<float> &density,
+        TextureObject<T> &colour_in,
         TextureObject<int> &depth_in,        // Virtual depth map
-        TextureObject<float4> &colour_out,   // Accumulated output
-        //TextureObject<float4> normal_out,
-        TextureObject<float> &contrib_out,
-        SplatParams &params, cudaStream_t stream) {
-    const dim3 gridSize((depth_in.width() + 2 - 1)/2, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-	const dim3 blockSize(2*WARP_SIZE, T_PER_BLOCK);
-
-    dibr_attribute_contrib_kernel<<<gridSize, blockSize, 0, stream>>>(
+        TextureObject<float> &depth_out,
+        TextureObject<T> &colour_out,
+        const SplatParams &params, cudaStream_t stream) {
+            const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+            const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    splat_kernel<4,T><<<gridSize, blockSize, 0, stream>>>(
+        normals,
+        density,
         colour_in,
-        points,
         depth_in,
+        depth_out,
         colour_out,
-        contrib_out,
         params
     );
     cudaSafeCall( cudaGetLastError() );
 }
 
+template void ftl::cuda::splat<uchar4>(
+        TextureObject<float4> &normals,
+        TextureObject<float> &density,
+        TextureObject<uchar4> &colour_in,
+        TextureObject<int> &depth_in,        // Virtual depth map
+        TextureObject<float> &depth_out,
+        TextureObject<uchar4> &colour_out,
+		const SplatParams &params, cudaStream_t stream);
+		
+template void ftl::cuda::splat<float4>(
+    TextureObject<float4> &normals,
+    TextureObject<float> &density,
+	TextureObject<float4> &colour_in,
+	TextureObject<int> &depth_in,        // Virtual depth map
+	TextureObject<float> &depth_out,
+	TextureObject<float4> &colour_out,
+	const SplatParams &params, cudaStream_t stream);
+
+template void ftl::cuda::splat<float>(
+    TextureObject<float4> &normals,
+    TextureObject<float> &density,
+	TextureObject<float> &colour_in,
+	TextureObject<int> &depth_in,        // Virtual depth map
+	TextureObject<float> &depth_out,
+	TextureObject<float> &colour_out,
+	const SplatParams &params, cudaStream_t stream);
+
+//==============================================================================
+
+template <typename T>
+__device__ inline T generateInput(const T &in, const SplatParams &params, const float4 &worldPos) {
+	return in;
+}
+
+template <>
+__device__ inline uchar4 generateInput(const uchar4 &in, const SplatParams &params, const float4 &worldPos) {
+	return (params.m_flags & ftl::render::kShowDisconMask && worldPos.w < 0.0f) ?
+		make_uchar4(0,0,255,255) :  // Show discontinuity mask in red
+		in;
+}
+
+template <typename A, typename B>
+__device__ inline B weightInput(const A &in, float weight) {
+	return in * weight;
+}
+
+template <>
+__device__ inline float4 weightInput(const uchar4 &in, float weight) {
+	return make_float4(
+		(float)in.x * weight,
+		(float)in.y * weight,
+		(float)in.z * weight,
+		(float)in.w * weight);
+}
+
+template <typename T>
+__device__ inline void accumulateOutput(TextureObject<T> &out, TextureObject<float> &contrib, const uint2 &pos, const T &in, float w) {
+	atomicAdd(&out(pos.x, pos.y), in);
+	atomicAdd(&contrib(pos.x, pos.y), w);
+} 
+
+template <>
+__device__ inline void accumulateOutput(TextureObject<float4> &out, TextureObject<float> &contrib, const uint2 &pos, const float4 &in, float w) {
+	atomicAdd((float*)&out(pos.x, pos.y), in.x);
+	atomicAdd(((float*)&out(pos.x, pos.y))+1, in.y);
+	atomicAdd(((float*)&out(pos.x, pos.y))+2, in.z);
+	atomicAdd(((float*)&out(pos.x, pos.y))+3, in.w);
+	atomicAdd(&contrib(pos.x, pos.y), w);
+} 
+
+/*
+ * Pass 2: Accumulate attribute contributions if the points pass a visibility test.
+ */
+ template <typename A, typename B>
+__global__ void dibr_attribute_contrib_kernel(
+        TextureObject<A> in,				// Attribute input
+        TextureObject<float4> points,       // Original 3D points
+        TextureObject<int> depth_in,        // Virtual depth map
+		TextureObject<B> out,			// Accumulated output
+		TextureObject<float> contrib,
+        SplatParams params) {
+        
+	const int x = (blockIdx.x*blockDim.x + threadIdx.x);
+	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	const float4 worldPos = points.tex2D(x, y);
+	if (worldPos.x == MINF || (!(params.m_flags & ftl::render::kShowDisconMask) && worldPos.w < 0.0f)) return;
+
+	const float3 camPos = params.m_viewMatrix * make_float3(worldPos);
+	if (camPos.z < params.camera.minDepth) return;
+	if (camPos.z > params.camera.maxDepth) return;
+	const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
+
+	// Not on screen so stop now...
+	if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
+            
+    // Is this point near the actual surface and therefore a contributor?
+    const float d = (float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y) / 1000.0f;
+
+	const A input = generateInput(in.tex2D(x, y), params, worldPos);
+	const float weight = ftl::cuda::weighting(fabs(camPos.z - d), 0.02f);
+	const B weighted = make<B>(input) * weight; //weightInput(input, weight);
+
+	if (weight > 0.0f) {
+		accumulateOutput(out, contrib, screenPos, weighted, weight);
+		//out(screenPos.x, screenPos.y) = input;
+	}
+}
+
+
+template <typename A, typename B>
 void ftl::cuda::dibr_attribute(
-        TextureObject<float> &colour_in,    // Original colour image
+        TextureObject<A> &in,
         TextureObject<float4> &points,       // Original 3D points
         TextureObject<int> &depth_in,        // Virtual depth map
-        TextureObject<float4> &colour_out,   // Accumulated output
-        //TextureObject<float4> normal_out,
-        TextureObject<float> &contrib_out,
+		TextureObject<B> &out,   // Accumulated output
+		TextureObject<float> &contrib,
         SplatParams &params, cudaStream_t stream) {
-    const dim3 gridSize((depth_in.width() + 2 - 1)/2, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-    const dim3 blockSize(2*WARP_SIZE, T_PER_BLOCK);
+	const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
     dibr_attribute_contrib_kernel<<<gridSize, blockSize, 0, stream>>>(
-        colour_in,
+        in,
         points,
         depth_in,
-        colour_out,
-        contrib_out,
+		out,
+		contrib,
         params
     );
     cudaSafeCall( cudaGetLastError() );
 }
 
-//==============================================================================
+template void ftl::cuda::dibr_attribute(
+	ftl::cuda::TextureObject<uchar4> &in,	// Original colour image
+	ftl::cuda::TextureObject<float4> &points,		// Original 3D points
+	ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
+	ftl::cuda::TextureObject<float4> &out,	// Accumulated output
+	ftl::cuda::TextureObject<float> &contrib,
+	ftl::render::SplatParams &params, cudaStream_t stream);
+
+template void ftl::cuda::dibr_attribute(
+	ftl::cuda::TextureObject<float> &in,	// Original attribute input (single-channel float)
+	ftl::cuda::TextureObject<float4> &points,		// Original 3D points
+	ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
+	ftl::cuda::TextureObject<float> &out,	// Accumulated output
+	ftl::cuda::TextureObject<float> &contrib,
+	ftl::render::SplatParams &params, cudaStream_t stream);
+
+template void ftl::cuda::dibr_attribute(
+	ftl::cuda::TextureObject<float4> &in,	// Original colour image
+	ftl::cuda::TextureObject<float4> &points,		// Original 3D points
+	ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
+	ftl::cuda::TextureObject<float4> &out,	// Accumulated output
+	ftl::cuda::TextureObject<float> &contrib,
+	ftl::render::SplatParams &params, cudaStream_t stream);
 
-__global__ void dibr_normalise_kernel(
-        TextureObject<float4> colour_in,
-        TextureObject<uchar4> colour_out,
-        //TextureObject<float4> normals,
-        TextureObject<float> contribs) {
-    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
-    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
-
-    if (x < colour_in.width() && y < colour_in.height()) {
-        const float4 colour = colour_in.tex2D((int)x,(int)y);
-        //const float4 normal = normals.tex2D((int)x,(int)y);
-        const float contrib = contribs.tex2D((int)x,(int)y);
-
-        if (contrib > 0.0f) {
-            colour_out(x,y) = make_uchar4(colour.x / contrib, colour.y / contrib, colour.z / contrib, 0);
-            //normals(x,y) = normal / contrib;
-        }
-    }
-}
+//==============================================================================
 
+template <typename A, typename B>
 __global__ void dibr_normalise_kernel(
-        TextureObject<float4> colour_in,
-        TextureObject<float> colour_out,
-        //TextureObject<float4> normals,
+        TextureObject<A> in,
+        TextureObject<B> out,
         TextureObject<float> contribs) {
     const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
     const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-    if (x < colour_in.width() && y < colour_in.height()) {
-        const float4 colour = colour_in.tex2D((int)x,(int)y);
+    if (x < in.width() && y < in.height()) {
+        const A a = in.tex2D((int)x,(int)y);
         //const float4 normal = normals.tex2D((int)x,(int)y);
         const float contrib = contribs.tex2D((int)x,(int)y);
 
         if (contrib > 0.0f) {
-            colour_out(x,y) = colour.x / contrib;
+            out(x,y) = make<B>(a / contrib);
             //normals(x,y) = normal / contrib;
         }
     }
 }
 
-void ftl::cuda::dibr_normalise(TextureObject<float4> &colour_in, TextureObject<uchar4> &colour_out, TextureObject<float> &contribs, cudaStream_t stream) {
-    const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+template <typename A, typename B>
+void ftl::cuda::dibr_normalise(TextureObject<A> &in, TextureObject<B> &out, TextureObject<float> &contribs, cudaStream_t stream) {
+    const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
     const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-    dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(colour_in, colour_out, contribs);
+    dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(in, out, contribs);
     cudaSafeCall( cudaGetLastError() );
 }
 
-void ftl::cuda::dibr_normalise(TextureObject<float4> &colour_in, TextureObject<float> &colour_out, TextureObject<float> &contribs, cudaStream_t stream) {
-    const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
-    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
-
-    dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(colour_in, colour_out, contribs);
-    cudaSafeCall( cudaGetLastError() );
-}
+template void ftl::cuda::dibr_normalise<float4,uchar4>(TextureObject<float4> &in, TextureObject<uchar4> &out, TextureObject<float> &contribs, cudaStream_t stream);
+template void ftl::cuda::dibr_normalise<float,float>(TextureObject<float> &in, TextureObject<float> &out, TextureObject<float> &contribs, cudaStream_t stream);
+template void ftl::cuda::dibr_normalise<float4,float4>(TextureObject<float4> &in, TextureObject<float4> &out, TextureObject<float> &contribs, cudaStream_t stream);
diff --git a/components/renderers/cpp/src/splatter_cuda.hpp b/components/renderers/cpp/src/splatter_cuda.hpp
index 8c57d58486c04aff5960f215c6a6308719e6af7b..42ec8a0c7dbca51b1c3fe7e22ba2c38eb2447169 100644
--- a/components/renderers/cpp/src/splatter_cuda.hpp
+++ b/components/renderers/cpp/src/splatter_cuda.hpp
@@ -8,39 +8,42 @@ namespace ftl {
 namespace cuda {
 	void dibr_merge(
 		ftl::cuda::TextureObject<float4> &points,
+		ftl::cuda::TextureObject<float4> &normals,
 		ftl::cuda::TextureObject<int> &depth,
 		ftl::render::SplatParams params,
+		bool culling,
 		cudaStream_t stream);
 
-	void dibr_attribute(
-		ftl::cuda::TextureObject<uchar4> &in,	// Original colour image
-		ftl::cuda::TextureObject<float4> &points,		// Original 3D points
-		ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
-		ftl::cuda::TextureObject<float4> &out,	// Accumulated output
-		//TextureObject<float4> normal_out,
-		ftl::cuda::TextureObject<float> &contrib_out,
-		ftl::render::SplatParams &params, cudaStream_t stream);
+	template <typename T>
+	void splat(
+        ftl::cuda::TextureObject<float4> &normals,
+		ftl::cuda::TextureObject<float> &density,
+		ftl::cuda::TextureObject<T> &colour_in,
+        ftl::cuda::TextureObject<int> &depth_in,        // Virtual depth map
+        ftl::cuda::TextureObject<float> &depth_out,
+		ftl::cuda::TextureObject<T> &colour_out,
+        const ftl::render::SplatParams &params, cudaStream_t stream);
 
+	template <typename A, typename B>
 	void dibr_attribute(
-		ftl::cuda::TextureObject<float> &in,	// Original colour image
+		ftl::cuda::TextureObject<A> &in,	// Original attribute input (colour or other channel)
 		ftl::cuda::TextureObject<float4> &points,		// Original 3D points
 		ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
-		ftl::cuda::TextureObject<float4> &out,	// Accumulated output
-		//TextureObject<float4> normal_out,
-		ftl::cuda::TextureObject<float> &contrib_out,
+		ftl::cuda::TextureObject<B> &out,	// Accumulated output
+		ftl::cuda::TextureObject<float> &contrib,
 		ftl::render::SplatParams &params, cudaStream_t stream);
 
+	template <typename A, typename B>
 	void dibr_normalise(
-		ftl::cuda::TextureObject<float4> &in,
-		ftl::cuda::TextureObject<uchar4> &out,
+		ftl::cuda::TextureObject<A> &in,
+		ftl::cuda::TextureObject<B> &out,
 		ftl::cuda::TextureObject<float> &contribs,
 		cudaStream_t stream);
 
-	void dibr_normalise(
-		ftl::cuda::TextureObject<float4> &in,
-		ftl::cuda::TextureObject<float> &out,
-		ftl::cuda::TextureObject<float> &contribs,
-		cudaStream_t stream);
+	void show_mask(
+        ftl::cuda::TextureObject<uchar4> &colour,
+		ftl::cuda::TextureObject<int> &mask,
+        int id, uchar4 style, cudaStream_t stream);
 }
 }
 
diff --git a/components/rgbd-sources/CMakeLists.txt b/components/rgbd-sources/CMakeLists.txt
index 2b056d009a73dd7ee82adaedbf03bb98194d636f..0dbd5ccf8cca5aecea9b270c6fc28d4b53b3b4ac 100644
--- a/components/rgbd-sources/CMakeLists.txt
+++ b/components/rgbd-sources/CMakeLists.txt
@@ -1,13 +1,13 @@
 set(RGBDSRC
-	src/calibrate.cpp
-	src/local.cpp
+	src/sources/stereovideo/calibrate.cpp
+	src/sources/stereovideo/local.cpp
 	src/disparity.cpp
 	src/source.cpp
 	src/frame.cpp
 	src/frameset.cpp
-	src/stereovideo.cpp
-	src/middlebury_source.cpp
-	src/net.cpp
+	src/sources/stereovideo/stereovideo.cpp
+	src/sources/middlebury/middlebury_source.cpp
+	src/sources/net/net.cpp
 	src/streamer.cpp
 	src/colour.cpp
 	src/group.cpp
@@ -18,17 +18,19 @@ set(RGBDSRC
 	src/cb_segmentation.cpp
 	src/abr.cpp
 	src/offilter.cpp
-	src/virtual.cpp
+	src/sources/virtual/virtual.cpp
+	src/sources/ftlfile/file_source.cpp
+	src/sources/ftlfile/player.cpp
 )
 
 if (HAVE_REALSENSE)
-	list(APPEND RGBDSRC "src/realsense_source.cpp")
+	list(APPEND RGBDSRC "src/sources/realsense/realsense_source.cpp")
 endif()
 
 if (LibArchive_FOUND)
 	list(APPEND RGBDSRC
-		src/snapshot.cpp
-		src/snapshot_source.cpp
+		src/sources/snapshot/snapshot.cpp
+		src/sources/snapshot/snapshot_source.cpp
 	)
 endif (LibArchive_FOUND)
 
diff --git a/components/rgbd-sources/include/ftl/rgbd/detail/netframe.hpp b/components/rgbd-sources/include/ftl/rgbd/detail/netframe.hpp
index f01154e12c876292f724f072fbaa69812e1b29bb..1abb624c96e6a6871c2058b2e69574331183e7b4 100644
--- a/components/rgbd-sources/include/ftl/rgbd/detail/netframe.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/detail/netframe.hpp
@@ -17,8 +17,9 @@ struct NetFrame {
 	cv::Mat channel1;
 	cv::Mat channel2;
 	volatile int64_t timestamp;
-	std::atomic<int> chunk_count;
-	int chunk_total;
+	std::atomic<int> chunk_count[2];
+	std::atomic<int> channel_count;
+	int chunk_total[2];
 	std::atomic<int> tx_size;
 	int64_t tx_latency;
 	MUTEX mtx;
diff --git a/components/rgbd-sources/include/ftl/rgbd/detail/source.hpp b/components/rgbd-sources/include/ftl/rgbd/detail/source.hpp
index e98ff38aacd4cf0731ef96b67ecc85732d4c0c7f..0d188c0b163777e842f3bd8c31f91ddd51435269 100644
--- a/components/rgbd-sources/include/ftl/rgbd/detail/source.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/detail/source.hpp
@@ -56,7 +56,7 @@ class Source {
 	virtual bool isReady() { return false; };
 	virtual void setPose(const Eigen::Matrix4d &pose) { };
 
-	virtual Camera parameters(ftl::rgbd::Channel) { return params_; };
+	virtual Camera parameters(ftl::codecs::Channel) { return params_; };
 
 	protected:
 	capability_t capabilities_;
diff --git a/components/rgbd-sources/include/ftl/rgbd/frame.hpp b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
index 252ff271de36336938d51d616cdd5a9e87d52187..b08673c973f42253670b51a3da8e28735406c952 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frame.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
@@ -8,7 +8,7 @@
 #include <opencv2/core/cuda.hpp>
 #include <opencv2/core/cuda_stream_accessor.hpp>
 
-#include <ftl/rgbd/channels.hpp>
+#include <ftl/codecs/channels.hpp>
 #include <ftl/rgbd/format.hpp>
 #include <ftl/codecs/bitrates.hpp>
 
@@ -40,57 +40,66 @@ public:
 	//Frame(const Frame &)=delete;
 	//Frame &operator=(const Frame &)=delete;
 
-	void download(ftl::rgbd::Channel c, cv::cuda::Stream stream);
-	void upload(ftl::rgbd::Channel c, cv::cuda::Stream stream);
-	void download(ftl::rgbd::Channels c, cv::cuda::Stream stream);
-	void upload(ftl::rgbd::Channels c, cv::cuda::Stream stream);
+	void download(ftl::codecs::Channel c, cv::cuda::Stream stream);
+	void upload(ftl::codecs::Channel c, cv::cuda::Stream stream);
+	void download(ftl::codecs::Channels c, cv::cuda::Stream stream);
+	void upload(ftl::codecs::Channels c, cv::cuda::Stream stream);
 
-	inline void download(ftl::rgbd::Channel c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void upload(ftl::rgbd::Channel c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void download(ftl::rgbd::Channels c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void upload(ftl::rgbd::Channels c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
+	inline void download(ftl::codecs::Channel c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
+	inline void upload(ftl::codecs::Channel c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
+	inline void download(ftl::codecs::Channels c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
+	inline void upload(ftl::codecs::Channels c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
 
 	/**
 	 * Perform a buffer swap of the selected channels. This is intended to be
 	 * a copy from `this` to the passed frame object but by buffer swap
 	 * instead of memory copy, meaning `this` may become invalid afterwards.
 	 */
-	void swapTo(ftl::rgbd::Channels, Frame &);
+	void swapTo(ftl::codecs::Channels, Frame &);
+
+	void swapChannels(ftl::codecs::Channel, ftl::codecs::Channel);
+
+	/**
+	 * Does a host or device memory copy into the given frame.
+	 */
+	void copyTo(ftl::codecs::Channels, Frame &);
 
 	/**
 	 * Create a channel with a given format. This will discard any existing
 	 * data associated with the channel and ensure all data structures and
 	 * memory allocations match the new format.
 	 */
-	template <typename T> T &create(ftl::rgbd::Channel c, const ftl::rgbd::FormatBase &f);
+	template <typename T> T &create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f);
 
 	/**
 	 * Create a channel but without any format.
 	 */
-	template <typename T> T &create(ftl::rgbd::Channel c);
+	template <typename T> T &create(ftl::codecs::Channel c);
 
 	/**
 	 * Create a CUDA texture object for a channel. This version takes a format
 	 * argument to also create (or recreate) the associated GpuMat.
 	 */
 	template <typename T>
-	ftl::cuda::TextureObject<T> &createTexture(ftl::rgbd::Channel c, const ftl::rgbd::Format<T> &f);
+	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c, const ftl::rgbd::Format<T> &f);
 
 	/**
 	 * Create a CUDA texture object for a channel. With this version the GpuMat
 	 * must already exist and be of the correct type.
 	 */
 	template <typename T>
-	ftl::cuda::TextureObject<T> &createTexture(ftl::rgbd::Channel c);
+	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c);
+
+	void resetTexture(ftl::codecs::Channel c);
 
 	/**
 	 * Reset all channels without releasing memory.
 	 */
 	void reset();
 
-	bool empty(ftl::rgbd::Channels c);
+	bool empty(ftl::codecs::Channels c);
 
-	inline bool empty(ftl::rgbd::Channel c) {
+	inline bool empty(ftl::codecs::Channel c) {
 		auto &m = _get(c);
 		return !hasChannel(c) || (m.host.empty() && m.gpu.empty());
 	}
@@ -98,17 +107,17 @@ public:
 	/**
 	 * Is there valid data in channel (either host or gpu).
 	 */
-	inline bool hasChannel(ftl::rgbd::Channel channel) const {
+	inline bool hasChannel(ftl::codecs::Channel channel) const {
 		return channels_.has(channel);
 	}
 
-	inline ftl::rgbd::Channels getChannels() const { return channels_; }
+	inline ftl::codecs::Channels getChannels() const { return channels_; }
 
 	/**
 	 * Is the channel data currently located on GPU. This also returns false if
 	 * the channel does not exist.
 	 */
-	inline bool isGPU(ftl::rgbd::Channel channel) const {
+	inline bool isGPU(ftl::codecs::Channel channel) const {
 		return channels_.has(channel) && gpu_.has(channel);
 	}
 
@@ -116,7 +125,7 @@ public:
 	 * Is the channel data currently located on CPU memory. This also returns
 	 * false if the channel does not exist.
 	 */
-	inline bool isCPU(ftl::rgbd::Channel channel) const {
+	inline bool isCPU(ftl::codecs::Channel channel) const {
 		return channels_.has(channel) && !gpu_.has(channel);
 	}
 
@@ -129,7 +138,7 @@ public:
 	 * performed, if necessary, but with a warning since an explicit upload or
 	 * download should be used.
 	 */
-	template <typename T> const T& get(ftl::rgbd::Channel channel) const;
+	template <typename T> const T& get(ftl::codecs::Channel channel) const;
 
 	/**
 	 * Method to get reference to the channel content.
@@ -140,49 +149,50 @@ public:
 	 * performed, if necessary, but with a warning since an explicit upload or
 	 * download should be used.
 	 */
-	template <typename T> T& get(ftl::rgbd::Channel channel);
+	template <typename T> T& get(ftl::codecs::Channel channel);
 
-	template <typename T> const ftl::cuda::TextureObject<T> &getTexture(ftl::rgbd::Channel) const;
-	template <typename T> ftl::cuda::TextureObject<T> &getTexture(ftl::rgbd::Channel);
+	template <typename T> const ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel) const;
+	template <typename T> ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel);
 
 private:
 	struct ChannelData {
+		ftl::cuda::TextureObjectBase tex;
 		cv::Mat host;
 		cv::cuda::GpuMat gpu;
-		ftl::cuda::TextureObjectBase tex;
 	};
 
-	std::array<ChannelData, Channels::kMax> data_;
+	std::array<ChannelData, ftl::codecs::Channels::kMax> data_;
 
-	ftl::rgbd::Channels channels_;	// Does it have a channel
-	ftl::rgbd::Channels gpu_;		// Is the channel on a GPU
+	ftl::codecs::Channels channels_;	// Does it have a channel
+	ftl::codecs::Channels gpu_;		// Is the channel on a GPU
 
 	ftl::rgbd::Source *src_;
 
-	inline ChannelData &_get(ftl::rgbd::Channel c) { return data_[static_cast<unsigned int>(c)]; }
-	inline const ChannelData &_get(ftl::rgbd::Channel c) const { return data_[static_cast<unsigned int>(c)]; }
+	inline ChannelData &_get(ftl::codecs::Channel c) { return data_[static_cast<unsigned int>(c)]; }
+	inline const ChannelData &_get(ftl::codecs::Channel c) const { return data_[static_cast<unsigned int>(c)]; }
 };
 
 // Specialisations
 
-template<> const cv::Mat& Frame::get(ftl::rgbd::Channel channel) const;
-template<> const cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel) const;
-template<> cv::Mat& Frame::get(ftl::rgbd::Channel channel);
-template<> cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel);
+template<> const cv::Mat& Frame::get(ftl::codecs::Channel channel) const;
+template<> const cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel) const;
+template<> cv::Mat& Frame::get(ftl::codecs::Channel channel);
+template<> cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel);
 
-template <> cv::Mat &Frame::create(ftl::rgbd::Channel c, const ftl::rgbd::FormatBase &);
-template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c, const ftl::rgbd::FormatBase &);
-template <> cv::Mat &Frame::create(ftl::rgbd::Channel c);
-template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c);
+template <> cv::Mat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &);
+template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &);
+template <> cv::Mat &Frame::create(ftl::codecs::Channel c);
+template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c);
 
 template <typename T>
-ftl::cuda::TextureObject<T> &Frame::getTexture(ftl::rgbd::Channel c) {
-	if (!channels_.has(c)) throw ftl::exception("Texture channel does not exist");
+ftl::cuda::TextureObject<T> &Frame::getTexture(ftl::codecs::Channel c) {
+	if (!channels_.has(c)) throw ftl::exception(ftl::Formatter() << "Texture channel does not exist: " << (int)c);
 	if (!gpu_.has(c)) throw ftl::exception("Texture channel is not on GPU");
 
 	auto &m = _get(c);
 
 	if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows || m.gpu.type() != m.tex.cvType()) {
+		LOG(ERROR) << "Texture has not been created for channel = " << (int)c;
 		throw ftl::exception("Texture has not been created properly for this channel");
 	}
 
@@ -190,7 +200,7 @@ ftl::cuda::TextureObject<T> &Frame::getTexture(ftl::rgbd::Channel c) {
 }
 
 template <typename T>
-ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::rgbd::Channel c, const ftl::rgbd::Format<T> &f) {
+ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, const ftl::rgbd::Format<T> &f) {
 	if (!channels_.has(c)) channels_ += c;
 	if (!gpu_.has(c)) gpu_ += c;
 
@@ -203,16 +213,17 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::rgbd::Channel c, const ft
 	}
 
 	if (m.gpu.type() != ftl::traits::OpenCVType<T>::value) {
+		LOG(ERROR) << "Texture type mismatch: " << (int)c << " " << m.gpu.type() << " != " << ftl::traits::OpenCVType<T>::value;
 		throw ftl::exception("Texture type does not match underlying data type");
 	}
 
 	// TODO: Check tex cvType
 
 	if (m.tex.devicePtr() == nullptr) {
-		LOG(INFO) << "Creating texture object";
+		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows) {
-		LOG(INFO) << "Recreating texture object";
+		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu);
 	}
@@ -221,7 +232,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::rgbd::Channel c, const ft
 }
 
 template <typename T>
-ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::rgbd::Channel c) {
+ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c) {
 	if (!channels_.has(c)) throw ftl::exception("createTexture needs a format if the channel does not exist");
 
 	auto &m = _get(c);
@@ -235,15 +246,17 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::rgbd::Channel c) {
 	}
 
 	if (m.gpu.type() != ftl::traits::OpenCVType<T>::value) {
+		LOG(ERROR) << "Texture type mismatch: " << (int)c << " " << m.gpu.type() << " != " << ftl::traits::OpenCVType<T>::value;
 		throw ftl::exception("Texture type does not match underlying data type");
 	}
 
 	// TODO: Check tex cvType
 
 	if (m.tex.devicePtr() == nullptr) {
-		LOG(INFO) << "Creating texture object";
+		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows || m.tex.devicePtr() != m.gpu.data) {
+		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu);
 	}
diff --git a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
index 2fa39e2eacf19339860e98fa98df44f687ac64c7..f18b52635cde42f3a3e7d2adf047b9fe4048befd 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
@@ -26,8 +26,8 @@ struct FrameSet {
 	bool stale;						// True if buffers have been invalidated
 	SHARED_MUTEX mtx;
 
-	void upload(ftl::rgbd::Channels, cudaStream_t stream=0);
-	void download(ftl::rgbd::Channels, cudaStream_t stream=0);
+	void upload(ftl::codecs::Channels, cudaStream_t stream=0);
+	void download(ftl::codecs::Channels, cudaStream_t stream=0);
 	void swapTo(ftl::rgbd::FrameSet &);
 };
 
diff --git a/components/rgbd-sources/include/ftl/rgbd/group.hpp b/components/rgbd-sources/include/ftl/rgbd/group.hpp
index 0ded29e80b7d2fa01ad656c2fbb3b8865b726a70..fdd7ba7412a2f0d247fb545150cd428ae4519d9f 100644
--- a/components/rgbd-sources/include/ftl/rgbd/group.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/group.hpp
@@ -6,6 +6,7 @@
 #include <ftl/timer.hpp>
 #include <ftl/rgbd/frame.hpp>
 #include <ftl/rgbd/frameset.hpp>
+#include <ftl/codecs/packet.hpp>
 
 #include <opencv2/opencv.hpp>
 #include <vector>
@@ -65,6 +66,22 @@ class Group {
 	 */
 	void sync(std::function<bool(FrameSet &)>);
 
+	/**
+	 * Whenever any source within the group receives raw data, this callback
+	 * will be called with that raw data. This is used to allow direct data
+	 * capture (to disk) or proxy over a network without needing to re-encode.
+	 * There is no guarantee about order or timing and the callback itself will
+	 * need to ensure synchronisation of timestamps.
+	 */
+	void addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
+
+	/**
+	 * Removes a raw data callback from all sources in the group.
+	 */
+	void removeRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
+
+	inline std::vector<Source*> sources() const { return sources_; }
+
 	/** @deprecated */
 	//bool getFrames(FrameSet &, bool complete=false);
 
@@ -82,6 +99,8 @@ class Group {
 
 	void stop() {}
 
+	int streamID(const ftl::rgbd::Source *s) const;
+
 	private:
 	std::vector<FrameSet> framesets_;
 	std::vector<Source*> sources_;
diff --git a/components/rgbd-sources/include/ftl/rgbd/source.hpp b/components/rgbd-sources/include/ftl/rgbd/source.hpp
index 0ee163add0009023ec24e6df6bd18a1da927af1e..862d472f16b170fc8de28974462b2a0f9df63e54 100644
--- a/components/rgbd-sources/include/ftl/rgbd/source.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/source.hpp
@@ -8,9 +8,11 @@
 #include <ftl/net/universe.hpp>
 #include <ftl/uri.hpp>
 #include <ftl/rgbd/detail/source.hpp>
+#include <ftl/codecs/packet.hpp>
 #include <opencv2/opencv.hpp>
 #include <Eigen/Eigen>
 #include <string>
+#include <map>
 
 #include <ftl/cuda_common.hpp>
 #include <ftl/rgbd/frame.hpp>
@@ -27,6 +29,7 @@ static inline bool isValidDepth(float d) { return (d > 0.01f) && (d < 39.99f); }
 
 class SnapshotReader;
 class VirtualSource;
+class Player;
 
 /**
  * RGBD Generic data source configurable entity. This class hides the
@@ -68,9 +71,9 @@ class Source : public ftl::Configurable {
 	/**
 	 * Change the second channel source.
 	 */
-	bool setChannel(ftl::rgbd::Channel c);
+	bool setChannel(ftl::codecs::Channel c);
 
-	ftl::rgbd::Channel getChannel() const { return channel_; }
+	ftl::codecs::Channel getChannel() const { return channel_; }
 
 	/**
 	 * Perform the hardware or virtual frame grab operation. This should be
@@ -143,7 +146,7 @@ class Source : public ftl::Configurable {
 		else return params_;
 	}
 
-	const Camera parameters(ftl::rgbd::Channel) const;
+	const Camera parameters(ftl::codecs::Channel) const;
 
 	cv::Mat cameraMatrix() const;
 
@@ -201,9 +204,26 @@ class Source : public ftl::Configurable {
 	SHARED_MUTEX &mutex() { return mutex_; }
 
 	std::function<void(int64_t, cv::Mat &, cv::Mat &)> &callback() { return callback_; }
+
+	/**
+	 * Set the callback that receives decoded frames as they are generated.
+	 */
 	void setCallback(std::function<void(int64_t, cv::Mat &, cv::Mat &)> cb);
 	void removeCallback() { callback_ = nullptr; }
 
+	/**
+	 * Add a callback to immediately receive any raw data from this source.
+	 * Currently this only works for a net source since other sources don't
+	 * produce raw encoded data.
+	 */
+	void addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
+
+	void removeRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
+
+	/**
+	 * INTERNAL. Used to send raw data to callbacks.
+	 */
+	void notifyRaw(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt);
 
 	protected:
 	detail::Source *impl_;
@@ -216,15 +236,20 @@ class Source : public ftl::Configurable {
 	SHARED_MUTEX mutex_;
 	bool paused_;
 	bool bullet_;
-	ftl::rgbd::Channel channel_;
+	ftl::codecs::Channel channel_;
 	cudaStream_t stream_;
 	int64_t timestamp_;
 	std::function<void(int64_t, cv::Mat &, cv::Mat &)> callback_;
+	std::list<std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)>> rawcallbacks_;
 
 	detail::Source *_createImplementation();
 	detail::Source *_createFileImpl(const ftl::URI &uri);
 	detail::Source *_createNetImpl(const ftl::URI &uri);
 	detail::Source *_createDeviceImpl(const ftl::URI &uri);
+
+	static ftl::rgbd::Player *__createReader(const std::string &path);
+
+	static std::map<std::string, ftl::rgbd::Player*> readers__;
 };
 
 }
diff --git a/components/rgbd-sources/include/ftl/rgbd/streamer.hpp b/components/rgbd-sources/include/ftl/rgbd/streamer.hpp
index 7c6e6f479afe022cdacefbabf9098e276e0c9f79..cfa552a862e1d46b9600ce523815eb8fa5efa001 100644
--- a/components/rgbd-sources/include/ftl/rgbd/streamer.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/streamer.hpp
@@ -54,6 +54,7 @@ struct StreamSource {
 	std::list<detail::StreamClient> clients;
 	SHARED_MUTEX mutex;
 	unsigned long long frame;
+	int id;
 
 	ftl::codecs::Encoder *hq_encoder_c1 = nullptr;
 	ftl::codecs::Encoder *hq_encoder_c2 = nullptr;
@@ -101,6 +102,11 @@ class Streamer : public ftl::Configurable {
 	 */
 	void add(Source *);
 
+	/**
+	 * Allow all sources in another group to be proxy streamed by this streamer.
+	 */
+	void add(ftl::rgbd::Group *grp);
+
 	void remove(Source *);
 	void remove(const std::string &);
 
@@ -130,6 +136,7 @@ class Streamer : public ftl::Configurable {
 	private:
 	ftl::rgbd::Group group_;
 	std::map<std::string, detail::StreamSource*> sources_;
+	std::list<ftl::rgbd::Group*> proxy_grps_;
 	//ctpl::thread_pool pool_;
 	SHARED_MUTEX mutex_;
 	bool active_;
@@ -152,16 +159,23 @@ class Streamer : public ftl::Configurable {
 
 	ftl::codecs::device_t hq_devices_;
 
+	enum class Quality {
+		High,
+		Low,
+		Any
+	};
+
 	void _process(ftl::rgbd::FrameSet &);
 	void _cleanUp();
 	void _addClient(const std::string &source, int N, int rate, const ftl::UUID &peer, const std::string &dest);
-	void _transmitPacket(detail::StreamSource *src, const ftl::codecs::Packet &pkt, int chan, bool hasChan2, bool hqonly);
+	void _transmitPacket(detail::StreamSource *src, const ftl::codecs::Packet &pkt, ftl::codecs::Channel chan, bool hasChan2, Quality q);
+	void _transmitPacket(detail::StreamSource *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt, Quality q);
 
 	//void _encodeHQAndTransmit(detail::StreamSource *src, const cv::Mat &, const cv::Mat &, int chunk);
 	//void _encodeLQAndTransmit(detail::StreamSource *src, const cv::Mat &, const cv::Mat &, int chunk);
 	//void _encodeAndTransmit(detail::StreamSource *src, ftl::codecs::Encoder *enc1, ftl::codecs::Encoder *enc2, const cv::Mat &, const cv::Mat &);
 	//void _encodeImageChannel1(const cv::Mat &in, std::vector<unsigned char> &out, unsigned int b);
-	//bool _encodeImageChannel2(const cv::Mat &in, std::vector<unsigned char> &out, ftl::rgbd::channel_t c, unsigned int b);
+	//bool _encodeImageChannel2(const cv::Mat &in, std::vector<unsigned char> &out, ftl::codecs::Channel_t c, unsigned int b);
 };
 
 }
diff --git a/components/rgbd-sources/src/algorithms/fixstars_sgm.cpp b/components/rgbd-sources/src/algorithms/fixstars_sgm.cpp
index 5f8921bda0e562bcc26b454d750a35294ed75537..732c4ffb56a65a2ac8f53841b5dcb45e52ed5280 100644
--- a/components/rgbd-sources/src/algorithms/fixstars_sgm.cpp
+++ b/components/rgbd-sources/src/algorithms/fixstars_sgm.cpp
@@ -7,7 +7,7 @@
 using ftl::algorithms::FixstarsSGM;
 using cv::Mat;
 using cv::cuda::GpuMat;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 using ftl::rgbd::Format;
 
 //static ftl::Disparity::Register fixstarssgm("libsgm", FixstarsSGM::create);
diff --git a/components/rgbd-sources/src/disparity.hpp b/components/rgbd-sources/src/disparity.hpp
index 44215871d37b2944c08d072d63afd5bf871082e4..6802869c1c727a60f5ae3b308c86390486404044 100644
--- a/components/rgbd-sources/src/disparity.hpp
+++ b/components/rgbd-sources/src/disparity.hpp
@@ -50,9 +50,9 @@ class Disparity : public ftl::Configurable {
 	{
 		// FIXME: What were these for?
 		//ftl::rgbd::Frame frame;
-		//frame.create<cv::cuda::GpuMat>(ftl::rgbd::Channel::Left) = l;
-		//frame.create<cv::cuda::GpuMat>(ftl::rgbd::Channel::Right) = r;
-		//frame.create<cv::cuda::GpuMat>(ftl::rgbd::Channel::Disparity) = disp;
+		//frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Left) = l;
+		//frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Right) = r;
+		//frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Disparity) = disp;
 	}
 
 	/**
diff --git a/components/rgbd-sources/src/frame.cpp b/components/rgbd-sources/src/frame.cpp
index a56a19355526d9cdcdf25faaecdc67c05d09469d..212ca55375993789bb832e678fe4f1a8f22d5c48 100644
--- a/components/rgbd-sources/src/frame.cpp
+++ b/components/rgbd-sources/src/frame.cpp
@@ -2,8 +2,8 @@
 #include <ftl/rgbd/frame.hpp>
 
 using ftl::rgbd::Frame;
-using ftl::rgbd::Channels;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channels;
+using ftl::codecs::Channel;
 
 static cv::Mat none;
 static cv::cuda::GpuMat noneGPU;
@@ -39,14 +39,14 @@ void Frame::upload(Channels c, cv::cuda::Stream stream) {
 	}
 }
 
-bool Frame::empty(ftl::rgbd::Channels channels) {
+bool Frame::empty(ftl::codecs::Channels channels) {
 	for (auto c : channels) {
 		if (empty(c)) return true;
 	}
 	return false;
 }
 
-void Frame::swapTo(ftl::rgbd::Channels channels, Frame &f) {
+void Frame::swapTo(ftl::codecs::Channels channels, Frame &f) {
 	f.reset();
 
 	// For all channels in this frame object
@@ -74,7 +74,31 @@ void Frame::swapTo(ftl::rgbd::Channels channels, Frame &f) {
 	}
 }
 
-template<> cv::Mat& Frame::get(ftl::rgbd::Channel channel) {
+void Frame::swapChannels(ftl::codecs::Channel a, ftl::codecs::Channel b) {
+	auto &m1 = _get(a);
+	auto &m2 = _get(b);
+	cv::swap(m1.host, m2.host);
+	cv::cuda::swap(m1.gpu, m2.gpu);
+
+	auto temptex = std::move(m2.tex);
+	m2.tex = std::move(m1.tex);
+	m1.tex = std::move(temptex);
+}
+
+void Frame::copyTo(ftl::codecs::Channels channels, Frame &f) {
+	f.reset();
+
+	// For all channels in this frame object
+	for (auto c : channels_) {
+		// Should we copy this channel?
+		if (channels.has(c)) {
+			if (isCPU(c)) get<cv::Mat>(c).copyTo(f.create<cv::Mat>(c));
+			else get<cv::cuda::GpuMat>(c).copyTo(f.create<cv::cuda::GpuMat>(c));
+		}
+	}
+}
+
+template<> cv::Mat& Frame::get(ftl::codecs::Channel channel) {
 	if (channel == Channel::None) {
 		DLOG(WARNING) << "Cannot get the None channel from a Frame";
 		none.release();
@@ -94,7 +118,7 @@ template<> cv::Mat& Frame::get(ftl::rgbd::Channel channel) {
 	return _get(channel).host;
 }
 
-template<> cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel) {
+template<> cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel) {
 	if (channel == Channel::None) {
 		DLOG(WARNING) << "Cannot get the None channel from a Frame";
 		noneGPU.release();
@@ -114,7 +138,7 @@ template<> cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel) {
 	return _get(channel).gpu;
 }
 
-template<> const cv::Mat& Frame::get(ftl::rgbd::Channel channel) const {
+template<> const cv::Mat& Frame::get(ftl::codecs::Channel channel) const {
 	if (channel == Channel::None) {
 		LOG(FATAL) << "Cannot get the None channel from a Frame";
 	}
@@ -128,7 +152,7 @@ template<> const cv::Mat& Frame::get(ftl::rgbd::Channel channel) const {
 	return _get(channel).host;
 }
 
-template<> const cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel) const {
+template<> const cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel) const {
 	if (channel == Channel::None) {
 		LOG(FATAL) << "Cannot get the None channel from a Frame";
 	}
@@ -145,7 +169,7 @@ template<> const cv::cuda::GpuMat& Frame::get(ftl::rgbd::Channel channel) const
 	return _get(channel).gpu;
 }
 
-template <> cv::Mat &Frame::create(ftl::rgbd::Channel c, const ftl::rgbd::FormatBase &f) {
+template <> cv::Mat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f) {
 	if (c == Channel::None) {
 		throw ftl::exception("Cannot create a None channel");
 	}
@@ -161,7 +185,7 @@ template <> cv::Mat &Frame::create(ftl::rgbd::Channel c, const ftl::rgbd::Format
 	return m;
 }
 
-template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c, const ftl::rgbd::FormatBase &f) {
+template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f) {
 	if (c == Channel::None) {
 		throw ftl::exception("Cannot create a None channel");
 	}
@@ -177,7 +201,7 @@ template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c, const ftl::rgb
 	return m;
 }
 
-template <> cv::Mat &Frame::create(ftl::rgbd::Channel c) {
+template <> cv::Mat &Frame::create(ftl::codecs::Channel c) {
 	if (c == Channel::None) {
 		throw ftl::exception("Cannot create a None channel");
 	}
@@ -188,7 +212,7 @@ template <> cv::Mat &Frame::create(ftl::rgbd::Channel c) {
 	return m;
 }
 
-template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c) {
+template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c) {
 	if (c == Channel::None) {
 		throw ftl::exception("Cannot create a None channel");
 	}
@@ -199,3 +223,8 @@ template <> cv::cuda::GpuMat &Frame::create(ftl::rgbd::Channel c) {
 	return m;
 }
 
+void Frame::resetTexture(ftl::codecs::Channel c) {
+	auto &m = _get(c);
+	m.tex.free();
+}
+
diff --git a/components/rgbd-sources/src/frameset.cpp b/components/rgbd-sources/src/frameset.cpp
index 9b9a807d8599c23141b6c3806546bf8038ef30f9..38232e3f6bc1bbfcb1f3a4963743171d4401fc39 100644
--- a/components/rgbd-sources/src/frameset.cpp
+++ b/components/rgbd-sources/src/frameset.cpp
@@ -1,16 +1,16 @@
 #include <ftl/rgbd/frameset.hpp>
 
 using ftl::rgbd::FrameSet;
-using ftl::rgbd::Channels;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channels;
+using ftl::codecs::Channel;
 
-void FrameSet::upload(ftl::rgbd::Channels c, cudaStream_t stream) {
+void FrameSet::upload(ftl::codecs::Channels c, cudaStream_t stream) {
 	for (auto &f : frames) {
 		f.upload(c, stream);
 	}
 }
 
-void FrameSet::download(ftl::rgbd::Channels c, cudaStream_t stream) {
+void FrameSet::download(ftl::codecs::Channels c, cudaStream_t stream) {
 	for (auto &f : frames) {
 		f.download(c, stream);
 	}
diff --git a/components/rgbd-sources/src/group.cpp b/components/rgbd-sources/src/group.cpp
index 96ca3a82fd3e306656f047d4971ce4a29b00fe48..4e178c0245e815da6b836fbd2de44df4dcc84e1b 100644
--- a/components/rgbd-sources/src/group.cpp
+++ b/components/rgbd-sources/src/group.cpp
@@ -10,7 +10,7 @@ using ftl::rgbd::kFrameBufferSize;
 using std::vector;
 using std::chrono::milliseconds;
 using std::this_thread::sleep_for;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
 Group::Group() : framesets_(kFrameBufferSize), head_(0) {
 	framesets_[0].timestamp = -1;
@@ -145,6 +145,13 @@ void Group::_computeJob(ftl::rgbd::Source *src) {
 	}
 }
 
+int Group::streamID(const ftl::rgbd::Source *s) const {
+	for (size_t i=0; i<sources_.size(); ++i) {
+		if (sources_[i] == s) return static_cast<int>(i);
+	}
+	return -1;
+}
+
 void Group::sync(std::function<bool(ftl::rgbd::FrameSet &)> cb) {
 	if (latency_ == 0) {
 		callback_ = cb;
@@ -222,9 +229,22 @@ void Group::sync(std::function<bool(ftl::rgbd::FrameSet &)> cb) {
 		return true;
 	});
 
+	LOG(INFO) << "Start timer";
 	ftl::timer::start(true);
 }
 
+void Group::addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &f) {
+	for (auto s : sources_) {
+		s->addRawCallback(f);
+	}
+}
+
+void Group::removeRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &f) {
+	for (auto s : sources_) {
+		s->removeRawCallback(f);
+	}
+}
+
 //ftl::rgbd::FrameSet &Group::_getRelativeFrameset(int rel) {
 //	int idx = (rel < 0) ? (head_+kFrameBufferSize+rel)%kFrameBufferSize : (head_+rel)%kFrameBufferSize;
 //	return framesets_[idx];
@@ -237,8 +257,9 @@ ftl::rgbd::FrameSet *Group::_getFrameset(int f) {
 		int idx = (head_+kFrameBufferSize-i)%kFrameBufferSize;
 
 		if (framesets_[idx].timestamp == lookfor && framesets_[idx].count != sources_.size()) {
-			LOG(INFO) << "Required frame not complete (timestamp="  << (framesets_[idx].timestamp) << " buffer=" << i << ")";
+			LOG(WARNING) << "Required frame not complete in '" << name_ << "' (timestamp="  << (framesets_[idx].timestamp) << " buffer=" << i << ")";
 			//framesets_[idx].stale = true;
+			//return &framesets_[idx];
 			continue;
 		}
 
@@ -282,6 +303,8 @@ void Group::_addFrameset(int64_t timestamp) {
 		//framesets_[head_].channel2.resize(sources_.size());
 		framesets_[head_].frames.resize(sources_.size());
 
+		for (auto &f : framesets_[head_].frames) f.reset();
+
 		if (framesets_[head_].sources.size() != sources_.size()) {
 			framesets_[head_].sources.clear();
 			for (auto s : sources_) framesets_[head_].sources.push_back(s);
@@ -313,6 +336,8 @@ void Group::_addFrameset(int64_t timestamp) {
 		//framesets_[head_].channel2.resize(sources_.size());
 		framesets_[head_].frames.resize(sources_.size());
 
+		for (auto &f : framesets_[head_].frames) f.reset();
+
 		if (framesets_[head_].sources.size() != sources_.size()) {
 			framesets_[head_].sources.clear();
 			for (auto s : sources_) framesets_[head_].sources.push_back(s);
diff --git a/components/rgbd-sources/src/offilter.cpp b/components/rgbd-sources/src/offilter.cpp
index 466aa9249517b91ac54726e821401e85272aa082..34c01c1feb73b8171b0890f8d94405ee22a0433b 100644
--- a/components/rgbd-sources/src/offilter.cpp
+++ b/components/rgbd-sources/src/offilter.cpp
@@ -6,6 +6,7 @@
 #include <loguru.hpp>
 
 using namespace ftl::rgbd;
+using namespace ftl::codecs;
 
 using cv::Mat;
 using cv::Size;
diff --git a/components/rgbd-sources/src/source.cpp b/components/rgbd-sources/src/source.cpp
index 35d23f27ad7edac18d3e3e02247296f1382be5e2..7e2cc10ed8971cf2c0b29d2a07b345121f92cb9e 100644
--- a/components/rgbd-sources/src/source.cpp
+++ b/components/rgbd-sources/src/source.cpp
@@ -2,21 +2,25 @@
 #include <ftl/rgbd/source.hpp>
 #include <ftl/threads.hpp>
 
-#include "net.hpp"
-#include "stereovideo.hpp"
-#include "image.hpp"
-#include "middlebury_source.hpp"
+#include "sources/net/net.hpp"
+#include "sources/stereovideo/stereovideo.hpp"
+#include "sources/image/image.hpp"
+#include "sources/middlebury/middlebury_source.hpp"
 
 #ifdef HAVE_LIBARCHIVE
 #include <ftl/rgbd/snapshot.hpp>
-#include "snapshot_source.hpp"
+#include "sources/snapshot/snapshot_source.hpp"
 #endif
 
+#include "sources/ftlfile/file_source.hpp"
+
 #ifdef HAVE_REALSENSE
-#include "realsense_source.hpp"
+#include "sources/realsense/realsense_source.hpp"
 using ftl::rgbd::detail::RealsenseSource;
 #endif
 
+#include <fstream>
+
 using ftl::rgbd::Source;
 using ftl::Configurable;
 using std::string;
@@ -25,7 +29,10 @@ using ftl::rgbd::detail::NetSource;
 using ftl::rgbd::detail::ImageSource;
 using ftl::rgbd::detail::MiddleburySource;
 using ftl::rgbd::capability_t;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
+using ftl::rgbd::detail::FileSource;
+
+std::map<std::string, ftl::rgbd::Player*> Source::readers__;
 
 Source::Source(ftl::config::json_t &cfg) : Configurable(cfg), pose_(Eigen::Matrix4d::Identity()), net_(nullptr) {
 	impl_ = nullptr;
@@ -54,7 +61,7 @@ Source::Source(ftl::config::json_t &cfg, ftl::net::Universe *net) : Configurable
 }
 
 Source::~Source() {
-
+	if (impl_) delete impl_;
 }
 
 cv::Mat Source::cameraMatrix() const {
@@ -114,7 +121,11 @@ ftl::rgbd::detail::Source *Source::_createFileImpl(const ftl::URI &uri) {
 	} else if (ftl::is_file(path)) {
 		string ext = path.substr(eix+1);
 
-		if (ext == "png" || ext == "jpg") {
+		if (ext == "ftl") {
+			ftl::rgbd::Player *reader = __createReader(path);
+			LOG(INFO) << "Playing track: " << uri.getFragment();
+			return new FileSource(this, reader, std::stoi(uri.getFragment()));
+		} else if (ext == "png" || ext == "jpg") {
 			return new ImageSource(this, path);
 		} else if (ext == "mp4") {
 			return new StereoVideoSource(this, path);
@@ -137,6 +148,22 @@ ftl::rgbd::detail::Source *Source::_createFileImpl(const ftl::URI &uri) {
 	return nullptr;
 }
 
+ftl::rgbd::Player *Source::__createReader(const std::string &path) {
+	if (readers__.find(path) != readers__.end()) {
+		return readers__[path];
+	}
+
+	std::ifstream *file = new std::ifstream;
+	file->open(path, std::ifstream::in | std::ifstream::binary);
+
+	// FIXME: This is a memory leak, must delete ifstream somewhere.
+
+	auto *r = new ftl::rgbd::Player(*file);
+	readers__[path] = r;
+	r->begin();
+	return r;
+}
+
 ftl::rgbd::detail::Source *Source::_createNetImpl(const ftl::URI &uri) {
 	return new NetSource(this);
 }
@@ -296,13 +323,13 @@ bool Source::thumbnail(cv::Mat &t) {
 	return !thumb_.empty();
 }
 
-bool Source::setChannel(ftl::rgbd::Channel c) {
+bool Source::setChannel(ftl::codecs::Channel c) {
 	channel_ = c;
 	// FIXME:(Nick) Verify channel is supported by this source...
 	return true;
 }
 
-const ftl::rgbd::Camera Source::parameters(ftl::rgbd::Channel chan) const {
+const ftl::rgbd::Camera Source::parameters(ftl::codecs::Channel chan) const {
 	return (impl_) ? impl_->parameters(chan) : parameters();
 }
 
@@ -310,3 +337,28 @@ void Source::setCallback(std::function<void(int64_t, cv::Mat &, cv::Mat &)> cb)
 	if (bool(callback_)) LOG(ERROR) << "Source already has a callback: " << getURI();
 	callback_ = cb;
 }
+
+void Source::addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &f) {
+	UNIQUE_LOCK(mutex_,lk);
+	LOG(INFO) << "ADD RAW";
+	rawcallbacks_.push_back(f);
+}
+
+void Source::removeRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &f) {
+	UNIQUE_LOCK(mutex_,lk);
+	for (auto i=rawcallbacks_.begin(); i!=rawcallbacks_.end(); ++i) {
+		const auto targ = (*i).target<void(*)(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)>();
+		if (targ && targ == f.target<void(*)(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)>()) {
+			rawcallbacks_.erase(i);
+			LOG(INFO) << "Removing RAW callback";
+			return;
+		}
+	}
+}
+
+void Source::notifyRaw(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+	SHARED_LOCK(mutex_,lk);
+	for (auto &i : rawcallbacks_) {
+		i(this, spkt, pkt);
+	}
+}
diff --git a/components/rgbd-sources/src/sources/ftlfile/file_source.cpp b/components/rgbd-sources/src/sources/ftlfile/file_source.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..323f1ae72179c46ad816ec653ffc940d87cad0f7
--- /dev/null
+++ b/components/rgbd-sources/src/sources/ftlfile/file_source.cpp
@@ -0,0 +1,142 @@
+#include "file_source.hpp"
+
+#include <ftl/codecs/hevc.hpp>
+#include <ftl/timer.hpp>
+
+using ftl::rgbd::detail::FileSource;
+using ftl::codecs::codec_t;
+using ftl::codecs::Channel;
+
+void FileSource::_createDecoder(int ix, const ftl::codecs::Packet &pkt) {
+	if (decoders_[ix]) {
+		if (!decoders_[ix]->accepts(pkt)) {
+			ftl::codecs::free(decoders_[ix]);
+		} else {
+			return;
+		}
+	}
+
+	DLOG(INFO) << "Create a decoder: " << ix;
+	decoders_[ix] = ftl::codecs::allocateDecoder(pkt);
+}
+
+FileSource::FileSource(ftl::rgbd::Source *s, ftl::rgbd::Player *r, int sid) : ftl::rgbd::detail::Source(s) {
+    reader_ = r;
+	has_calibration_ = false;
+	decoders_[0] = nullptr;
+	decoders_[1] = nullptr;
+	cache_read_ = -1;
+	cache_write_ = 0;
+	realtime_ = host_->value("realtime", true);
+	timestamp_ = r->getStartTime();
+	sourceid_ = sid;
+
+    r->onPacket(sid, [this](const ftl::codecs::StreamPacket &spkt, ftl::codecs::Packet &pkt) {
+		host_->notifyRaw(spkt, pkt);
+		if (pkt.codec == codec_t::POSE) {
+			Eigen::Matrix4d p = Eigen::Map<Eigen::Matrix4d>((double*)pkt.data.data());
+			host_->setPose(p);
+		} else if (pkt.codec == codec_t::CALIBRATION) {
+			ftl::rgbd::Camera *camera = (ftl::rgbd::Camera*)pkt.data.data();
+            LOG(INFO) << "Have calibration: " << camera->fx;
+			params_ = *camera;
+			has_calibration_ = true;
+		} else {
+			if (pkt.codec == codec_t::HEVC) {
+				if (ftl::codecs::hevc::isIFrame(pkt.data)) _removeChannel(spkt.channel);
+			}
+			cache_[cache_write_].emplace_back();
+			auto &c = cache_[cache_write_].back();
+
+			// TODO: Attempt to avoid this copy operation
+			c.spkt = spkt;
+			c.pkt = pkt;
+		}
+    });
+}
+
+FileSource::~FileSource() {
+
+}
+
+void FileSource::_removeChannel(ftl::codecs::Channel channel) {
+	int c = 0;
+	for (auto i=cache_[cache_write_].begin(); i != cache_[cache_write_].end(); ) {
+		if ((*i).spkt.channel == channel) {
+			++c;
+			i = cache_[cache_write_].erase(i);
+		} else ++i;
+	}
+	DLOG(INFO) << "Skipped " << c << " packets";
+}
+
+bool FileSource::capture(int64_t ts) {
+	if (realtime_) {
+    	timestamp_ = ts;
+	} else {
+		timestamp_ += ftl::timer::getInterval();
+	}
+    return true;
+}
+
+bool FileSource::retrieve() {
+	if (!reader_->read(timestamp_)) {
+		cache_write_ = -1;
+	}
+    return true;
+}
+
+void FileSource::swap() {
+	cache_read_ = cache_write_;
+	cache_write_ = (cache_write_ == 0) ? 1 : 0;
+}
+
+bool FileSource::compute(int n, int b) {
+	if (cache_read_ < 0) return false;
+	if (cache_[cache_read_].size() == 0) return false;
+
+	int64_t lastts = 0;
+	int lastc = 0;
+
+	for (auto i=cache_[cache_read_].begin(); i!=cache_[cache_read_].end(); ++i) {
+		auto &c = *i;
+
+		if (c.spkt.timestamp > lastts) {
+			lastts = c.spkt.timestamp;
+			lastc = 1;
+		} else if (c.spkt.timestamp == lastts) {
+			lastc++;
+		}
+
+		if (c.spkt.channel == Channel::Colour) {
+			rgb_.create(cv::Size(ftl::codecs::getWidth(c.pkt.definition),ftl::codecs::getHeight(c.pkt.definition)), CV_8UC3);
+		} else {
+			depth_.create(cv::Size(ftl::codecs::getWidth(c.pkt.definition),ftl::codecs::getHeight(c.pkt.definition)), CV_32F);
+		}
+	
+		const int dix = (c.spkt.channel == Channel::Colour) ? 0 : 1;
+		_createDecoder(dix, c.pkt);
+		try {
+			if (decoders_[dix]) decoders_[dix]->decode(c.pkt, (dix == 0) ? rgb_ : depth_);
+		} catch (std::exception &e) {
+			LOG(ERROR) << "Decoder exception: " << e.what();
+		}
+	}
+
+	if (lastc != 2) {
+		LOG(ERROR) << "Channels not in sync (" << sourceid_ << "): " << lastts;
+		return false;
+	}
+
+	cache_[cache_read_].clear();
+
+	if (rgb_.empty() || depth_.empty()) return false;
+
+	auto cb = host_->callback();
+	if (cb) cb(timestamp_, rgb_, depth_);
+    return true;
+}
+
+bool FileSource::isReady() {
+    return true;
+}
diff --git a/components/rgbd-sources/src/sources/ftlfile/file_source.hpp b/components/rgbd-sources/src/sources/ftlfile/file_source.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..80f3b368b1ec30f5e5b1ebd9ac2f051ceba20901
--- /dev/null
+++ b/components/rgbd-sources/src/sources/ftlfile/file_source.hpp
@@ -0,0 +1,55 @@
+#pragma once
+#ifndef _FTL_RGBD_FILE_SOURCE_HPP_
+#define _FTL_RGBD_FILE_SOURCE_HPP_
+
+#include <loguru.hpp>
+
+#include <ftl/rgbd/source.hpp>
+#include "player.hpp"
+#include <ftl/codecs/decoder.hpp>
+
+#include <list>
+
+namespace ftl {
+namespace rgbd {
+namespace detail {
+
+class FileSource : public detail::Source {
+	public:
+	FileSource(ftl::rgbd::Source *, ftl::rgbd::Player *, int sid);
+	~FileSource();
+
+	bool capture(int64_t ts);
+	bool retrieve();
+	bool compute(int n, int b);
+	bool isReady();
+	void swap();
+
+	//void reset();
+	private:
+	ftl::rgbd::Player *reader_;
+	bool has_calibration_;
+
+	struct PacketPair {
+		ftl::codecs::StreamPacket spkt;
+		ftl::codecs::Packet pkt;
+	};
+	
+	std::list<PacketPair> cache_[2];
+	int cache_read_;
+	int cache_write_;
+	int sourceid_;
+
+	ftl::codecs::Decoder *decoders_[2];
+
+	bool realtime_;
+
+	void _removeChannel(ftl::codecs::Channel channel);
+	void _createDecoder(int ix, const ftl::codecs::Packet &pkt);
+};
+
+}
+}
+}
+
+#endif  // _FTL_RGBD_FILE_SOURCE_HPP_
diff --git a/components/rgbd-sources/src/sources/ftlfile/player.cpp b/components/rgbd-sources/src/sources/ftlfile/player.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cabbce6425586f6473156cdb6e548add0648d770
--- /dev/null
+++ b/components/rgbd-sources/src/sources/ftlfile/player.cpp
@@ -0,0 +1,79 @@
+#include "player.hpp"
+#include <ftl/configuration.hpp>
+
+using ftl::rgbd::Player;
+
+Player::Player(std::istream &s) : stream_(&s), reader_(s), paused_(false), reversed_(false), looping_(true), speed_(1.0f), pause_time_(0), last_ts_(0) {
+    auto *c = ftl::config::find("/controls");
+
+    offset_ = 0;
+	
+    if (c) {
+        paused_ = c->value("paused", false);
+        c->on("paused", [this,c](const ftl::config::Event &e) {
+            UNIQUE_LOCK(mtx_, lk);
+            paused_ = c->value("paused", false);
+            if (paused_) {
+                pause_time_ = last_ts_;
+            } else {
+                offset_ += last_ts_ - pause_time_;
+            }
+        });
+
+        looping_ = c->value("looping", true);
+        c->on("looping", [this,c](const ftl::config::Event &e) {
+            looping_ = c->value("looping", false);
+        });
+
+        speed_ = c->value("speed", 1.0f);
+        c->on("speed", [this,c](const ftl::config::Event &e) {
+            speed_ = c->value("speed", 1.0f);
+        });
+
+        reversed_ = c->value("reverse", false);
+        c->on("reverse", [this,c](const ftl::config::Event &e) {
+            reversed_ = c->value("reverse", false);
+        });
+    }
+}
+
+Player::~Player() {
+    // TODO: Remove callbacks
+}
+
+bool Player::read(int64_t ts) {
+    std::unique_lock<std::mutex> lk(mtx_, std::defer_lock);
+	if (!lk.try_lock()) return true;
+
+    last_ts_ = ts;
+    if (paused_) return true;
+
+    int64_t adjusted_ts = int64_t(float(ts - reader_.getStartTime()) * speed_) + reader_.getStartTime() + offset_;
+    bool res = reader_.read(adjusted_ts);
+
+    if (looping_ && !res) {
+        reader_.end();
+        offset_ = 0;
+        stream_->clear();
+        stream_->seekg(0);
+        if (!reader_.begin()) {
+            LOG(ERROR) << "Could not loop";
+            return false;
+        }
+        return true;
+    }
+
+    return res;
+}
+
+void Player::onPacket(int streamID, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &f) {
+    reader_.onPacket(streamID, f);
+}
+
+bool Player::begin() {
+    return reader_.begin();
+}
+
+bool Player::end() {
+    return reader_.end();
+}
diff --git a/components/rgbd-sources/src/sources/ftlfile/player.hpp b/components/rgbd-sources/src/sources/ftlfile/player.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..f826caabaf6b461cc77b719427381e3b1c5828c4
--- /dev/null
+++ b/components/rgbd-sources/src/sources/ftlfile/player.hpp
@@ -0,0 +1,47 @@
+#ifndef _FTL_RGBD_PLAYER_HPP_
+#define _FTL_RGBD_PLAYER_HPP_
+
+#include <iostream>
+#include <ftl/codecs/reader.hpp>
+#include <ftl/threads.hpp>
+
+namespace ftl {
+namespace rgbd {
+
+/**
+ * Simple wrapper around stream reader to control file playback.
+ */
+class Player {
+    public:
+    explicit Player(std::istream &s);
+    ~Player();
+
+    bool read(int64_t ts);
+
+	void onPacket(int streamID, const std::function<void(const ftl::codecs::StreamPacket &, ftl::codecs::Packet &)> &);
+
+	bool begin();
+	bool end();
+
+    inline int64_t getStartTime() { return reader_.getStartTime(); }
+
+    private:
+    std::istream *stream_;
+    ftl::codecs::Reader reader_;
+
+    bool paused_;
+    bool reversed_;
+    bool looping_;
+    float speed_;
+
+    int64_t pause_time_;
+    int64_t offset_;
+    int64_t last_ts_;
+
+    MUTEX mtx_;
+};
+
+}
+}
+
+#endif  // _FTL_RGBD_PLAYER_HPP_
diff --git a/components/rgbd-sources/src/image.hpp b/components/rgbd-sources/src/sources/image/image.hpp
similarity index 100%
rename from components/rgbd-sources/src/image.hpp
rename to components/rgbd-sources/src/sources/image/image.hpp
diff --git a/components/rgbd-sources/src/middlebury_source.cpp b/components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp
similarity index 100%
rename from components/rgbd-sources/src/middlebury_source.cpp
rename to components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp
diff --git a/components/rgbd-sources/src/middlebury_source.hpp b/components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp
similarity index 100%
rename from components/rgbd-sources/src/middlebury_source.hpp
rename to components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp
diff --git a/components/rgbd-sources/src/net.cpp b/components/rgbd-sources/src/sources/net/net.cpp
similarity index 69%
rename from components/rgbd-sources/src/net.cpp
rename to components/rgbd-sources/src/sources/net/net.cpp
index ba485c9402d888be0636902f3962cce2dd1e85ed..5956114d57bc2b9bc0a0b10bee48fd7a29d6b470 100644
--- a/components/rgbd-sources/src/net.cpp
+++ b/components/rgbd-sources/src/sources/net/net.cpp
@@ -20,7 +20,7 @@ using std::vector;
 using std::this_thread::sleep_for;
 using std::chrono::milliseconds;
 using std::tuple;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 
 // ===== NetFrameQueue =========================================================
 
@@ -40,22 +40,42 @@ NetFrame &NetFrameQueue::getFrame(int64_t ts, const cv::Size &s, int c1type, int
 		if (f.timestamp == ts) return f;
 	}
 
+	int64_t oldest = ts;
+
 	// No match so find an empty slot
 	for (auto &f : frames_) {
 		if (f.timestamp == -1) {
 			f.timestamp = ts;
-			f.chunk_count = 0;
-			f.chunk_total = 0;
+			f.chunk_count[0] = 0;
+			f.chunk_count[1] = 0;
+			f.chunk_total[0] = 0;
+			f.chunk_total[1] = 0;
+			f.channel_count = 0;
 			f.tx_size = 0;
 			f.channel1.create(s, c1type);
 			f.channel2.create(s, c2type);
 			return f;
 		}
+		oldest = (f.timestamp < oldest) ? f.timestamp : oldest;
 	}
 
 	// No empty slot, so give a fatal error
 	for (auto &f : frames_) {
 		LOG(ERROR) << "Stale frame: " << f.timestamp << " - " << f.chunk_count;
+
+		// Force release of frame!
+		if (f.timestamp == oldest) {
+			f.timestamp = ts;
+			f.chunk_count[0] = 0;
+			f.chunk_count[1] = 0;
+			f.chunk_total[0] = 0;
+			f.chunk_total[1] = 0;
+			f.channel_count = 0;
+			f.tx_size = 0;
+			f.channel1.create(s, c1type);
+			f.channel2.create(s, c2type);
+			return f;
+		}
 	}
 	LOG(FATAL) << "Net Frame Queue not large enough: " << ts;
 	// FIXME: (Nick) Could auto resize the queue.
@@ -70,7 +90,7 @@ void NetFrameQueue::freeFrame(NetFrame &f) {
 
 // ===== NetSource =============================================================
 
-bool NetSource::_getCalibration(Universe &net, const UUID &peer, const string &src, ftl::rgbd::Camera &p, ftl::rgbd::Channel chan) {
+bool NetSource::_getCalibration(Universe &net, const UUID &peer, const string &src, ftl::rgbd::Camera &p, ftl::codecs::Channel chan) {
 	try {
 		while(true) {
 			auto [cap,buf] = net.call<tuple<unsigned int,vector<unsigned char>>>(peer_, "source_details", src, chan);
@@ -87,12 +107,20 @@ bool NetSource::_getCalibration(Universe &net, const UUID &peer, const string &s
 
 				LOG(INFO) << "Calibration received: " << p.cx << ", " << p.cy << ", " << p.fx << ", " << p.fy;
 
-				// Put calibration into config manually
-				host_->getConfig()["focal"] = p.fx;
-				host_->getConfig()["centre_x"] = p.cx;
-				host_->getConfig()["centre_y"] = p.cy;
-				host_->getConfig()["baseline"] = p.baseline;
-				host_->getConfig()["doffs"] = p.doffs;
+				if (chan == Channel::Left) {
+					// Put calibration into config manually
+					host_->getConfig()["focal"] = p.fx;
+					host_->getConfig()["centre_x"] = p.cx;
+					host_->getConfig()["centre_y"] = p.cy;
+					host_->getConfig()["baseline"] = p.baseline;
+					host_->getConfig()["doffs"] = p.doffs;
+				} else {
+					host_->getConfig()["focal_right"] = p.fx;
+					host_->getConfig()["centre_x_right"] = p.cx;
+					host_->getConfig()["centre_y_right"] = p.cy;
+					host_->getConfig()["baseline_right"] = p.baseline;
+					host_->getConfig()["doffs_right"] = p.doffs;
+				}
 				
 				return true;
 			} else {
@@ -118,6 +146,7 @@ NetSource::NetSource(ftl::rgbd::Source *host)
 	temperature_ = host->value("temperature", 6500);
 	default_quality_ = host->value("quality", 0);
 	last_bitrate_ = 0;
+	params_right_.width = 0;
 
 	decoder_c1_ = nullptr;
 	decoder_c2_ = nullptr;
@@ -136,6 +165,16 @@ NetSource::NetSource(ftl::rgbd::Source *host)
 		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/focal", host_->getConfig()["focal"].dump());
 	});
 
+	host->on("centre_x", [this,host](const ftl::config::Event&) {
+		params_.cx = host_->value("centre_x", 0.0);
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/centre_x", host_->getConfig()["centre_x"].dump());
+	});
+
+	host->on("centre_y", [this,host](const ftl::config::Event&) {
+		params_.cy = host_->value("centre_y", 0.0);
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/centre_y", host_->getConfig()["centre_y"].dump());
+	});
+
 	host->on("doffs", [this,host](const ftl::config::Event&) {
 		params_.doffs = host_->value("doffs", params_.doffs);
 		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/doffs", host_->getConfig()["doffs"].dump());
@@ -146,11 +185,25 @@ NetSource::NetSource(ftl::rgbd::Source *host)
 		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/baseline", host_->getConfig()["baseline"].dump());
 	});
 
-	host->on("doffs", [this,host](const ftl::config::Event&) {
-		params_.doffs = host_->value("doffs", params_.doffs);
-		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/doffs", host_->getConfig()["doffs"].dump());
+	// Right parameters
+
+	host->on("focal_right", [this,host](const ftl::config::Event&) {
+		params_right_.fx = host_->value("focal_right", 0.0);
+		params_right_.fy = params_right_.fx;
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/focal_right", host_->getConfig()["focal_right"].dump());
+	});
+
+	host->on("centre_x_right", [this,host](const ftl::config::Event&) {
+		params_right_.cx = host_->value("centre_x_right", 0.0);
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/centre_x_right", host_->getConfig()["centre_x_right"].dump());
+	});
+
+	host->on("centre_y_right", [this,host](const ftl::config::Event&) {
+		params_right_.cy = host_->value("centre_y_right", 0.0);
+		host_->getNet()->send(peer_, "update_cfg", host_->getURI() + "/centre_y_right", host_->getConfig()["centre_y_right"].dump());
 	});
 
+
 	host->on("quality", [this,host](const ftl::config::Event&) {
 		default_quality_ = host->value("quality", 0);
 	});
@@ -228,29 +281,36 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 	int64_t now = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()).time_since_epoch().count();
 	if (!active_) return;
 
-	const ftl::rgbd::Channel chan = host_->getChannel();
-	int rchan = spkt.channel & 0x1;
+	// Allow access to the raw data elsewhere...
+	host_->notifyRaw(spkt, pkt);
 
-	// Ignore any unwanted second channel
-	if (chan == ftl::rgbd::Channel::None && rchan > 0) {
-		LOG(INFO) << "Unwanted channel";
-		//return;
-		// TODO: Allow decode to be skipped
-	}
+	const ftl::codecs::Channel chan = host_->getChannel();
+	const ftl::codecs::Channel rchan = spkt.channel;
+	const int channum = (rchan == Channel::Colour) ? 0 : 1;
 
 	NetFrame &frame = queue_.getFrame(spkt.timestamp, cv::Size(params_.width, params_.height), CV_8UC3, (isFloatChannel(chan) ? CV_32FC1 : CV_8UC3));
 
 	// Update frame statistics
 	frame.tx_size += pkt.data.size();
 
-	_createDecoder(rchan, pkt);
-	auto *decoder = (rchan == 0) ? decoder_c1_ : decoder_c2_;
-	if (!decoder) {
-		LOG(ERROR) << "No frame decoder available";
-		return;
-	}
+	// Only decode if this channel is wanted.
+	if (rchan == Channel::Colour || rchan == chan) {
+		_createDecoder(channum, pkt);
+		auto *decoder = (rchan == Channel::Colour) ? decoder_c1_ : decoder_c2_;
+		if (!decoder) {
+			LOG(ERROR) << "No frame decoder available";
+			return;
+		}
 
-	decoder->decode(pkt, (rchan == 0) ? frame.channel1 : frame.channel2);
+		decoder->decode(pkt, (rchan == Channel::Colour) ? frame.channel1 : frame.channel2);
+	} else if (chan != Channel::None && rchan != Channel::Colour) {
+		// Didn't receive correct second channel so just clear the images
+		if (isFloatChannel(chan)) {
+			frame.channel2.setTo(cv::Scalar(0.0f));
+		} else {
+			frame.channel2.setTo(cv::Scalar(0,0,0));
+		}
+	}
 
 	// Apply colour correction to chunk
 	//ftl::rgbd::colourCorrection(tmp_rgb, gamma_, temperature_);
@@ -262,29 +322,31 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 		return;
 	}
 
-	// Calculate how many packets to expect for this frame
-	if (frame.chunk_total == 0) {
-		// Getting a second channel first means expect double packets
-		frame.chunk_total = pkt.block_total * ((spkt.channel >> 1) + 1);
+	// Calculate how many packets to expect for this channel
+	if (frame.chunk_total[channum] == 0) {
+		frame.chunk_total[channum] = pkt.block_total;
 	}		
 
-	++frame.chunk_count;
+	++frame.chunk_count[channum];
+	++frame.channel_count;
 
-	if (frame.chunk_count > frame.chunk_total) LOG(FATAL) << "TOO MANY CHUNKS";
+	if (frame.chunk_count[channum] > frame.chunk_total[channum]) LOG(FATAL) << "TOO MANY CHUNKS";
 
 	// Capture tx time of first received chunk
-	if (frame.chunk_count == 1) {
+	if (frame.channel_count == 1 && frame.chunk_count[channum] == 1) {
 		UNIQUE_LOCK(frame.mtx, flk);
-		if (frame.chunk_count == 1) {
+		if (frame.chunk_count[channum] == 1) {
 			frame.tx_latency = int64_t(ttimeoff);
 		}
 	}
 
-	// Last chunk now received
-	if (frame.chunk_count == frame.chunk_total) {
+	// Last chunk of both channels now received
+	if (frame.channel_count == spkt.channel_count &&
+			frame.chunk_count[0] == frame.chunk_total[0] &&
+			frame.chunk_count[1] == frame.chunk_total[1]) {
 		UNIQUE_LOCK(frame.mtx, flk);
 
-		if (frame.timestamp >= 0 && frame.chunk_count == frame.chunk_total) {
+		if (frame.timestamp >= 0 && frame.chunk_count[0] == frame.chunk_total[0] && frame.chunk_count[1] == frame.chunk_total[1]) {
 			timestamp_ = frame.timestamp;
 			frame.tx_latency = now-(spkt.timestamp+frame.tx_latency);
 
@@ -325,14 +387,15 @@ void NetSource::setPose(const Eigen::Matrix4d &pose) {
 	//Source::setPose(pose);
 }
 
-ftl::rgbd::Camera NetSource::parameters(ftl::rgbd::Channel chan) {
-	if (chan == ftl::rgbd::Channel::Right) {
-		auto uri = host_->get<string>("uri");
-		if (!uri) return params_;
+ftl::rgbd::Camera NetSource::parameters(ftl::codecs::Channel chan) {
+	if (chan == ftl::codecs::Channel::Right) {
+		if (params_right_.width == 0) {
+			auto uri = host_->get<string>("uri");
+			if (!uri) return params_;
 
-		ftl::rgbd::Camera params;
-		_getCalibration(*host_->getNet(), peer_, *uri, params, chan);
-		return params;
+			_getCalibration(*host_->getNet(), peer_, *uri, params_right_, chan);
+		}
+		return params_right_;
 	} else {
 		return params_;
 	}
@@ -341,7 +404,7 @@ ftl::rgbd::Camera NetSource::parameters(ftl::rgbd::Channel chan) {
 void NetSource::_updateURI() {
 	UNIQUE_LOCK(mutex_,lk);
 	active_ = false;
-	prev_chan_ = ftl::rgbd::Channel::None;
+	prev_chan_ = ftl::codecs::Channel::None;
 	auto uri = host_->get<string>("uri");
 
 	if (uri_.size() > 0) {
@@ -356,7 +419,8 @@ void NetSource::_updateURI() {
 		}
 		peer_ = *p;
 
-		has_calibration_ = _getCalibration(*host_->getNet(), peer_, *uri, params_, ftl::rgbd::Channel::Left);
+		has_calibration_ = _getCalibration(*host_->getNet(), peer_, *uri, params_, ftl::codecs::Channel::Left);
+		_getCalibration(*host_->getNet(), peer_, *uri, params_right_, ftl::codecs::Channel::Right);
 
 		host_->getNet()->bind(*uri, [this](short ttimeoff, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			//if (chunk == -1) {
@@ -396,7 +460,7 @@ bool NetSource::compute(int n, int b) {
 	// Send k frames before end to prevent unwanted pause
 	// Unless only a single frame is requested
 	if ((N_ <= maxN_/2 && maxN_ > 1) || N_ == 0) {
-		const ftl::rgbd::Channel chan = host_->getChannel();
+		const ftl::codecs::Channel chan = host_->getChannel();
 
 		N_ = maxN_;
 
diff --git a/components/rgbd-sources/src/net.hpp b/components/rgbd-sources/src/sources/net/net.hpp
similarity index 92%
rename from components/rgbd-sources/src/net.hpp
rename to components/rgbd-sources/src/sources/net/net.hpp
index 51f31861fa3c9c39ea0cb53217e0fa3f764aeef3..469902a5709f1ce9674d4fcafb0b36b957b5f7a4 100644
--- a/components/rgbd-sources/src/net.hpp
+++ b/components/rgbd-sources/src/sources/net/net.hpp
@@ -39,7 +39,7 @@ class NetSource : public detail::Source {
 	bool isReady();
 
 	void setPose(const Eigen::Matrix4d &pose);
-	Camera parameters(ftl::rgbd::Channel chan);
+	Camera parameters(ftl::codecs::Channel chan);
 
 	void reset();
 
@@ -57,7 +57,8 @@ class NetSource : public detail::Source {
 	int minB_;
 	int maxN_;
 	int default_quality_;
-	ftl::rgbd::Channel prev_chan_;
+	ftl::codecs::Channel prev_chan_;
+	ftl::rgbd::Camera params_right_;
 
 	ftl::rgbd::detail::ABRController abr_;
 	int last_bitrate_;
@@ -77,7 +78,7 @@ class NetSource : public detail::Source {
 
 	NetFrameQueue queue_;
 
-	bool _getCalibration(ftl::net::Universe &net, const ftl::UUID &peer, const std::string &src, ftl::rgbd::Camera &p, ftl::rgbd::Channel chan);
+	bool _getCalibration(ftl::net::Universe &net, const ftl::UUID &peer, const std::string &src, ftl::rgbd::Camera &p, ftl::codecs::Channel chan);
 	void _recv(const std::vector<unsigned char> &jpg, const std::vector<unsigned char> &d);
 	void _recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &);
 	//void _recvChunk(int64_t frame, short ttimeoff, uint8_t bitrate, int chunk, const std::vector<unsigned char> &jpg, const std::vector<unsigned char> &d);
diff --git a/components/rgbd-sources/src/realsense_source.cpp b/components/rgbd-sources/src/sources/realsense/realsense_source.cpp
similarity index 100%
rename from components/rgbd-sources/src/realsense_source.cpp
rename to components/rgbd-sources/src/sources/realsense/realsense_source.cpp
diff --git a/components/rgbd-sources/src/realsense_source.hpp b/components/rgbd-sources/src/sources/realsense/realsense_source.hpp
similarity index 100%
rename from components/rgbd-sources/src/realsense_source.hpp
rename to components/rgbd-sources/src/sources/realsense/realsense_source.hpp
diff --git a/components/rgbd-sources/src/snapshot.cpp b/components/rgbd-sources/src/sources/snapshot/snapshot.cpp
similarity index 100%
rename from components/rgbd-sources/src/snapshot.cpp
rename to components/rgbd-sources/src/sources/snapshot/snapshot.cpp
diff --git a/components/rgbd-sources/src/snapshot_source.cpp b/components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp
similarity index 100%
rename from components/rgbd-sources/src/snapshot_source.cpp
rename to components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp
diff --git a/components/rgbd-sources/src/snapshot_source.hpp b/components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp
similarity index 100%
rename from components/rgbd-sources/src/snapshot_source.hpp
rename to components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp
diff --git a/components/rgbd-sources/src/calibrate.cpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
similarity index 99%
rename from components/rgbd-sources/src/calibrate.cpp
rename to components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
index 3940520d2374661449c376020cb98c5802e2c674..fc99d701fa40a8b824248c775a7020ed7d449fd1 100644
--- a/components/rgbd-sources/src/calibrate.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
@@ -199,7 +199,7 @@ void Calibrate::_updateIntrinsics() {
 	// Set correct camera matrices for
 	// getCameraMatrix(), getCameraMatrixLeft(), getCameraMatrixRight()
 	Kl_ = Mat(P1, cv::Rect(0, 0, 3, 3));
-	Kr_ = Mat(P1, cv::Rect(0, 0, 3, 3));
+	Kr_ = Mat(P2, cv::Rect(0, 0, 3, 3));
 
 	initUndistortRectifyMap(K1_, D1_, R1, P1, img_size_, CV_32FC1, map1_.first, map2_.first);
 	initUndistortRectifyMap(K2_, D2_, R2, P2, img_size_, CV_32FC1, map1_.second, map2_.second);
diff --git a/components/rgbd-sources/src/calibrate.hpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
similarity index 100%
rename from components/rgbd-sources/src/calibrate.hpp
rename to components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
diff --git a/components/rgbd-sources/src/local.cpp b/components/rgbd-sources/src/sources/stereovideo/local.cpp
similarity index 100%
rename from components/rgbd-sources/src/local.cpp
rename to components/rgbd-sources/src/sources/stereovideo/local.cpp
diff --git a/components/rgbd-sources/src/local.hpp b/components/rgbd-sources/src/sources/stereovideo/local.hpp
similarity index 100%
rename from components/rgbd-sources/src/local.hpp
rename to components/rgbd-sources/src/sources/stereovideo/local.hpp
diff --git a/components/rgbd-sources/src/stereovideo.cpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
similarity index 98%
rename from components/rgbd-sources/src/stereovideo.cpp
rename to components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
index 6573f74f4d7cf1f3761f98a66c68c6963e10af31..b84385fcecb3a8c00a94398d998d872c68929951 100644
--- a/components/rgbd-sources/src/stereovideo.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
@@ -12,7 +12,7 @@
 using ftl::rgbd::detail::Calibrate;
 using ftl::rgbd::detail::LocalSource;
 using ftl::rgbd::detail::StereoVideoSource;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 using std::string;
 
 StereoVideoSource::StereoVideoSource(ftl::rgbd::Source *host)
@@ -219,7 +219,7 @@ bool StereoVideoSource::compute(int n, int b) {
 	auto &left = frame.get<cv::cuda::GpuMat>(Channel::Left);
 	auto &right = frame.get<cv::cuda::GpuMat>(Channel::Right);
 
-	const ftl::rgbd::Channel chan = host_->getChannel();
+	const ftl::codecs::Channel chan = host_->getChannel();
 	if (left.empty() || right.empty()) return false;
 
 	if (chan == Channel::Depth) {
diff --git a/components/rgbd-sources/src/stereovideo.hpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
similarity index 96%
rename from components/rgbd-sources/src/stereovideo.hpp
rename to components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
index 9d3325e1ac27ec544abeb409149cb89817dc29d2..dfea7937f99777c23f753a7ab2b5bad8c68bb4af 100644
--- a/components/rgbd-sources/src/stereovideo.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
@@ -31,7 +31,7 @@ class StereoVideoSource : public detail::Source {
 	bool retrieve();
 	bool compute(int n, int b);
 	bool isReady();
-	Camera parameters(ftl::rgbd::Channel chan);
+	Camera parameters(ftl::codecs::Channel chan);
 
 	//const cv::Mat &getRight() const { return right_; }
 
diff --git a/components/rgbd-sources/src/virtual.cpp b/components/rgbd-sources/src/sources/virtual/virtual.cpp
similarity index 67%
rename from components/rgbd-sources/src/virtual.cpp
rename to components/rgbd-sources/src/sources/virtual/virtual.cpp
index 0e6db973884a3c8361fcaae764cc1688d2434d9d..17ce33638fbc1cfc934beb93243399cf93151118 100644
--- a/components/rgbd-sources/src/virtual.cpp
+++ b/components/rgbd-sources/src/sources/virtual/virtual.cpp
@@ -2,13 +2,58 @@
 
 using ftl::rgbd::VirtualSource;
 using ftl::rgbd::Source;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
+using ftl::rgbd::Camera;
 
 class VirtualImpl : public ftl::rgbd::detail::Source {
 	public:
 	explicit VirtualImpl(ftl::rgbd::Source *host, const ftl::rgbd::Camera &params) : ftl::rgbd::detail::Source(host) {
 		params_ = params;
-		capabilities_ = ftl::rgbd::kCapMovable | ftl::rgbd::kCapVideo | ftl::rgbd::kCapStereo;
+
+		params_right_.width = host->value("width", 1280);
+		params_right_.height = host->value("height", 720);
+		params_right_.fx = host->value("focal_right", 700.0f);
+		params_right_.fy = params_right_.fx;
+		params_right_.cx = host->value("centre_x_right", -(double)params_.width / 2.0);
+		params_right_.cy = host->value("centre_y_right", -(double)params_.height / 2.0);
+		params_right_.minDepth = host->value("minDepth", 0.1f);
+		params_right_.maxDepth = host->value("maxDepth", 20.0f);
+		params_right_.doffs = 0;
+		params_right_.baseline = host->value("baseline", 0.0f);
+
+		capabilities_ = ftl::rgbd::kCapVideo | ftl::rgbd::kCapStereo;
+
+		if (!host->value("locked", false)) capabilities_ |= ftl::rgbd::kCapMovable;
+		host->on("baseline", [this](const ftl::config::Event&) {
+			params_.baseline = host_->value("baseline", 0.0f);
+		});
+		
+		host->on("focal", [this](const ftl::config::Event&) {
+			params_.fx = host_->value("focal", 700.0f);
+			params_.fy = params_.fx;
+		});
+
+		host->on("centre_x", [this](const ftl::config::Event&) {
+			params_.cx = host_->value("centre_x", 0.0f);
+		});
+
+		host->on("centre_y", [this](const ftl::config::Event&) {
+			params_.cy = host_->value("centre_y", 0.0f);
+		});
+
+		host->on("focal_right", [this](const ftl::config::Event&) {
+			params_right_.fx = host_->value("focal_right", 700.0f);
+			params_right_.fy = params_right_.fx;
+		});
+
+		host->on("centre_x_right", [this](const ftl::config::Event&) {
+			params_right_.cx = host_->value("centre_x_right", 0.0f);
+		});
+
+		host->on("centre_y_right", [this](const ftl::config::Event&) {
+			params_right_.cy = host_->value("centre_y_right", 0.0f);
+		});
+
 	}
 
 	~VirtualImpl() {
@@ -47,8 +92,6 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
 					frame.hasChannel(host_->getChannel())) {
 				frame.download(host_->getChannel());
 				cv::swap(frame.get<cv::Mat>(host_->getChannel()), depth_);
-			} else {
-				LOG(ERROR) << "Channel 2 frame in rendering";
 			}
 
 			auto cb = host_->callback();
@@ -57,14 +100,32 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
 		return true;
 	}
 
+	Camera parameters(ftl::codecs::Channel c) {
+		return (c == Channel::Left) ? params_ : params_right_;
+	}
+
 	bool isReady() override { return true; }
 
 	std::function<void(ftl::rgbd::Frame &)> callback;
 	ftl::rgbd::Frame frame;
+
+	ftl::rgbd::Camera params_right_;
 };
 
 VirtualSource::VirtualSource(ftl::config::json_t &cfg) : Source(cfg) {
 	auto params = params_;
+
+	params_.width = value("width", 1280);
+	params_.height = value("height", 720);
+	params_.fx = value("focal", 700.0f);
+	params_.fy = params_.fx;
+	params_.cx = value("centre_x", -(double)params_.width / 2.0);
+	params_.cy = value("centre_y", -(double)params_.height / 2.0);
+	params_.minDepth = value("minDepth", 0.1f);
+	params_.maxDepth = value("maxDepth", 20.0f);
+	params_.doffs = 0;
+	params_.baseline = value("baseline", 0.0f);
+
 	impl_ = new VirtualImpl(this, params);
 }
 
diff --git a/components/rgbd-sources/src/streamer.cpp b/components/rgbd-sources/src/streamer.cpp
index 676cf58cf3045111cf966d78d587c0e918a351e7..939252d77a6704bf7c9c5a2202ec72a6c45562a8 100644
--- a/components/rgbd-sources/src/streamer.cpp
+++ b/components/rgbd-sources/src/streamer.cpp
@@ -17,7 +17,7 @@ using ftl::rgbd::detail::StreamClient;
 using ftl::rgbd::detail::ABRController;
 using ftl::codecs::definition_t;
 using ftl::codecs::device_t;
-using ftl::rgbd::Channel;
+using ftl::codecs::Channel;
 using ftl::net::Universe;
 using std::string;
 using std::list;
@@ -48,6 +48,7 @@ Streamer::Streamer(nlohmann::json &config, Universe *net)
 
 	//group_.setFPS(value("fps", 20));
 	group_.setLatency(4);
+	group_.setName("StreamGroup");
 
 	compress_level_ = value("compression", 1);
 	
@@ -92,7 +93,7 @@ Streamer::Streamer(nlohmann::json &config, Universe *net)
 	});
 
 	// Allow remote users to access camera calibration matrix
-	net->bind("source_details", [this](const std::string &uri, ftl::rgbd::Channel chan) -> tuple<unsigned int,vector<unsigned char>> {
+	net->bind("source_details", [this](const std::string &uri, ftl::codecs::Channel chan) -> tuple<unsigned int,vector<unsigned char>> {
 		vector<unsigned char> buf;
 		SHARED_LOCK(mutex_,slk);
 
@@ -126,6 +127,20 @@ Streamer::Streamer(nlohmann::json &config, Universe *net)
 	//net->bind("ping_streamer", [this](unsigned long long time) -> unsigned long long {
 	//	return time;
 	//});
+
+	on("hq_bitrate", [this](const ftl::config::Event &e) {
+		UNIQUE_LOCK(mutex_,ulk);
+		for (auto &s : sources_) {
+			s.second->hq_bitrate = value("hq_bitrate", ftl::codecs::kPresetBest);
+		}
+	});
+
+	on("lq_bitrate", [this](const ftl::config::Event &e) {
+		UNIQUE_LOCK(mutex_,ulk);
+		for (auto &s : sources_) {
+			s.second->lq_bitrate = value("lq_bitrate", ftl::codecs::kPresetWorst);
+		}
+	});
 }
 
 Streamer::~Streamer() {
@@ -165,6 +180,10 @@ void Streamer::add(Source *src) {
 		s->clientCount = 0;
 		s->hq_count = 0;
 		s->lq_count = 0;
+
+		s->hq_bitrate = value("hq_bitrate", ftl::codecs::kPresetBest);
+		s->lq_bitrate = value("lq_bitrate", ftl::codecs::kPresetWorst);
+
 		sources_[src->getID()] = s;
 
 		group_.addSource(src);
@@ -174,6 +193,40 @@ void Streamer::add(Source *src) {
 	net_->broadcast("add_stream", src->getID());
 }
 
+void Streamer::add(ftl::rgbd::Group *grp) {
+	auto srcs = grp->sources();
+	for (int i=0; i<srcs.size(); ++i) {
+		auto &src = srcs[i];
+		{
+			UNIQUE_LOCK(mutex_,ulk);
+			if (sources_.find(src->getID()) != sources_.end()) return;
+
+			StreamSource *s = new StreamSource;
+			s->src = src;
+			//s->prev_depth = cv::Mat(cv::Size(src->parameters().width, src->parameters().height), CV_16SC1, 0);
+			s->jobs = 0;
+			s->frame = 0;
+			s->clientCount = 0;
+			s->hq_count = 0;
+			s->lq_count = 0;
+			s->id = i;
+			sources_[src->getID()] = s;
+
+			//group_.addSource(src);
+
+			src->addRawCallback([this,s](Source *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+				//LOG(INFO) << "RAW CALLBACK";
+				_transmitPacket(s, spkt, pkt, Quality::Any);
+			});
+		}
+
+		LOG(INFO) << "Proxy Streaming: " << src->getID();
+		net_->broadcast("add_stream", src->getID());
+	}
+
+	LOG(INFO) << "All proxy streams added";
+}
+
 void Streamer::_addClient(const string &source, int N, int rate, const ftl::UUID &peer, const string &dest) {
 	StreamSource *s = nullptr;
 
@@ -349,10 +402,11 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 	// Prevent new clients during processing.
 	SHARED_LOCK(mutex_,slk);
 
-	if (fs.sources.size() != sources_.size()) {
-		LOG(ERROR) << "Incorrect number of sources in frameset: " << fs.sources.size() << " vs " << sources_.size();
-		return;
-	}
+	// This check is not valid, always assume fs.sources is correct
+	//if (fs.sources.size() != sources_.size()) {
+	//	LOG(ERROR) << "Incorrect number of sources in frameset: " << fs.sources.size() << " vs " << sources_.size();
+		//return;
+	//}
 
 	int totalclients = 0;
 
@@ -389,15 +443,22 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 				// Receiver only waits for channel 1 by default
 				// TODO: Each encode could be done in own thread
 				if (hasChan2) {
-					enc2->encode(fs.frames[j].get<cv::Mat>(fs.sources[j]->getChannel()), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
-						_transmitPacket(src, blk, 1, hasChan2, true);
+					// TODO: Stagger the reset between nodes... random phasing
+					if (fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc2->reset();
+
+					auto chan = fs.sources[j]->getChannel();
+
+					enc2->encode(fs.frames[j].get<cv::Mat>(chan), src->hq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
+						_transmitPacket(src, blk, chan, hasChan2, Quality::High);
 					});
 				} else {
 					if (enc2) enc2->reset();
 				}
 
+				// TODO: Stagger the reset between nodes... random phasing
+				if (fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc1->reset();
 				enc1->encode(fs.frames[j].get<cv::Mat>(Channel::Colour), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
-					_transmitPacket(src, blk, 0, hasChan2, true);
+					_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::High);
 				});
 			}
 		}
@@ -417,15 +478,17 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 				// Important to send channel 2 first if needed...
 				// Receiver only waits for channel 1 by default
 				if (hasChan2) {
-					enc2->encode(fs.frames[j].get<cv::Mat>(fs.sources[j]->getChannel()), src->lq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
-						_transmitPacket(src, blk, 1, hasChan2, false);
+					auto chan = fs.sources[j]->getChannel();
+
+					enc2->encode(fs.frames[j].get<cv::Mat>(chan), src->lq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
+						_transmitPacket(src, blk, chan, hasChan2, Quality::Low);
 					});
 				} else {
 					if (enc2) enc2->reset();
 				}
 
 				enc1->encode(fs.frames[j].get<cv::Mat>(Channel::Colour), src->lq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
-					_transmitPacket(src, blk, 0, hasChan2, false);
+					_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::Low);
 				});
 			}
 		}
@@ -491,19 +554,27 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 	} else _cleanUp();
 }
 
-void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::Packet &pkt, int chan, bool hasChan2, bool hqonly) {
+void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::Packet &pkt, Channel chan, bool hasChan2, Quality q) {
 	ftl::codecs::StreamPacket spkt = {
 		frame_no_,
-		static_cast<uint8_t>((chan & 0x1) | ((hasChan2) ? 0x2 : 0x0))
+		src->id,
+		(hasChan2) ? 2 : 1,
+		chan
+		//static_cast<uint8_t>((chan & 0x1) | ((hasChan2) ? 0x2 : 0x0))
 	};
-	LOG(INFO) << "codec:" << (int) pkt.codec;
+
+	_transmitPacket(src, spkt, pkt, q);
+}
+
+void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt, Quality q) {
 	// Lock to prevent clients being added / removed
 	//SHARED_LOCK(src->mutex,lk);
 	auto c = src->clients.begin();
 	while (c != src->clients.end()) {
 		const ftl::codecs::preset_t b = (*c).preset;
-		if ((hqonly && b >= kQualityThreshold) || (!hqonly && b < kQualityThreshold)) {
+		if ((q == Quality::High && b >= kQualityThreshold) || (q == Quality::Low && b < kQualityThreshold)) {
 			++c;
+			LOG(INFO) << "INCORRECT QUALITY";
 			continue;
 		}
 
@@ -520,7 +591,7 @@ void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::Packet &pkt
 				(*c).txcount = (*c).txmax;
 			} else {
 				// Count frame as completed only if last block and channel is 0
-				if (pkt.block_number == pkt.block_total - 1 && chan == 0) ++(*c).txcount;
+				if (pkt.block_number == pkt.block_total - 1 && spkt.channel == Channel::Colour) ++(*c).txcount;
 			}
 		} catch(...) {
 			(*c).txcount = (*c).txmax;
@@ -718,7 +789,7 @@ void Streamer::_encodeImageChannel1(const cv::Mat &in, vector<unsigned char> &ou
 	cv::imencode(".jpg", in, out, jpgparams);
 }
 
-bool Streamer::_encodeImageChannel2(const cv::Mat &in, vector<unsigned char> &out, ftl::rgbd::channel_t c, unsigned int b) {
+bool Streamer::_encodeImageChannel2(const cv::Mat &in, vector<unsigned char> &out, ftl::codecs::Channel_t c, unsigned int b) {
 	if (c == ftl::rgbd::kChanNone) return false;  // NOTE: Should not happen
 
 	if (isFloatChannel(c) && in.type() == CV_16U && in.channels() == 1) {
diff --git a/components/rgbd-sources/test/CMakeLists.txt b/components/rgbd-sources/test/CMakeLists.txt
index 78bb6cec7e8c411ffbfe982a1b65320f56439bd5..9611e8081d942a6051e0ea254b3308d406161cd7 100644
--- a/components/rgbd-sources/test/CMakeLists.txt
+++ b/components/rgbd-sources/test/CMakeLists.txt
@@ -9,17 +9,6 @@ target_link_libraries(source_unit
 
 add_test(SourceUnitTest source_unit)
 
-### Channel Unit ###############################################################
-add_executable(channel_unit
-	./tests.cpp
-	./channel_unit.cpp
-)
-target_include_directories(channel_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
-target_link_libraries(channel_unit
-	ftlcommon)
-
-add_test(ChannelUnitTest channel_unit)
-
 ### Frame Unit #################################################################
 add_executable(frame_unit
 	./tests.cpp
diff --git a/components/rgbd-sources/test/frame_unit.cpp b/components/rgbd-sources/test/frame_unit.cpp
index 6ad528a28fd677e207b05362c0021f7d302784dc..6d858a62ab7765f1efc6f98ab09c5109f8101603 100644
--- a/components/rgbd-sources/test/frame_unit.cpp
+++ b/components/rgbd-sources/test/frame_unit.cpp
@@ -2,8 +2,8 @@
 #include <ftl/rgbd/frame.hpp>
 
 using ftl::rgbd::Frame;
-using ftl::rgbd::Channel;
-using ftl::rgbd::Channels;
+using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 using ftl::rgbd::Format;
 
 TEST_CASE("Frame::create() cpu mat", "") {
diff --git a/components/rgbd-sources/test/source_unit.cpp b/components/rgbd-sources/test/source_unit.cpp
index dca38be2ffb6e06b8be416c8de71a7a799c77867..1b69631aa0ec0312cf1cba06533967f585babf56 100644
--- a/components/rgbd-sources/test/source_unit.cpp
+++ b/components/rgbd-sources/test/source_unit.cpp
@@ -18,6 +18,14 @@ class SnapshotReader {
 	Snapshot readArchive() { return Snapshot(); };
 };
 
+class Player {
+	public:
+	explicit Player(std::istream &) {}
+
+	bool begin() { return true; }
+	bool end() { return true; }
+};
+
 namespace detail {
 
 class ImageSource : public ftl::rgbd::detail::Source {
@@ -74,6 +82,18 @@ class SnapshotSource : public ftl::rgbd::detail::Source {
 	bool isReady() { return true; };
 };
 
+class FileSource : public ftl::rgbd::detail::Source {
+	public:
+	FileSource(ftl::rgbd::Source *host, ftl::rgbd::Player *r, int) : ftl::rgbd::detail::Source(host) {
+		last_type = "filesource";
+	}
+
+	bool capture(int64_t ts) { return true; }
+	bool retrieve() { return true; }
+	bool compute(int n, int b) { return true; };
+	bool isReady() { return true; };
+};
+
 class RealsenseSource : public ftl::rgbd::detail::Source {
 	public:
 	explicit RealsenseSource(ftl::rgbd::Source *host) : ftl::rgbd::detail::Source(host) {
@@ -112,6 +132,7 @@ class MiddleburySource : public ftl::rgbd::detail::Source {
 #define _FTL_RGBD_IMAGE_HPP_
 #define _FTL_RGBD_REALSENSE_HPP_
 #define _FTL_RGBD_MIDDLEBURY_SOURCE_HPP_
+#define _FTL_RGBD_FILE_SOURCE_HPP_
 
 #include "../src/source.cpp"
 
diff --git a/config/config_nick.jsonc b/config/config_nick.jsonc
index 53673a170693bd7ffd6d4386977417897fa55b8a..f0c28c062467f7b43f49adbb279c84b1589a914d 100644
--- a/config/config_nick.jsonc
+++ b/config/config_nick.jsonc
@@ -521,15 +521,22 @@
 			"listen": "tcp://*:9002"
 		},
 		"sources": [
-			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#0", "index": "camera0"},
-			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#1", "index": "camera1"},
-			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#2", "index": "camera2"},
-			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#3", "index": "camera3"},
-			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#4", "index": "camera4"}
+			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#0", "index": "0"},
+			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#1", "index": "1"},
+			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#2", "index": "2"},
+			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#3", "index": "3"},
+			{"uri":"file:///home/nick/Pictures/FTL/snaptest10.tar.gz#4", "index": "4"}
 		],
 		"display": { "$ref": "#displays/left" },
 		"virtual": { "$ref": "#virtual_cams/default" },
-		"voxelhash": { "$ref": "#hash_conf/default" },
+		"renderer": {
+			"clipping": {
+				"width": 0.5,
+				"height": 2.0,
+				"depth": 0.8,
+				"z": -2.2
+			}
+		},
 		"merge": {
 			"$id": "ftl://blah/blah",
 			"targetsource" : "ftl://utu.fi/node3#vision_default/source",
@@ -540,7 +547,11 @@
 			"delay" : 500,
 			"patternsize" : [9, 6]
 		},
-		"stream": {}
+		"stream": {},
+		"transform": {
+			"pitch": -0.4,
+			"y": -0.5
+		}
 	},
 
 	"reconstruction_lab": {