diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5a81ac8cbd6f27bb4388eab11d6d4b1b15875410..c6173a686d387b22961dcd2ee6b90c120dc39631 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,7 +8,7 @@ stages:
  - all
 # - build
 # - test
-# - deploy
+ - deploy
 
 #cache:
 #  paths:
@@ -29,6 +29,20 @@ linux:
     - make
     - ctest --output-on-failure
 
+webserver-deploy:
+  only:
+    - master
+  stage: deploy
+  tags:
+    - linux
+  variables:
+    NODE_SERVER: '10.0.0.9'
+  script:
+    - npm install web-service/server
+    - browserify web-service/public/js/index.js -o web-service/public/js/bundle.js
+    - rsync -vr --delete web-service/ nodejs@${NODE_SERVER}:/srv/nodejs/web-service
+    - ssh nodejs@${NODE_SERVER} -- "npm install web-service/server && pm2 restart web-service"
+
 windows:
   stage: all
   variables:
diff --git a/applications/player/src/main.cpp b/applications/player/src/main.cpp
index 33e146880668fb29224aaffc8942cb4255ebf0c1..2741cac2ef832f3cc60073f7320d443ea9620c9d 100644
--- a/applications/player/src/main.cpp
+++ b/applications/player/src/main.cpp
@@ -40,6 +40,21 @@ static void visualizeDepthMap(	const cv::Mat &depth, cv::Mat &out,
 	//out.setTo(cv::Scalar(255, 255, 255), mask);
 }
 
+static std::string nameForCodec(ftl::codecs::codec_t c) {
+	switch(c) {
+	case codec_t::JPG 	: return "JPEG";
+	case codec_t::PNG	: return "PNG";
+	case codec_t::H264	: return "H264";
+	case codec_t::HEVC	: return "HEVC";
+	case codec_t::JSON	: return "JSON";
+	case codec_t::POSE	: return "POSE";
+	case codec_t::RAW	: return "RAW";
+	case codec_t::CALIBRATION : return "CALIBRATION";
+	case codec_t::MSGPACK : return "MSGPACK";
+	default: return std::string("UNKNOWN (") + std::to_string((int)c) + std::string(")");
+	}
+}
+
 int main(int argc, char **argv) {
     std::string filename(argv[1]);
     LOG(INFO) << "Playing: " << filename;
@@ -73,6 +88,11 @@ int main(int argc, char **argv) {
 			if (!(channel_mask[spkt.streamID][(int)spkt.channel])) {
 				channel_mask[spkt.streamID].set((int)spkt.channel);
 				LOG(INFO) << " - Channel " << (int)spkt.channel << " found (" << (int)spkt.streamID << ")";
+				LOG(INFO) << "     - Codec = " << nameForCodec(pkt.codec);
+				LOG(INFO) << "     - Width = " << ftl::codecs::getWidth(pkt.definition);
+				LOG(INFO) << "     - Height = " << ftl::codecs::getHeight(pkt.definition);
+				LOG(INFO) << "     - Start Time = " << float(spkt.timestamp - r.getStartTime()) / 1000.0f << "(s)";
+				LOG(INFO) << "     - Blocks = " << (int)pkt.block_total;
 			}
 
 			if (spkt.streamID == current_stream) {
diff --git a/applications/reconstruct/src/ilw/discontinuity.cu b/applications/reconstruct/src/ilw/discontinuity.cu
index fe78d47158e81ce02be82953601151f5bf31703a..fcadde03efa6078c8041a244b717c99fbb24581f 100644
--- a/applications/reconstruct/src/ilw/discontinuity.cu
+++ b/applications/reconstruct/src/ilw/discontinuity.cu
@@ -5,11 +5,12 @@
 using ftl::cuda::Mask;
 
 template <int RADIUS>
-__global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params) {
+__global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl::cuda::TextureObject<float> depth,
+										const cv::Size size, const double minDepth, const double maxDepth) {
 	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-	if (x < params.width && y < params.height) {
+	if (x < size.width && y < size.height) {
 		Mask mask(0);
 
 		const float d = depth.tex2D((int)x, (int)y);
@@ -17,7 +18,7 @@ __global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl
 		// Calculate depth between 0.0 and 1.0
 		//float p = (d - params.minDepth) / (params.maxDepth - params.minDepth);
 
-		if (d >= params.minDepth && d <= params.maxDepth) {
+		if (d >= minDepth && d <= maxDepth) {
 			/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
 			// Is there a discontinuity nearby?
 			for (int u=-RADIUS; u<=RADIUS; ++u) {
@@ -26,22 +27,25 @@ __global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl
 					if (fabs(depth.tex2D((int)x+u, (int)y+v) - d) > 0.1f) mask.isDiscontinuity(true);
 				}
 			}
-        }
-        
-        mask_out(x,y) = (int)mask;
+		}
+
+		mask_out(x,y) = (int)mask;
 	}
 }
 
-void ftl::cuda::discontinuity(ftl::cuda::TextureObject<int> &mask_out, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, uint discon, cudaStream_t stream) {
-	const dim3 gridSize((params.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (params.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
+void ftl::cuda::discontinuity(ftl::cuda::TextureObject<int> &mask_out, ftl::cuda::TextureObject<float> &depth,
+								const cv::Size size, const double minDepth, const double maxDepth,
+								uint discon, cudaStream_t stream) {
+
+	const dim3 gridSize((size.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (size.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
 	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
 	switch (discon) {
-    case 5 :	discontinuity_kernel<5><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
-	case 4 :	discontinuity_kernel<4><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
-	case 3 :	discontinuity_kernel<3><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
-	case 2 :	discontinuity_kernel<2><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
-	case 1 :	discontinuity_kernel<1><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, params); break;
+	case 5 :	discontinuity_kernel<5><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, size, minDepth, maxDepth); break;
+	case 4 :	discontinuity_kernel<4><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, size, minDepth, maxDepth); break;
+	case 3 :	discontinuity_kernel<3><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, size, minDepth, maxDepth); break;
+	case 2 :	discontinuity_kernel<2><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, size, minDepth, maxDepth); break;
+	case 1 :	discontinuity_kernel<1><<<gridSize, blockSize, 0, stream>>>(mask_out, depth, size, minDepth, maxDepth); break;
 	default:	break;
 	}
 	cudaSafeCall( cudaGetLastError() );
diff --git a/applications/reconstruct/src/ilw/ilw_cuda.hpp b/applications/reconstruct/src/ilw/ilw_cuda.hpp
index fad97afbdb8385d9381eff5af40fb272f172ca1a..94e522347bb64d9b0b091c3f0cd4914b8abd91c2 100644
--- a/applications/reconstruct/src/ilw/ilw_cuda.hpp
+++ b/applications/reconstruct/src/ilw/ilw_cuda.hpp
@@ -10,15 +10,15 @@ namespace ftl {
 namespace cuda {
 
 struct ILWParams {
-    float spatial_smooth;
-    float colour_smooth;
+	float spatial_smooth;
+	float colour_smooth;
 	float fill_match;
 	float fill_threshold;
 	float match_threshold;
-    float cost_ratio;
-    float cost_threshold;
+	float cost_ratio;
+	float cost_threshold;
 	float range;
-    uint flags;
+	uint flags;
 };
 
 static const uint kILWFlag_IgnoreBad = 0x0001;
@@ -29,7 +29,9 @@ static const uint kILWFlag_ColourConfidenceOnly = 0x0008;
 void discontinuity(
 	ftl::cuda::TextureObject<int> &mask_out,
 	ftl::cuda::TextureObject<float> &depth,
-	const ftl::rgbd::Camera &params,
+	const cv::Size size,
+	const double minDepth,
+	const double maxDepth,
 	uint discon, cudaStream_t stream
 );
 
@@ -49,32 +51,32 @@ void preprocess_depth(
 );
 
 void correspondence(
-    ftl::cuda::TextureObject<float> &d1,
-    ftl::cuda::TextureObject<float> &d2,
-    ftl::cuda::TextureObject<uchar4> &c1,
-    ftl::cuda::TextureObject<uchar4> &c2,
-    ftl::cuda::TextureObject<float> &dout,
-    ftl::cuda::TextureObject<float> &conf,
+	ftl::cuda::TextureObject<float> &d1,
+	ftl::cuda::TextureObject<float> &d2,
+	ftl::cuda::TextureObject<uchar4> &c1,
+	ftl::cuda::TextureObject<uchar4> &c2,
+	ftl::cuda::TextureObject<float> &dout,
+	ftl::cuda::TextureObject<float> &conf,
 	ftl::cuda::TextureObject<int> &mask,
-    float4x4 &pose1,
-    float4x4 &pose1_inv,
-    float4x4 &pose2,
-    const ftl::rgbd::Camera &cam1,
-    const ftl::rgbd::Camera &cam2,
-    const ILWParams &params, int win,
-    cudaStream_t stream
+	float4x4 &pose1,
+	float4x4 &pose1_inv,
+	float4x4 &pose2,
+	const ftl::rgbd::Camera &cam1,
+	const ftl::rgbd::Camera &cam2,
+	const ILWParams &params, int win,
+	cudaStream_t stream
 );
 
 void move_points(
-    ftl::cuda::TextureObject<float> &d_old,
-    ftl::cuda::TextureObject<float> &d_new,
+	ftl::cuda::TextureObject<float> &d_old,
+	ftl::cuda::TextureObject<float> &d_new,
 	ftl::cuda::TextureObject<float> &conf,
-    const ftl::rgbd::Camera &camera,
-    const float4x4 &pose,
+	const ftl::rgbd::Camera &camera,
+	const float4x4 &pose,
 	const ILWParams &params,
-    float rate,
-    int radius,
-    cudaStream_t stream
+	float rate,
+	int radius,
+	cudaStream_t stream
 );
 
 }
diff --git a/applications/reconstruct/src/main.cpp b/applications/reconstruct/src/main.cpp
index 33db96ae046a1ee5c29898a503d0c483446e7f91..dac32881b73df64b9c02d8803c4b6eba6e763871 100644
--- a/applications/reconstruct/src/main.cpp
+++ b/applications/reconstruct/src/main.cpp
@@ -82,8 +82,8 @@ static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
 	return rz * rx * ry;
 }
 
-// TODO:	*	Remove this class (requires more general solution). Also does not
-//				process disconnections/reconnections/types etc. correctly.
+// TODO:	*	Remove this class (requires more general solution). Also does
+//				not process disconnections/reconnections/types etc. correctly.
 //			*	Update when new options become available.
 
 class ConfigProxy {
@@ -216,6 +216,7 @@ static void run(ftl::Configurable *root) {
 
 		for (auto &input : sources) {
 			string uri = input->getURI();
+
 			auto T = transformations.find(uri);
 			if (T == transformations.end()) {
 				LOG(WARNING) << "Camera pose for " + uri + " not found in transformations";
diff --git a/applications/reconstruct/src/reconstruction.cpp b/applications/reconstruct/src/reconstruction.cpp
index be99d1581c3555b1d52aab156bb9ed316d559de6..8a868e56eeeba0a459a939f086d891652093c7e2 100644
--- a/applications/reconstruct/src/reconstruction.cpp
+++ b/applications/reconstruct/src/reconstruction.cpp
@@ -60,10 +60,36 @@ Reconstruction::Reconstruction(nlohmann::json &config, const std::string name) :
 
 		ftl::pool.push([this](int id) {
 			UNIQUE_LOCK(fs_align_.mtx, lk);
+			
+			/*rgb_.resize(fs_align_.frames.size());
+			for (size_t i = 0; i < rgb_.size(); i++) {
+				auto &depth = fs_align_.frames[i].get<cv::cuda::GpuMat>(ftl::codecs::Channel::Depth);
+				auto &color = fs_align_.frames[i].get<cv::cuda::GpuMat>(ftl::codecs::Channel::Colour);
+
+				if (depth.size() != color.size()) {
+					std::swap(rgb_[i], color);
+					cv::cuda::resize(rgb_[i], color, depth.size(), 0.0, 0.0, cv::INTER_LINEAR);
+				}
+			}*/
+
 			pipeline_->apply(fs_align_, fs_align_, 0);
 			
 			// TODO: To use second GPU, could do a download, swap, device change,
 			// then upload to other device. Or some direct device-2-device copy.
+			/*
+			for (size_t i = 0; i < rgb_.size(); i++) {
+				auto &depth = fs_align_.frames[i].get<cv::cuda::GpuMat>(ftl::codecs::Channel::Depth);
+				auto &color = fs_align_.frames[i].get<cv::cuda::GpuMat>(ftl::codecs::Channel::Colour);
+				auto &tmp = rgb_[i];
+
+				// TODO doesn't always work correctly if resolution changes
+				if (!tmp.empty() && (depth.size() != tmp.size())) {
+					std::swap(tmp, color);
+					fs_align_.frames[i].resetTexture(ftl::codecs::Channel::Colour);
+					fs_align_.frames[i].createTexture<uchar4>(ftl::codecs::Channel::Colour, true);
+				}
+			}*/
+
 			fs_align_.swapTo(fs_render_);
 
 			LOG(INFO) << "Align complete... " << fs_align_.timestamp;
diff --git a/applications/reconstruct/src/reconstruction.hpp b/applications/reconstruct/src/reconstruction.hpp
index 6546f85c163142d4b1cbdf9f84081ca21d703907..50441bedc32b33bb2e64bfac953eda7a579edcb7 100644
--- a/applications/reconstruct/src/reconstruction.hpp
+++ b/applications/reconstruct/src/reconstruction.hpp
@@ -27,11 +27,14 @@ class Reconstruction : public ftl::Configurable {
 
 	private:
 	bool busy_;
+	
 	ftl::rgbd::FrameSet fs_render_;
 	ftl::rgbd::FrameSet fs_align_;
 	ftl::rgbd::Group *group_;
 	ftl::operators::Graph *pipeline_;
 	ftl::render::Triangular *renderer_;
+
+	std::vector<cv::cuda::GpuMat> rgb_;
 };
 
 }
diff --git a/components/codecs/include/ftl/codecs/bitrates.hpp b/components/codecs/include/ftl/codecs/bitrates.hpp
index d34ede8adb88c647949051853dde33f81221a8d2..fbacb49790577d8d354e9acff69b275834803a1e 100644
--- a/components/codecs/include/ftl/codecs/bitrates.hpp
+++ b/components/codecs/include/ftl/codecs/bitrates.hpp
@@ -49,6 +49,8 @@ enum struct definition_t : uint8_t {
 	Invalid
 };
 
+definition_t findDefinition(int width, int height);
+
 /**
  * Get width in pixels of definition.
  */
@@ -97,10 +99,8 @@ static const preset_t kPresetMinimum = -1;
  * Represents the details of each preset codec configuration.
  */
 struct CodecPreset {
-	definition_t colour_res;
-	definition_t depth_res;
-	bitrate_t colour_qual;
-	bitrate_t depth_qual;
+	definition_t res;
+	bitrate_t qual;
 };
 
 /**
diff --git a/components/codecs/include/ftl/codecs/channels.hpp b/components/codecs/include/ftl/codecs/channels.hpp
index 6673275fe7b4f874b0807c48e690e24b4aaddb51..9aa2143020c89c1331f3d408f33f2ef359cf77b8 100644
--- a/components/codecs/include/ftl/codecs/channels.hpp
+++ b/components/codecs/include/ftl/codecs/channels.hpp
@@ -8,30 +8,31 @@ namespace ftl {
 namespace codecs {
 
 enum struct Channel : int {
-    None			= -1,
-    Colour			= 0,	// 8UC3 or 8UC4
-    Left			= 0,
-    Depth			= 1,	// 32S or 32F
-    Right			= 2,	// 8UC3 or 8UC4
-    Colour2			= 2,
-    Disparity		= 3,
-    Depth2			= 3,
-    Deviation		= 4,
-    Screen          = 4,
-    Normals			= 5,	// 32FC4
-    Points			= 6,	// 32FC4 (should be deprecated)
-    Confidence		= 7,	// 32F
-    Contribution	= 7,	// 32F
-    EnergyVector	= 8,	// 32FC4
-    Flow			= 9,	// 32F
-    Smoothing       = 9,    // 32F
-    Energy			= 10,	// 32F
+	None			= -1,
+	Colour			= 0,	// 8UC3 or 8UC4
+	Left			= 0,
+	Depth			= 1,	// 32S or 32F
+	Right			= 2,	// 8UC3 or 8UC4
+	Colour2			= 2,
+	Disparity		= 3,
+	Depth2			= 3,
+	Deviation		= 4,
+	Screen			= 4,
+	Normals			= 5,	// 32FC4
+	Points			= 6,	// 32FC4 (should be deprecated)
+	Confidence		= 7,	// 32F
+	Contribution	= 7,	// 32F
+	EnergyVector	= 8,	// 32FC4
+	Flow			= 9,	// 32F
+	Smoothing		= 9,	// 32F
+	Energy			= 10,	// 32F
 	Mask			= 11,	// 32U
 	Density			= 12,	// 32F
 	Support1		= 13,	// 8UC4 (currently)
 	Support2		= 14,	// 8UC4 (currently)
-    Segmentation	= 15,	// 32S?
-    ColourNormals   = 16,   // 8UC4
+	Segmentation	= 15,	// 32S?
+	ColourNormals	= 16,	// 8UC4
+	ColourHighRes	= 20,	// 8UC3 or 8UC4
 
 	AudioLeft		= 32,
 	AudioRight		= 33,
@@ -39,7 +40,7 @@ enum struct Channel : int {
 	Configuration	= 64,	// JSON Data
 	Calibration		= 65,	// Camera Parameters Object
 	Pose			= 66,	// Eigen::Matrix4d
-    Index           = 67,
+	Index           = 67,
 	Data			= 2048	// Custom data, any codec.
 };
 
@@ -51,7 +52,7 @@ std::string name(Channel c);
 int type(Channel c);
 
 class Channels {
-    public:
+	public:
 
 	class iterator {
 		public:
@@ -67,48 +68,48 @@ class Channels {
 		unsigned int ix_;
 	};
 
-    inline Channels() { mask = 0; }
-    inline explicit Channels(unsigned int m) { mask = m; }
-    inline explicit Channels(Channel c) { mask = (c == Channel::None) ? 0 : 0x1 << static_cast<unsigned int>(c); }
-    inline Channels &operator=(Channel c) { mask = (c == Channel::None) ? 0 : 0x1 << static_cast<unsigned int>(c); return *this; }
-    inline Channels operator|(Channel c) const { return (c == Channel::None) ? Channels(mask) : Channels(mask | (0x1 << static_cast<unsigned int>(c))); }
-    inline Channels operator+(Channel c) const { return (c == Channel::None) ? Channels(mask) : Channels(mask | (0x1 << static_cast<unsigned int>(c))); }
-    inline Channels &operator|=(Channel c) { mask |= (c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c)); return *this; }
-    inline Channels &operator+=(Channel c) { mask |= (c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c)); return *this; }
-    inline Channels &operator-=(Channel c) { mask &= ~((c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c))); return *this; }
-    inline Channels &operator+=(unsigned int c) { mask |= (0x1 << c); return *this; }
-    inline Channels &operator-=(unsigned int c) { mask &= ~(0x1 << c); return *this; }
-
-    inline bool has(Channel c) const {
-        return (c == Channel::None) ? true : mask & (0x1 << static_cast<unsigned int>(c));
-    }
-
-    inline bool has(unsigned int c) const {
-        return mask & (0x1 << c);
-    }
+	inline Channels() { mask = 0; }
+	inline explicit Channels(unsigned int m) { mask = m; }
+	inline explicit Channels(Channel c) { mask = (c == Channel::None) ? 0 : 0x1 << static_cast<unsigned int>(c); }
+	inline Channels &operator=(Channel c) { mask = (c == Channel::None) ? 0 : 0x1 << static_cast<unsigned int>(c); return *this; }
+	inline Channels operator|(Channel c) const { return (c == Channel::None) ? Channels(mask) : Channels(mask | (0x1 << static_cast<unsigned int>(c))); }
+	inline Channels operator+(Channel c) const { return (c == Channel::None) ? Channels(mask) : Channels(mask | (0x1 << static_cast<unsigned int>(c))); }
+	inline Channels &operator|=(Channel c) { mask |= (c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c)); return *this; }
+	inline Channels &operator+=(Channel c) { mask |= (c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c)); return *this; }
+	inline Channels &operator-=(Channel c) { mask &= ~((c == Channel::None) ? 0 : (0x1 << static_cast<unsigned int>(c))); return *this; }
+	inline Channels &operator+=(unsigned int c) { mask |= (0x1 << c); return *this; }
+	inline Channels &operator-=(unsigned int c) { mask &= ~(0x1 << c); return *this; }
+
+	inline bool has(Channel c) const {
+		return (c == Channel::None) ? true : mask & (0x1 << static_cast<unsigned int>(c));
+	}
+
+	inline bool has(unsigned int c) const {
+		return mask & (0x1 << c);
+	}
 
 	inline iterator begin() { return iterator(*this, 0); }
 	inline iterator end() { return iterator(*this, 32); }
 
-    inline operator unsigned int() { return mask; }
-    inline operator bool() { return mask > 0; }
-    inline operator Channel() {
-        if (mask == 0) return Channel::None;
-        int ix = 0;
-        int tmask = mask;
-        while (!(tmask & 0x1) && ++ix < 32) tmask >>= 1;
-        return static_cast<Channel>(ix);
-    }
-    
-    inline size_t count() { return std::bitset<32>(mask).count(); }
-    inline void clear() { mask = 0; }
-
-    static const size_t kMax = 32;
+	inline operator unsigned int() { return mask; }
+	inline operator bool() { return mask > 0; }
+	inline operator Channel() {
+		if (mask == 0) return Channel::None;
+		int ix = 0;
+		int tmask = mask;
+		while (!(tmask & 0x1) && ++ix < 32) tmask >>= 1;
+		return static_cast<Channel>(ix);
+	}
+	
+	inline size_t count() { return std::bitset<32>(mask).count(); }
+	inline void clear() { mask = 0; }
+
+	static const size_t kMax = 32;
 
 	static Channels All();
 
-    private:
-    unsigned int mask;
+	private:
+	unsigned int mask;
 };
 
 inline Channels::iterator Channels::iterator::operator++() { Channels::iterator i = *this; while (++ix_ < 32 && !channels_.has(ix_)); return i; }
@@ -124,9 +125,9 @@ static const Channels kAllChannels(0xFFFFFFFFu);
 inline bool isFloatChannel(ftl::codecs::Channel chan) {
 	switch (chan) {
 	case Channel::Depth		:
-    //case Channel::Normals   :
+	//case Channel::Normals   :
 	case Channel::Confidence:
-    case Channel::Flow      :
+	case Channel::Flow      :
 	case Channel::Density:
 	case Channel::Energy	: return true;
 	default					: return false;
@@ -139,11 +140,11 @@ inline bool isFloatChannel(ftl::codecs::Channel chan) {
 MSGPACK_ADD_ENUM(ftl::codecs::Channel);
 
 inline ftl::codecs::Channels operator|(ftl::codecs::Channel a, ftl::codecs::Channel b) {
-    return ftl::codecs::Channels(a) | b;
+	return ftl::codecs::Channels(a) | b;
 }
 
 inline ftl::codecs::Channels operator+(ftl::codecs::Channel a, ftl::codecs::Channel b) {
-    return ftl::codecs::Channels(a) | b;
+	return ftl::codecs::Channels(a) | b;
 }
 
 #endif  // _FTL_RGBD_CHANNELS_HPP_
diff --git a/components/codecs/include/ftl/codecs/encoder.hpp b/components/codecs/include/ftl/codecs/encoder.hpp
index 9c3aa8fefc64810bf7660e323b44a3c4440d5098..ed817f7b1c5a59b133c36317195a5c2da9203e56 100644
--- a/components/codecs/include/ftl/codecs/encoder.hpp
+++ b/components/codecs/include/ftl/codecs/encoder.hpp
@@ -46,16 +46,16 @@ void free(Encoder *&e);
  * convert an OpenCV Mat or GpuMat into a compressed byte array of some form.
  */
 class Encoder {
-    public:
-    friend Encoder *allocateEncoder(ftl::codecs::definition_t,
+	public:
+	friend Encoder *allocateEncoder(ftl::codecs::definition_t,
 			ftl::codecs::device_t, ftl::codecs::codec_t);
-    friend void free(Encoder *&);
+	friend void free(Encoder *&);
 
-    public:
-    Encoder(ftl::codecs::definition_t maxdef,
+	public:
+	Encoder(ftl::codecs::definition_t maxdef,
 			ftl::codecs::definition_t mindef,
 			ftl::codecs::device_t dev);
-    virtual ~Encoder();
+	virtual ~Encoder();
 
 	/**
 	 * Wrapper encode to allow use of presets.
@@ -76,21 +76,21 @@ class Encoder {
 	 * @param cb Callback containing compressed data
 	 * @return True if succeeded with encoding.
 	 */
-    virtual bool encode(
+	virtual bool encode(
 			const cv::cuda::GpuMat &in,
 			ftl::codecs::definition_t definition,
 			ftl::codecs::bitrate_t bitrate,
 			const std::function<void(const ftl::codecs::Packet&)> &cb)=0;
 
 	// TODO: Eventually, use GPU memory directly since some encoders can support this
-    //virtual bool encode(const cv::cuda::GpuMat &in, std::vector<uint8_t> &out, bitrate_t bix, bool)=0;
+	//virtual bool encode(const cv::cuda::GpuMat &in, std::vector<uint8_t> &out, bitrate_t bix, bool)=0;
 
 	virtual void reset() {}
 
 	virtual bool supports(ftl::codecs::codec_t codec)=0;
 
-    protected:
-    bool available;
+	protected:
+	bool available;
 	const ftl::codecs::definition_t max_definition;
 	const ftl::codecs::definition_t min_definition;
 	const ftl::codecs::device_t device;
diff --git a/components/codecs/include/ftl/codecs/hevc.hpp b/components/codecs/include/ftl/codecs/hevc.hpp
index f658635d6f239b4aa7a21331f60f6936c517ba93..b3a32246544f3cf24a4ad09345c2f47a96eb0735 100644
--- a/components/codecs/include/ftl/codecs/hevc.hpp
+++ b/components/codecs/include/ftl/codecs/hevc.hpp
@@ -97,6 +97,10 @@ inline NALType getNALType(const std::vector<uint8_t> &data) {
 	return static_cast<NALType>((data[4] >> 1) & 0x3F);
 }
 
+inline bool validNAL(const std::vector<uint8_t> &data) {
+	return data.size() >= 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1;
+}
+
 /**
  * Check the HEVC bitstream for an I-Frame. With NvPipe, all I-Frames start
  * with a VPS NAL unit so just check for this.
diff --git a/components/codecs/include/ftl/codecs/nvpipe_encoder.hpp b/components/codecs/include/ftl/codecs/nvpipe_encoder.hpp
index 5d04068c53cf3b46dee73c63cf8e2fcf674f148d..07c874d128b8915265f7f4035c2fcf294b4ea07a 100644
--- a/components/codecs/include/ftl/codecs/nvpipe_encoder.hpp
+++ b/components/codecs/include/ftl/codecs/nvpipe_encoder.hpp
@@ -8,20 +8,20 @@ namespace ftl {
 namespace codecs {
 
 class NvPipeEncoder : public ftl::codecs::Encoder {
-    public:
-    NvPipeEncoder(ftl::codecs::definition_t maxdef,
+	public:
+	NvPipeEncoder(ftl::codecs::definition_t maxdef,
 			ftl::codecs::definition_t mindef);
-    ~NvPipeEncoder();
+	~NvPipeEncoder();
 
 	bool encode(const cv::cuda::GpuMat &in, ftl::codecs::preset_t preset,
 			const std::function<void(const ftl::codecs::Packet&)> &cb) {
 		return Encoder::encode(in, preset, cb);
 	}
 
-    bool encode(const cv::cuda::GpuMat &in, ftl::codecs::definition_t definition, ftl::codecs::bitrate_t bitrate,
+	bool encode(const cv::cuda::GpuMat &in, ftl::codecs::definition_t definition, ftl::codecs::bitrate_t bitrate,
 			const std::function<void(const ftl::codecs::Packet&)>&) override;
 
-    //bool encode(const cv::cuda::GpuMat &in, std::vector<uint8_t> &out, bitrate_t bix, bool);
+	//bool encode(const cv::cuda::GpuMat &in, std::vector<uint8_t> &out, bitrate_t bix, bool);
 
 	void reset();
 
@@ -29,18 +29,18 @@ class NvPipeEncoder : public ftl::codecs::Encoder {
 
 	static constexpr int kFlagRGB = 0x00000001;
 
-    private:
-    NvPipe *nvenc_;
-    definition_t current_definition_;
-    bool is_float_channel_;
+	private:
+	NvPipe *nvenc_;
+	definition_t current_definition_;
+	bool is_float_channel_;
 	bool was_reset_;
 	ftl::codecs::codec_t preference_;
 	cv::cuda::GpuMat tmp_;
 	cv::cuda::GpuMat tmp2_;
 	cv::cuda::Stream stream_;
 
-    bool _encoderMatch(const cv::cuda::GpuMat &in, definition_t def);
-    bool _createEncoder(const cv::cuda::GpuMat &in, definition_t def, bitrate_t rate);
+	bool _encoderMatch(const cv::cuda::GpuMat &in, definition_t def);
+	bool _createEncoder(const cv::cuda::GpuMat &in, definition_t def, bitrate_t rate);
 	ftl::codecs::definition_t _verifiedDefinition(ftl::codecs::definition_t def, const cv::cuda::GpuMat &in);
 };
 
diff --git a/components/codecs/src/bitrates.cpp b/components/codecs/src/bitrates.cpp
index 45a5057687f8add5cbfdaf02718e880a3361bd40..37889f5a55bf0337d1b3b750538587d1cc81f537 100644
--- a/components/codecs/src/bitrates.cpp
+++ b/components/codecs/src/bitrates.cpp
@@ -8,21 +8,18 @@ using ftl::codecs::preset_t;
 using ftl::codecs::definition_t;
 using ftl::codecs::codec_t;
 
+
 static const CodecPreset special_presets[] = {
-	definition_t::HTC_VIVE, definition_t::HTC_VIVE, bitrate_t::High, bitrate_t::High
+	definition_t::HTC_VIVE, bitrate_t::High
 };
 
 static const CodecPreset presets[] = {
-	definition_t::HD1080, definition_t::HD1080, bitrate_t::High, bitrate_t::High,
-	definition_t::HD1080, definition_t::HD720, bitrate_t::Standard, bitrate_t::Standard,
-	definition_t::HD720, definition_t::HD720, bitrate_t::High, bitrate_t::High,
-	definition_t::HD720, definition_t::SD576, bitrate_t::Standard, bitrate_t::Standard,
-	definition_t::SD576, definition_t::SD576, bitrate_t::High, bitrate_t::High,
-	definition_t::SD576, definition_t::SD480, bitrate_t::Standard, bitrate_t::Standard,
-	definition_t::SD480, definition_t::SD480, bitrate_t::High, bitrate_t::High,
-	definition_t::SD480, definition_t::LD360, bitrate_t::Standard, bitrate_t::Standard,
-	definition_t::LD360, definition_t::LD360, bitrate_t::Standard, bitrate_t::Standard,
-	definition_t::LD360, definition_t::LD360, bitrate_t::Low, bitrate_t::Low
+	definition_t::HD1080, bitrate_t::High,
+	definition_t::HD720, bitrate_t::High,
+	definition_t::SD576, bitrate_t::High,
+	definition_t::SD480, bitrate_t::High,
+	definition_t::LD360, bitrate_t::Standard,
+	definition_t::LD360, bitrate_t::Low
 };
 
 static const float kAspectRatio = 1.777778f;
@@ -53,11 +50,27 @@ int ftl::codecs::getHeight(definition_t d) {
 	return resolutions[static_cast<int>(d)].height;
 }
 
+definition_t ftl::codecs::findDefinition(int width, int height) {
+	int ix = 0;
+
+	for (const Resolution &res : resolutions) {
+		if ((res.width == width) && (res.height == height)) {
+			return static_cast<definition_t>(ix);
+		}
+		++ix;
+	}
+
+	// No resolution table entry matches exactly; return the wildcard so the
+	// caller can decide how to handle an unsupported size.
+	return definition_t::Any;
+}
+
+/*
 const CodecPreset &ftl::codecs::getPreset(preset_t p) {
 	if (p < 0 && p >= -1) return special_presets[std::abs(p+1)];
-    if (p > kPresetWorst) return presets[kPresetWorst];
-    if (p < kPresetBest) return presets[kPresetBest];
-    return presets[p];
+	if (p > kPresetWorst) return presets[kPresetWorst];
+	if (p < kPresetBest) return presets[kPresetBest];
+	return presets[p];
 }
 
 preset_t ftl::codecs::findPreset(size_t width, size_t height) {
@@ -80,10 +93,11 @@ preset_t ftl::codecs::findPreset(size_t width, size_t height) {
 	for (preset_t i=kPresetMinimum; i<=kPresetWorst; ++i) {
 		const auto &preset = getPreset(i);
 
-		if ((int)preset.colour_res == best_def && (int)preset.depth_res == best_def) {
+		if ((int)preset.res == best_def) {
 			return i;
 		}
 	}
 
 	return kPresetWorst;
 }
+*/
diff --git a/components/codecs/src/encoder.cpp b/components/codecs/src/encoder.cpp
index 9a7eac72def3a20b966e4332d45d8073f57c47f6..7c7f9a35848441597cd4650fc7d8eaf87bdd01e4 100644
--- a/components/codecs/src/encoder.cpp
+++ b/components/codecs/src/encoder.cpp
@@ -36,7 +36,7 @@ static MUTEX mutex;
 
 Encoder *ftl::codecs::allocateEncoder(ftl::codecs::definition_t maxdef,
 		ftl::codecs::device_t dev, ftl::codecs::codec_t codec) {
-    UNIQUE_LOCK(mutex, lk);
+	UNIQUE_LOCK(mutex, lk);
 	if (!has_been_init) init_encoders();
 
 	for (auto i=encoders.begin(); i!=encoders.end(); ++i) {
@@ -55,10 +55,10 @@ Encoder *ftl::codecs::allocateEncoder(ftl::codecs::definition_t maxdef,
 }
 
 void ftl::codecs::free(Encoder *&enc) {
-    UNIQUE_LOCK(mutex, lk);
-    enc->reset();
+	UNIQUE_LOCK(mutex, lk);
+	enc->reset();
 	enc->available = true;
-    enc = nullptr;
+	enc = nullptr;
 }
 
 Encoder::Encoder(definition_t maxdef, definition_t mindef, device_t dev) :
@@ -72,9 +72,8 @@ Encoder::~Encoder() {
 
 bool Encoder::encode(const cv::cuda::GpuMat &in, preset_t preset,
 			const std::function<void(const ftl::codecs::Packet&)> &cb) {
-	const auto &settings = ftl::codecs::getPreset(preset);
-	const definition_t definition = (in.type() == CV_32F) ? settings.depth_res : settings.colour_res;
-	const bitrate_t bitrate = (in.type() == CV_32F) ? settings.depth_qual : settings.colour_qual;
+	const definition_t definition = ftl::codecs::findDefinition(in.size().width, in.size().height);
+	const bitrate_t bitrate = bitrate_t::High;
 
 	return encode(in, definition, bitrate, cb);
 }
diff --git a/components/codecs/src/nvpipe_decoder.cpp b/components/codecs/src/nvpipe_decoder.cpp
index 77a3105f88b84f2b9c00f5dba152bbc9814c70db..d6652549c73fa5c5d6388030c480e73f331a4a7c 100644
--- a/components/codecs/src/nvpipe_decoder.cpp
+++ b/components/codecs/src/nvpipe_decoder.cpp
@@ -37,6 +37,7 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	is_float_channel_ = is_float_frame;
 	last_definition_ = pkt.definition;
 
+	//LOG(INFO) << "DECODE OUT: " << out.rows << ", " << out.type();
 	//LOG(INFO) << "DECODE RESOLUTION: (" << (int)pkt.definition << ") " << ftl::codecs::getWidth(pkt.definition) << "x" << ftl::codecs::getHeight(pkt.definition);
 
 	// Build a decoder instance of the correct kind
@@ -49,8 +50,6 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 		if (!nv_decoder_) {
 			//LOG(INFO) << "Bitrate=" << (int)bitrate << " width=" << ABRController::getColourWidth(bitrate);
 			LOG(FATAL) << "Could not create decoder: " << NvPipe_GetError(NULL);
-		} else {
-			DLOG(INFO) << "Decoder created";
 		}
 
 		seen_iframe_ = false;
@@ -60,38 +59,46 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	tmp_.create(cv::Size(ftl::codecs::getWidth(pkt.definition),ftl::codecs::getHeight(pkt.definition)), (is_float_frame) ? CV_16U : CV_8UC4);
 
 	// Check for an I-Frame
-	if (pkt.codec == ftl::codecs::codec_t::HEVC) {
-		if (ftl::codecs::hevc::isIFrame(pkt.data)) seen_iframe_ = true;
-	} else if (pkt.codec == ftl::codecs::codec_t::H264) {
-		if (ftl::codecs::h264::isIFrame(pkt.data)) seen_iframe_ = true;
+	if (!seen_iframe_) {
+		if (pkt.codec == ftl::codecs::codec_t::HEVC) {
+			if (ftl::codecs::hevc::isIFrame(pkt.data)) seen_iframe_ = true;
+		} else if (pkt.codec == ftl::codecs::codec_t::H264) {
+			if (ftl::codecs::h264::isIFrame(pkt.data)) seen_iframe_ = true;
+		}
 	}
 
 	// No I-Frame yet so don't attempt to decode P-Frames.
 	if (!seen_iframe_) return false;
 
+	// Final checks for validity
+	if (pkt.data.size() == 0 || tmp_.size() != out.size()) { // || !ftl::codecs::hevc::validNAL(pkt.data)) {
+		LOG(ERROR) << "Failed to decode packet";
+		return false;
+	}
+
 	int rc = NvPipe_Decode(nv_decoder_, pkt.data.data(), pkt.data.size(), tmp_.data, tmp_.cols, tmp_.rows, tmp_.step);
 	if (rc == 0) LOG(ERROR) << "NvPipe decode error: " << NvPipe_GetError(nv_decoder_);
 
 	if (is_float_frame) {
 		// Is the received frame the same size as requested output?
-		if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
+		//if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
 			tmp_.convertTo(out, CV_32FC1, 1.0f/1000.0f, stream_);
-		} else {
+		/*} else {
 			LOG(WARNING) << "Resizing decoded frame from " << tmp_.size() << " to " << out.size();
 			// FIXME: This won't work on GPU
 			tmp_.convertTo(tmp_, CV_32FC1, 1.0f/1000.0f, stream_);
 			cv::cuda::resize(tmp_, out, out.size(), 0, 0, cv::INTER_NEAREST, stream_);
-		}
+		}*/
 	} else {
 		// Is the received frame the same size as requested output?
-		if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
+		//if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
 			// Flag 0x1 means frame is in RGB so needs conversion to BGR
 			if (pkt.flags & 0x1) {
 				cv::cuda::cvtColor(tmp_, out, cv::COLOR_RGBA2BGR, 0, stream_);
 			} else {
 				cv::cuda::cvtColor(tmp_, out, cv::COLOR_BGRA2BGR, 0, stream_);
 			}
-		} else {
+		/*} else {
 			LOG(WARNING) << "Resizing decoded frame from " << tmp_.size() << " to " << out.size();
 			// FIXME: This won't work on GPU, plus it allocates extra memory...
 			// Flag 0x1 means frame is in RGB so needs conversion to BGR
@@ -101,7 +108,7 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 				cv::cuda::cvtColor(tmp_, tmp_, cv::COLOR_BGRA2BGR, 0, stream_);
 			}
 			cv::cuda::resize(tmp_, out, out.size(), 0.0, 0.0, cv::INTER_LINEAR, stream_);
-		}
+		}*/
 	}
 
 	stream_.waitForCompletion();
diff --git a/components/codecs/src/nvpipe_encoder.cpp b/components/codecs/src/nvpipe_encoder.cpp
index 132a3209ad0849dd76f1a5f7438eba8f5655b854..86fccdefc0d91f85694b105986eb49a423cc5863 100644
--- a/components/codecs/src/nvpipe_encoder.cpp
+++ b/components/codecs/src/nvpipe_encoder.cpp
@@ -123,7 +123,7 @@ bool NvPipeEncoder::encode(const cv::cuda::GpuMat &in, definition_t odefinition,
 	pkt.data.resize(cs);
 	was_reset_ = false;
 
-	if (cs == 0) {
+	if (cs == 0 || cs >= ftl::codecs::kVideoBufferSize) {
 		LOG(ERROR) << "Could not encode video frame: " << NvPipe_GetError(nvenc_);
 		return false;
 	} else {
diff --git a/components/codecs/src/opencv_decoder.cpp b/components/codecs/src/opencv_decoder.cpp
index 0b9feea46e5925f16ce5ab323747d94d8bdb1d2a..c3c5e9567deb6752bf2395d0c36e345fcead50ee 100644
--- a/components/codecs/src/opencv_decoder.cpp
+++ b/components/codecs/src/opencv_decoder.cpp
@@ -18,7 +18,7 @@ bool OpenCVDecoder::accepts(const ftl::codecs::Packet &pkt) {
 }
 
 bool OpenCVDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out) {
-
+	//CHECK(cv::Size(ftl::codecs::getWidth(pkt.definition), ftl::codecs::getHeight(pkt.definition)) == out.size()); 
 	int chunk_dim = std::sqrt(pkt.block_total);
 	int chunk_width = out.cols / chunk_dim;
 	int chunk_height = out.rows / chunk_dim;
@@ -37,7 +37,6 @@ bool OpenCVDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	// Apply colour correction to chunk
 	//ftl::rgbd::colourCorrection(tmp_rgb, gamma_, temperature_);
 
-
 	// TODO:(Nick) Decode directly into double buffer if no scaling
 	// Can either check JPG/PNG headers or just use pkt definition.
 
diff --git a/components/codecs/src/opencv_encoder.cpp b/components/codecs/src/opencv_encoder.cpp
index 5dc1995a82e6147184572d6e45d20a8a49561ddc..772922e7bf740a1b47ecea2260d448afca217fe0 100644
--- a/components/codecs/src/opencv_encoder.cpp
+++ b/components/codecs/src/opencv_encoder.cpp
@@ -17,7 +17,7 @@ OpenCVEncoder::OpenCVEncoder(ftl::codecs::definition_t maxdef,
 }
 
 OpenCVEncoder::~OpenCVEncoder() {
-    
+	
 }
 
 bool OpenCVEncoder::supports(ftl::codecs::codec_t codec) {
@@ -30,9 +30,12 @@ bool OpenCVEncoder::supports(ftl::codecs::codec_t codec) {
 
 bool OpenCVEncoder::encode(const cv::cuda::GpuMat &in, definition_t definition, bitrate_t bitrate, const std::function<void(const ftl::codecs::Packet&)> &cb) {
 	bool is_colour = in.type() != CV_32F;
-	current_definition_ = definition;
+
+	// Ensure definition does not exceed max
+	current_definition_ = ((int)definition < (int)max_definition) ? max_definition : definition;
 
 	in.download(tmp_);
+	//CHECK(cv::Size(ftl::codecs::getWidth(definition), ftl::codecs::getHeight(definition)) == in.size()); 
 
 	// Scale down image to match requested definition...
 	if (ftl::codecs::getHeight(current_definition_) < in.rows) {
@@ -42,11 +45,12 @@ bool OpenCVEncoder::encode(const cv::cuda::GpuMat &in, definition_t definition,
 	}
 
 	// Represent float at 16bit int
-    if (!is_colour) {
+	if (!is_colour) {
 		tmp_.convertTo(tmp_, CV_16UC1, 1000);
 	}
 
-	chunk_dim_ = (definition == definition_t::LD360) ? 1 : 4;
+	// FIXME: Chunking is broken so forced to single chunk
+	chunk_dim_ = 1; //(definition == definition_t::LD360) ? 1 : 4;
 	chunk_count_ = chunk_dim_ * chunk_dim_;
 	jobs_ = chunk_count_;
 
@@ -94,6 +98,7 @@ bool OpenCVEncoder::_encodeBlock(const cv::Mat &in, ftl::codecs::Packet &pkt, bi
 	int cx = (pkt.block_number % chunk_dim_) * chunk_width;
 	int cy = (pkt.block_number / chunk_dim_) * chunk_height;
 	cv::Rect roi(cx,cy,chunk_width,chunk_height);
+
 	cv::Mat chunkHead = in(roi);
 
 	if (pkt.codec == codec_t::PNG) {
diff --git a/components/codecs/test/nvpipe_codec_unit.cpp b/components/codecs/test/nvpipe_codec_unit.cpp
index 609ce56a50059978af931b718de57098201a0c1a..dccc65f9671a70ddc1879d12d0b8ef38aa9a1f01 100644
--- a/components/codecs/test/nvpipe_codec_unit.cpp
+++ b/components/codecs/test/nvpipe_codec_unit.cpp
@@ -22,19 +22,18 @@ namespace ftl {
 	}
 }
 
+/*
 TEST_CASE( "NvPipeEncoder::encode() - A colour test image at preset 0" ) {
 	ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480);
 	cv::cuda::GpuMat m(cv::Size(1920,1080), CV_8UC3, cv::Scalar(0,0,0));
 
 	int block_total = 0;
 	std::atomic<int> block_count = 0;
-
-	const CodecPreset &preset = ftl::codecs::getPreset(ftl::codecs::kPreset0);
-
-	bool r = encoder.encode(m, ftl::codecs::kPreset0, [&block_total, &block_count, preset, m](const ftl::codecs::Packet &pkt) {
+	encoder.encode()
+	bool r = encoder.encode(m, definition::H, [&block_total, &block_count, preset, m](const ftl::codecs::Packet &pkt) {
 		REQUIRE( pkt.codec == codec_t::HEVC );
 		REQUIRE( pkt.data.size() > 0 );
-		REQUIRE( pkt.definition == preset.colour_res );
+		REQUIRE( pkt.definition == definition_t::HD1080 );
 
 		block_total = pkt.block_total;
 		block_count++;
@@ -51,12 +50,10 @@ TEST_CASE( "NvPipeEncoder::encode() - A depth test image at preset 0" ) {
 	int block_total = 0;
 	std::atomic<int> block_count = 0;
 
-	const CodecPreset &preset = ftl::codecs::getPreset(ftl::codecs::kPreset0);
-
 	bool r = encoder.encode(m, ftl::codecs::kPreset0, [&block_total, &block_count, preset](const ftl::codecs::Packet &pkt) {
 		REQUIRE( pkt.codec == codec_t::HEVC );
 		REQUIRE( pkt.data.size() > 0 );
-		REQUIRE( pkt.definition == preset.depth_res );
+		REQUIRE( pkt.definition == definition_t::HD1080 );
 
 		block_total = pkt.block_total;
 		block_count++;
@@ -65,6 +62,7 @@ TEST_CASE( "NvPipeEncoder::encode() - A depth test image at preset 0" ) {
 	REQUIRE( r );
 	REQUIRE( block_count == block_total );
 }
+*/
 
 TEST_CASE( "NvPipeDecoder::decode() - A colour test image" ) {
 	ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480);
@@ -83,7 +81,8 @@ TEST_CASE( "NvPipeDecoder::decode() - A colour test image" ) {
 		});
 	}
 
-	SECTION("Full HD in, 720 out, FHD encoding") {
+	// No longer supported
+	/*SECTION("Full HD in, 720 out, FHD encoding") {
 		in = cv::cuda::GpuMat(cv::Size(1920,1080), CV_8UC3, cv::Scalar(255,0,0));
 		out = cv::cuda::GpuMat(cv::Size(1280,720), CV_8UC3, cv::Scalar(0,0,0));
 
@@ -92,9 +91,10 @@ TEST_CASE( "NvPipeDecoder::decode() - A colour test image" ) {
 		});
 
 		REQUIRE( (out.rows == 720) );
-	}
+	}*/
 
-	SECTION("HHD in, FHD out, FHD encoding") {
+	// No longer supported
+	/*SECTION("HHD in, FHD out, FHD encoding") {
 		in = cv::cuda::GpuMat(cv::Size(1280,720), CV_8UC3, cv::Scalar(255,0,0));
 		out = cv::cuda::GpuMat(cv::Size(1920,1080), CV_8UC3, cv::Scalar(0,0,0));
 
@@ -103,9 +103,10 @@ TEST_CASE( "NvPipeDecoder::decode() - A colour test image" ) {
 		});
 
 		REQUIRE( (out.rows == 1080) );
-	}
+	}*/
 
-	SECTION("FHD in, HHD out, SD encoding") {
+	// No longer supported
+	/*SECTION("FHD in, HHD out, SD encoding") {
 		in = cv::cuda::GpuMat(cv::Size(1920,1080), CV_8UC3, cv::Scalar(255,0,0));
 		out = cv::cuda::GpuMat(cv::Size(1280,720), CV_8UC3, cv::Scalar(0,0,0));
 
@@ -114,7 +115,7 @@ TEST_CASE( "NvPipeDecoder::decode() - A colour test image" ) {
 		});
 
 		REQUIRE( (out.rows == 720) );
-	}
+	}*/
 
 	REQUIRE( r );
 	REQUIRE( (cv::cuda::sum(out) != cv::Scalar(0,0,0)) );
diff --git a/components/codecs/test/opencv_codec_unit.cpp b/components/codecs/test/opencv_codec_unit.cpp
index 2505eeb8994e1397bc19175f7f0903bdb3000c9d..961658db5d4da887e8597dd90f093fa8e6abc5b2 100644
--- a/components/codecs/test/opencv_codec_unit.cpp
+++ b/components/codecs/test/opencv_codec_unit.cpp
@@ -21,15 +21,17 @@ namespace ftl {
 	}
 	}
 }
-
+/*
 TEST_CASE( "OpenCVEncoder::encode() - A colour test image at preset 0" ) {
 	ftl::codecs::OpenCVEncoder encoder(definition_t::HD1080, definition_t::SD480);
-	cv::cuda::GpuMat m(cv::Size(1024,576), CV_8UC3, cv::Scalar(0,0,0));
 
 	int block_total = 0;
 	std::atomic<int> block_count = 0;
 
 	const CodecPreset &preset = ftl::codecs::getPreset(ftl::codecs::kPreset4);
+	cv::cuda::GpuMat m(cv::Size(ftl::codecs::getWidth(preset.res),
+								ftl::codecs::getHeight(preset.res)),
+						CV_8UC3, cv::Scalar(0,0,0));
 
 	std::mutex mtx;
 
@@ -37,7 +39,7 @@ TEST_CASE( "OpenCVEncoder::encode() - A colour test image at preset 0" ) {
 		std::unique_lock<std::mutex> lk(mtx);
 		REQUIRE( pkt.codec == codec_t::JPG );
 		REQUIRE( pkt.data.size() > 0 );
-		REQUIRE( pkt.definition == preset.colour_res );
+		REQUIRE( pkt.definition == preset.res );
 
 		block_total = pkt.block_total;
 		block_count++;
@@ -66,7 +68,7 @@ TEST_CASE( "OpenCVEncoder::encode() - A depth test image at preset 0" ) {
 		std::unique_lock<std::mutex> lk(mtx);
 		REQUIRE( pkt.codec == codec_t::PNG );
 		REQUIRE( pkt.data.size() > 0 );
-		REQUIRE( pkt.definition == preset.depth_res );
+		REQUIRE( pkt.definition == preset.res );
 
 		block_total = pkt.block_total;
 		block_count++;
@@ -78,7 +80,7 @@ TEST_CASE( "OpenCVEncoder::encode() - A depth test image at preset 0" ) {
 	REQUIRE( r );
 	REQUIRE( block_count == block_total );
 }
-
+*/
 TEST_CASE( "OpenCVDecoder::decode() - A colour test image no resolution change" ) {
 	ftl::codecs::OpenCVEncoder encoder(definition_t::HD1080, definition_t::SD480);
 	ftl::codecs::OpenCVDecoder decoder;
diff --git a/components/common/cpp/src/configuration.cpp b/components/common/cpp/src/configuration.cpp
index f8c982a811652252f14205b00e7f778d906ab8ed..9fd9d3081f18781ce0474438a3e1829d3b082b33 100644
--- a/components/common/cpp/src/configuration.cpp
+++ b/components/common/cpp/src/configuration.cpp
@@ -438,7 +438,7 @@ static bool findConfiguration(const string &file, const vector<string> &paths) {
 	}
 
 	if (found) {
-		_indexConfig(config);
+		//_indexConfig(config);
 		return true;
 	} else {
 		return false;
@@ -593,6 +593,9 @@ Configurable *ftl::config::configure(int argc, char **argv, const std::string &r
 
 	string root_str = (options.find("root") != options.end()) ? nlohmann::json::parse(options["root"]).get<string>() : root;
 
+	if (options.find("id") != options.end()) config["$id"] = nlohmann::json::parse(options["id"]).get<string>();
+	_indexConfig(config);
+
 	Configurable *rootcfg = create<Configurable>(config);
 	if (root_str.size() > 0) {
 		LOG(INFO) << "Setting root to " << root_str;
diff --git a/components/operators/src/colours.cpp b/components/operators/src/colours.cpp
index 9c6fff8b887fc645da71fbc99e018a6dc99b6313..6a49f6ede7f4cd7f130a889378ce41880bfff617 100644
--- a/components/operators/src/colours.cpp
+++ b/components/operators/src/colours.cpp
@@ -14,11 +14,12 @@ ColourChannels::~ColourChannels() {
 bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, ftl::rgbd::Source *s, cudaStream_t stream) {
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 
+	auto &col = in.get<cv::cuda::GpuMat>(Channel::Colour);
+
 	// Convert colour from BGR to BGRA if needed
-	if (in.get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC3) {
+	if (col.type() == CV_8UC3) {
 		//cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 		// Convert to 4 channel colour
-		auto &col = in.get<cv::cuda::GpuMat>(Channel::Colour);
 		temp_.create(col.size(), CV_8UC4);
 		cv::cuda::swap(col, temp_);
 		cv::cuda::cvtColor(temp_,col, cv::COLOR_BGR2BGRA, 0, cvstream);
@@ -27,5 +28,13 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, ftl::rgb
 	//in.resetTexture(Channel::Colour);
 	in.createTexture<uchar4>(Channel::Colour, true);
 
+	auto &depth = in.get<cv::cuda::GpuMat>(Channel::Depth);
+	if (depth.size() != col.size()) {
+		auto &col2 = in.create<cv::cuda::GpuMat>(Channel::ColourHighRes);
+		cv::cuda::resize(col, col2, depth.size(), 0.0, 0.0, cv::INTER_LINEAR, cvstream);
+		in.createTexture<uchar4>(Channel::ColourHighRes, true);
+		in.swapChannels(Channel::Colour, Channel::ColourHighRes);
+	}
+
 	return true;
 }
diff --git a/components/operators/src/mask.cpp b/components/operators/src/mask.cpp
index f923f11d06a39df882eaf83289b37296aba0ada5..c7dcbb2ac40ef9ff3b4f445eb467e89663cebccb 100644
--- a/components/operators/src/mask.cpp
+++ b/components/operators/src/mask.cpp
@@ -22,7 +22,9 @@ bool DiscontinuityMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, ftl::
 		out.createTexture<int>(Channel::Mask, ftl::rgbd::Format<int>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 		in.createTexture<uchar4>(Channel::Support1),
 		in.createTexture<float>(Channel::Depth),
-		s->parameters(), radius, threshold, stream
+		in.get<cv::cuda::GpuMat>(Channel::Depth).size(),
+		s->parameters().minDepth, s->parameters().maxDepth,
+		radius, threshold, stream
 	);
 
 	return true;
diff --git a/components/operators/src/mask.cu b/components/operators/src/mask.cu
index e385f41b14459802dbf52ef85aef2f891eceff08..91ddf19dd3b6451d7802cc622a08a660adfdd360 100644
--- a/components/operators/src/mask.cu
+++ b/components/operators/src/mask.cu
@@ -4,16 +4,21 @@
 
 using ftl::cuda::Mask;
 
-__global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl::cuda::TextureObject<uchar4> support, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params, float threshold, int radius) {
+__global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out,
+										ftl::cuda::TextureObject<uchar4> support,
+										ftl::cuda::TextureObject<float> depth, 
+										const cv::Size size, const double minDepth, const double maxDepth,
+										float threshold, int radius) {
+	
 	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-	if (x < params.width && y < params.height) {
+	if (x < size.width && y < size.height) {
 		Mask mask(0);
 
 		const float d = depth.tex2D((int)x, (int)y);
 
-		if (d >= params.minDepth && d <= params.maxDepth) {
+		if (d >= minDepth && d <= maxDepth) {
 			/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
 
 			// If colour cross support region terminates within the requested
@@ -37,17 +42,21 @@ __global__ void discontinuity_kernel(ftl::cuda::TextureObject<int> mask_out, ftl
 				float dS = depth.tex2D((int)x, (int)y + sup.w + radius);
 				if (fabs(dS - d) > threshold) mask.isDiscontinuity(true);
 			}
-        }
-        
-        mask_out(x,y) = (int)mask;
+		}
+		
+		mask_out(x,y) = (int)mask;
 	}
 }
 
-void ftl::cuda::discontinuity(ftl::cuda::TextureObject<int> &mask_out, ftl::cuda::TextureObject<uchar4> &support, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, int discon, float thresh, cudaStream_t stream) {
-	const dim3 gridSize((params.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (params.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
+void ftl::cuda::discontinuity(	ftl::cuda::TextureObject<int> &mask_out, ftl::cuda::TextureObject<uchar4> &support,
+								ftl::cuda::TextureObject<float> &depth,
+								const cv::Size size, const double minDepth, const double maxDepth,
+								int discon, float thresh, cudaStream_t stream) {
+	
+	const dim3 gridSize((size.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (size.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
 	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-    discontinuity_kernel<<<gridSize, blockSize, 0, stream>>>(mask_out, support, depth, params, thresh, discon);
+	discontinuity_kernel<<<gridSize, blockSize, 0, stream>>>(mask_out, support, depth, size, minDepth, maxDepth, thresh, discon);
 	cudaSafeCall( cudaGetLastError() );
 
 #ifdef _DEBUG
@@ -55,8 +64,6 @@ void ftl::cuda::discontinuity(ftl::cuda::TextureObject<int> &mask_out, ftl::cuda
 #endif
 }
 
-
-
 __global__ void cull_discontinuity_kernel(ftl::cuda::TextureObject<int> mask, ftl::cuda::TextureObject<float> depth) {
 	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
@@ -71,7 +78,7 @@ void ftl::cuda::cull_discontinuity(ftl::cuda::TextureObject<int> &mask, ftl::cud
 	const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
 	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-    cull_discontinuity_kernel<<<gridSize, blockSize, 0, stream>>>(mask, depth);
+	cull_discontinuity_kernel<<<gridSize, blockSize, 0, stream>>>(mask, depth);
 	cudaSafeCall( cudaGetLastError() );
 
 #ifdef _DEBUG
diff --git a/components/operators/src/mask_cuda.hpp b/components/operators/src/mask_cuda.hpp
index 6a02aafdbbdfbbbd200355bddc3c7ba33a605483..20c266290f10ce6aca19af3cfe45a9d3f7c03355 100644
--- a/components/operators/src/mask_cuda.hpp
+++ b/components/operators/src/mask_cuda.hpp
@@ -19,7 +19,7 @@ class Mask {
 	#endif
 	__device__ inline operator int() const { return v_; }
 
-    __device__ inline bool is(int m) const { return v_ & m; }
+	__device__ inline bool is(int m) const { return v_ & m; }
 
 	__device__ inline bool isFilled() const { return v_ & kMask_Filled; }
 	__device__ inline bool isDiscontinuity() const { return v_ & kMask_Discontinuity; }
@@ -31,7 +31,7 @@ class Mask {
 	__device__ inline void hasCorrespondence(bool v) { v_ = (v) ? v_ | kMask_Correspondence : v_ & (~kMask_Correspondence); }
 	__device__ inline void isBad(bool v) { v_ = (v) ? v_ | kMask_Bad : v_ & (~kMask_Bad); }
 
-    static constexpr int kMask_Filled = 0x0001;
+	static constexpr int kMask_Filled = 0x0001;
 	static constexpr int kMask_Discontinuity = 0x0002;
 	static constexpr int kMask_Correspondence = 0x0004;
 	static constexpr int kMask_Bad = 0x0008;
@@ -44,7 +44,9 @@ void discontinuity(
 		ftl::cuda::TextureObject<int> &mask,
 		ftl::cuda::TextureObject<uchar4> &support,
 		ftl::cuda::TextureObject<float> &depth,
-		const ftl::rgbd::Camera &params,
+		const cv::Size size,
+		const double minDepth,
+		const double maxDepth,
 		int radius, float threshold,
 		cudaStream_t stream);
 
diff --git a/components/renderers/cpp/src/reprojection.cu b/components/renderers/cpp/src/reprojection.cu
index 9c414f8927572f93795ea4bfb2e17c491f2deb44..72b7cd07275f3c9b41c207009dd4b7eef6ad7c9b 100644
--- a/components/renderers/cpp/src/reprojection.cu
+++ b/components/renderers/cpp/src/reprojection.cu
@@ -94,7 +94,10 @@ __global__ void reprojection_kernel(
 	const float dotproduct = (max(dot(ray,n),-0.1f)+0.1) / 1.1f;
     
 	const float d2 = depth_src.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f));
-	const auto input = in.tex2D(screenPos.x, screenPos.y); //generateInput(in.tex2D((int)screenPos.x, (int)screenPos.y), params, worldPos);
+
+	const float inSX = float(in.width()) / float(depth_src.width());
+	const float inSY = float(in.height()) / float(depth_src.height());
+	const auto input = in.tex2D(screenPos.x*inSX, screenPos.y*inSY); //generateInput(in.tex2D((int)screenPos.x, (int)screenPos.y), params, worldPos);
 
 	// TODO: Z checks need to interpolate between neighbors if large triangles are used
 	//float weight = ftl::cuda::weighting(fabs(camPos.z - d2), params.depthThreshold);
@@ -213,7 +216,11 @@ __global__ void reprojection_kernel(
 	if (screenPos.x >= depth_src.width() || screenPos.y >= depth_src.height()) return;
     
 	const float d2 = depth_src.tex2D((int)(screenPos.x+0.5f), (int)(screenPos.y+0.5f));
-	const auto input = in.tex2D(screenPos.x, screenPos.y); //generateInput(in.tex2D((int)screenPos.x, (int)screenPos.y), params, worldPos);
+
+	const float inSX = float(in.width()) / float(depth_src.width());
+	const float inSY = float(in.height()) / float(depth_src.height());
+	const auto input = in.tex2D(screenPos.x*inSX, screenPos.y*inSY); //generateInput(in.tex2D((int)screenPos.x, (int)screenPos.y), params, worldPos);
+
 	float weight = ftl::cuda::weighting(fabs(camPos.z - d2), 0.02f);
 	const B weighted = make<B>(input) * weight;
 
diff --git a/components/renderers/cpp/src/tri_render.cpp b/components/renderers/cpp/src/tri_render.cpp
index 06d4fe2626f01e989645e19e795ef4df925c96c2..d1d0894f476dfa534ccd075782f17205e6422895 100644
--- a/components/renderers/cpp/src/tri_render.cpp
+++ b/components/renderers/cpp/src/tri_render.cpp
@@ -220,13 +220,13 @@ void Triangular::__reprojectChannel(ftl::rgbd::Frame &output, ftl::codecs::Chann
 		auto &f = scene_->frames[i];
 		auto *s = scene_->sources[i];
 		
-		if (f.get<GpuMat>(in).type() == CV_8UC3) {
+		/*if (f.get<GpuMat>(in).type() == CV_8UC3) {
 			// Convert to 4 channel colour
 			auto &col = f.get<GpuMat>(in);
 			GpuMat tmp(col.size(), CV_8UC4);
 			cv::cuda::swap(col, tmp);
 			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
-		}
+		}*/
 
 		auto transform = MatrixConversion::toCUDA(s->getPose().cast<float>().inverse() * t.cast<float>().inverse()) * params_.m_viewMatrixInverse;
 		auto transformR = MatrixConversion::toCUDA(s->getPose().cast<float>().inverse()).getFloat3x3();
@@ -607,7 +607,11 @@ bool Triangular::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, co
 	}
 
 	// Reprojection of colours onto surface
-	_renderChannel(out, Channel::Colour, Channel::Colour, t, stream_);
+	auto main_channel = (scene_->frames[0].hasChannel(Channel::ColourHighRes)) ? Channel::ColourHighRes : Channel::Colour;
+	//if (scene_->frames[0].hasChannel(Channel::ColourHighRes)) {
+	//	LOG(INFO) << "HAVE HIGH RES: " << scene_->frames[0].get<GpuMat>(Channel::ColourHighRes).rows;
+	//}
+	_renderChannel(out, main_channel, Channel::Colour, t, stream_);
 
 	if (value("cool_effect", false)) {
 		auto pose = params.m_viewMatrixInverse.getFloat3x3();
diff --git a/components/rgbd-sources/include/ftl/rgbd/frame.hpp b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
index e7a949600e6ba097aeda54460e83a1529851371e..8411c71a626e23216fcedac5df35e0ce49863f3b 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frame.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
@@ -223,7 +223,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, const
 		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows) {
-		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
+		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	}
@@ -256,7 +256,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, bool i
 		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows || m.tex.devicePtr() != m.gpu.data) {
-		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
+		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	}
diff --git a/components/rgbd-sources/src/abr.cpp b/components/rgbd-sources/src/abr.cpp
index c338d4725ed2ab493fd61143d24b9b7241453622..d387cde26990f5e5acc1d38530375f73733d3789 100644
--- a/components/rgbd-sources/src/abr.cpp
+++ b/components/rgbd-sources/src/abr.cpp
@@ -41,7 +41,7 @@ bitrate_t ABRController::selectBitrate(const NetFrame &frame) {
 
     float actual_mbps = (float(frame.tx_size) * 8.0f * (1000.0f / float(frame.tx_latency))) / 1048576.0f;
     float min_mbps = (float(frame.tx_size) * 8.0f * (1000.0f / float(ftl::timer::getInterval()))) / 1048576.0f;
-    //LOG(INFO) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
+    //if (actual_mbps < min_mbps) LOG(WARNING) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
     float ratio = actual_mbps / min_mbps;
     //LOG(INFO) << "Rate Ratio = " << frame.tx_latency;
 
diff --git a/components/rgbd-sources/src/group.cpp b/components/rgbd-sources/src/group.cpp
index 625d62e2c9767ee6164d2835e832de20994ec983..aad850d2cf9d501d6d655b1f77978de6f1bab39e 100644
--- a/components/rgbd-sources/src/group.cpp
+++ b/components/rgbd-sources/src/group.cpp
@@ -214,8 +214,8 @@ void Group::sync(std::function<bool(ftl::rgbd::FrameSet &)> cb) {
 				try {
 					cb(*fs);
 					//LOG(INFO) << "Frameset processed (" << name_ << "): " << fs->timestamp;
-				} catch(...) {
-					LOG(ERROR) << "Exception in group sync callback";
+				} catch(std::exception &e) {
+					LOG(ERROR) << "Exception in group sync callback: " << e.what();
 				}
 
 				// The buffers are invalid after callback so mark stale
diff --git a/components/rgbd-sources/src/source.cpp b/components/rgbd-sources/src/source.cpp
index 13cdd5487edf0b7cbc99f7cd9dd7032b43d31185..4a7873515a2a77faf44965e94208ec005085caf8 100644
--- a/components/rgbd-sources/src/source.cpp
+++ b/components/rgbd-sources/src/source.cpp
@@ -303,27 +303,6 @@ void Source::notify(int64_t ts, cv::cuda::GpuMat &c1, cv::cuda::GpuMat &c2) {
 	int max_width = max(impl_->params_.width, max(c1.cols, c2.cols));
 	int max_height = max(impl_->params_.height, max(c1.rows, c2.rows));
 
-	// Do we need to scale camera parameters
-	if (impl_->params_.width < max_width || impl_->params_.height < max_height) {
-		impl_->params_ = impl_->params_.scaled(max_width, max_height);
-	}
-
-	// Should channel 1 be scaled?
-	if (c1.cols < max_width || c1.rows < max_height) {
-		LOG(WARNING) << "Resizing on GPU";
-		cv::cuda::resize(c1, c1, cv::Size(max_width, max_height));
-	}
-
-	// Should channel 2 be scaled?
-	if (!c2.empty() && (c2.cols < max_width || c2.rows < max_height)) {
-		LOG(WARNING) << "Resizing on GPU";
-		if (c2.type() == CV_32F) {
-			cv::cuda::resize(c2, c2, cv::Size(max_width, max_height), 0.0, 0.0, cv::INTER_NEAREST);
-		} else {
-			cv::cuda::resize(c2, c2, cv::Size(max_width, max_height));
-		}
-	}
-
 	if (callback_) callback_(ts, c1, c2);
 }
 
diff --git a/components/rgbd-sources/src/sources/ftlfile/file_source.cpp b/components/rgbd-sources/src/sources/ftlfile/file_source.cpp
index 0962c1886dc199e50530343c0d01edf4e74e37f0..25033f9af59172f652919ad8868f343bbbab00e8 100644
--- a/components/rgbd-sources/src/sources/ftlfile/file_source.cpp
+++ b/components/rgbd-sources/src/sources/ftlfile/file_source.cpp
@@ -184,21 +184,34 @@ bool FileSource::compute(int n, int b) {
 
 		if (c.spkt.channel == Channel::Colour) {
 			rgb_.create(cv::Size(ftl::codecs::getWidth(c.pkt.definition),ftl::codecs::getHeight(c.pkt.definition)), CV_8UC3);
-		} else {
+			_createDecoder(0, c.pkt);
+
+			try {
+				decoders_[0]->decode(c.pkt, rgb_);
+			} catch (std::exception &e) {
+				LOG(INFO) << "Decoder exception: " << e.what();
+			}
+		} else if (host_->getChannel() == c.spkt.channel) {
 			depth_.create(cv::Size(ftl::codecs::getWidth(c.pkt.definition),ftl::codecs::getHeight(c.pkt.definition)), CV_32F);
+			_createDecoder(1, c.pkt);
+			try {
+				decoders_[1]->decode(c.pkt, depth_);
+			} catch (std::exception &e) {
+				LOG(INFO) << "Decoder exception: " << e.what();
+			}
 		}
 	
-		_createDecoder((c.spkt.channel == Channel::Colour) ? 0 : 1, c.pkt);
+		//_createDecoder((c.spkt.channel == Channel::Colour) ? 0 : 1, c.pkt);
 
-		try {
+		/*try {
 			decoders_[(c.spkt.channel == Channel::Colour) ? 0 : 1]->decode(c.pkt, (c.spkt.channel == Channel::Colour) ? rgb_ : depth_);
 		} catch (std::exception &e) {
 			LOG(INFO) << "Decoder exception: " << e.what();
-		}
+		}*/
 	}
 
 	// FIXME: Consider case of Channel::None
-	if (lastc != 2) {
+	if (lastc < 2) {
 		LOG(ERROR) << "Channels not in sync (" << sourceid_ << "): " << lastts;
 		return false;
 	}
diff --git a/components/rgbd-sources/src/sources/net/net.cpp b/components/rgbd-sources/src/sources/net/net.cpp
index e4073536a574255965de81ab5f2294e008695032..aeb1b954a0c4228304d97518ea007e7bffda52fd 100644
--- a/components/rgbd-sources/src/sources/net/net.cpp
+++ b/components/rgbd-sources/src/sources/net/net.cpp
@@ -52,8 +52,8 @@ NetFrame &NetFrameQueue::getFrame(int64_t ts, const cv::Size &s, int c1type, int
 			f.chunk_total[1] = 0;
 			f.channel_count = 0;
 			f.tx_size = 0;
-			f.channel[0].create(s, c1type);
-			f.channel[1].create(s, c2type);
+			//f.channel[0].create(s, c1type);
+			//f.channel[1].create(s, c2type);
 			return f;
 		}
 		oldest = (f.timestamp < oldest) ? f.timestamp : oldest;
@@ -72,8 +72,8 @@ NetFrame &NetFrameQueue::getFrame(int64_t ts, const cv::Size &s, int c1type, int
 			f.chunk_total[1] = 0;
 			f.channel_count = 0;
 			f.tx_size = 0;
-			f.channel[0].create(s, c1type);
-			f.channel[1].create(s, c2type);
+			//f.channel[0].create(s, c1type);
+			//f.channel[1].create(s, c2type);
 			return f;
 		}
 	}
@@ -253,6 +253,12 @@ void NetSource::_processPose(const ftl::codecs::Packet &pkt) {
 	LOG(INFO) << "Got POSE channel";
 }
 
+void NetSource::_checkDataRate(size_t tx_size, int64_t tx_latency) {
+	float actual_mbps = (float(tx_size) * 8.0f * (1000.0f / float(tx_latency))) / 1048576.0f;
+    float min_mbps = (float(tx_size) * 8.0f * (1000.0f / float(ftl::timer::getInterval()))) / 1048576.0f;
+    if (actual_mbps < min_mbps) LOG(WARNING) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
+}
+
 void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 	// Capture time here for better net latency estimate
 	int64_t now = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()).time_since_epoch().count();
@@ -277,11 +283,40 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 		return;
 	}
 
-	NetFrame &frame = queue_.getFrame(spkt.timestamp, cv::Size(params_.width, params_.height), CV_8UC3, (isFloatChannel(chan) ? CV_32FC1 : CV_8UC3));
+	//LOG(INFO) << "PACKET: " << spkt.timestamp << ", " << (int)spkt.channel << ", " << (int)pkt.codec;
+	
+	const cv::Size size = cv::Size(ftl::codecs::getWidth(pkt.definition), ftl::codecs::getHeight(pkt.definition));
+	NetFrame &frame = queue_.getFrame(spkt.timestamp, size, CV_8UC3, (isFloatChannel(chan) ? CV_32FC1 : CV_8UC3));
+
+	if (timestamp_ > 0 && frame.timestamp <= timestamp_) {
+		LOG(ERROR) << "Duplicate frame - " << frame.timestamp << " received=" << int(rchan) << " uri=" << uri_;
+		return;
+	}
+
+	// Calculate how many packets to expect for this channel
+	if (frame.chunk_total[channum] == 0) {
+		frame.chunk_total[channum] = pkt.block_total;
+	}
+
+	// Capture tx time of first received chunk
+	if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
+		UNIQUE_LOCK(frame.mtx, flk);
+		if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
+			frame.tx_latency = int64_t(ttimeoff);
+		}
+	}
+
+	++frame.chunk_count[channum];
+	if (frame.chunk_count[channum] > frame.chunk_total[channum]) {
+		LOG(WARNING) << "Too many channel packets received, discarding";
+		return;
+	}
 
 	// Update frame statistics
 	frame.tx_size += pkt.data.size();
 
+	frame.channel[channum].create(size, (isFloatChannel(rchan) ? CV_32FC1 : CV_8UC3));
+
 	// Only decode if this channel is wanted.
 	if (rchan == Channel::Colour || rchan == chan) {
 		_createDecoder(channum, pkt);
@@ -290,7 +325,7 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 			LOG(ERROR) << "No frame decoder available";
 			return;
 		}
-
+	
 		decoder->decode(pkt, frame.channel[channum]);
 	} else if (chan != Channel::None && rchan != Channel::Colour) {
 		// Didn't receive correct second channel so just clear the images
@@ -305,29 +340,10 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 	//ftl::rgbd::colourCorrection(tmp_rgb, gamma_, temperature_);
 
 	// TODO:(Nick) Decode directly into double buffer if no scaling
+
+	_checkDataRate(pkt.data.size(), now-(spkt.timestamp+ttimeoff));
 
-	if (timestamp_ > 0 && frame.timestamp <= timestamp_) {
-		LOG(ERROR) << "BAD DUPLICATE FRAME - " << frame.timestamp << " received=" << int(rchan) << " uri=" << uri_;
-		return;
-	}
-
-	// Calculate how many packets to expect for this channel
-	if (frame.chunk_total[channum] == 0) {
-		frame.chunk_total[channum] = pkt.block_total;
-	}		
-
-	++frame.chunk_count[channum];
 	if (frame.chunk_count[channum] == frame.chunk_total[channum]) ++frame.channel_count;
-	if (frame.chunk_count[channum] > frame.chunk_total[channum]) LOG(FATAL) << "TOO MANY CHUNKS";
-
-	// Capture tx time of first received chunk
-	// FIXME: This seems broken
-	if (channum == 1 && frame.chunk_count[channum] == 1) {
-		UNIQUE_LOCK(frame.mtx, flk);
-		if (frame.chunk_count[channum] == 1) {
-			frame.tx_latency = int64_t(ttimeoff);
-		}
-	}
 
 	// Last chunk of both channels now received, so we are done.
 	if (frame.channel_count == spkt.channel_count) {
diff --git a/components/rgbd-sources/src/sources/net/net.hpp b/components/rgbd-sources/src/sources/net/net.hpp
index 5cef2726d2cdc5c34c161a74b25d45234f55ce48..515bb8a5ff7ee5d788530d9ad00495f7f880b83d 100644
--- a/components/rgbd-sources/src/sources/net/net.hpp
+++ b/components/rgbd-sources/src/sources/net/net.hpp
@@ -89,6 +89,7 @@ class NetSource : public detail::Source {
 	void _processCalibration(const ftl::codecs::Packet &pkt);
 	void _processConfig(const ftl::codecs::Packet &pkt);
 	void _processPose(const ftl::codecs::Packet &pkt);
+	void _checkDataRate(size_t tx_size, int64_t tx_latency);
 };
 
 }
diff --git a/components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp b/components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp
index 136a2e7dfcc7b279228cdd5c6efc0bfb8d303baa..61acdb9d814560c72fcd15e7f05cdfaf8fda66cd 100644
--- a/components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp
+++ b/components/rgbd-sources/src/sources/snapshot/snapshot_source.cpp
@@ -53,6 +53,8 @@ SnapshotSource::SnapshotSource(ftl::rgbd::Source *host, Snapshot &snapshot, cons
     host->setPose(pose);
 
 	mspf_ = 1000 / host_->value("fps", 20);
+
+	cudaStreamCreate(&stream_);
 }
 
 bool SnapshotSource::compute(int n, int b) {
@@ -61,11 +63,14 @@ bool SnapshotSource::compute(int n, int b) {
 
 	//snap_rgb_.copyTo(rgb_);
 	//snap_depth_.copyTo(depth_);
-	rgb_.upload(snap_rgb_);
-	depth_.upload(snap_depth_);
-
-	auto cb = host_->callback();
-	if (cb) cb(timestamp_, rgb_, depth_);
+	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream_);
+	rgb_.upload(snap_rgb_, cvstream);
+	depth_.upload(snap_depth_, cvstream);
+	cudaStreamSynchronize(stream_);
+
+	//auto cb = host_->callback();
+	//if (cb) cb(timestamp_, rgb_, depth_);
+	host_->notify(timestamp_, rgb_, depth_);
 
 	frame_idx_ = (frame_idx_ + 1) % snapshot_.getFramesCount();
 
diff --git a/components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp b/components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp
index de1b0df48be79df732f51144226f5c7e6d2f0478..80a0bf392b39fb9d5215dd80034768d806ac7957 100644
--- a/components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp
+++ b/components/rgbd-sources/src/sources/snapshot/snapshot_source.hpp
@@ -32,6 +32,7 @@ class SnapshotSource : public detail::Source {
 	cv::Mat snap_rgb_;
 	cv::Mat snap_depth_;
 	int mspf_;
+	cudaStream_t stream_;
 };
 
 }
diff --git a/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
index fc99d701fa40a8b824248c775a7020ed7d449fd1..88d69ab9c6d2fdc23bcd8f629df3e8041d6a75d9 100644
--- a/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
@@ -211,6 +211,26 @@ void Calibrate::_updateIntrinsics() {
 	map2_gpu_.second.upload(map2_.second);
 }
 
+cv::Mat Calibrate::getCameraMatrixLeft(const cv::Size res) {
+	double scale_x = ((double) res.width) / ((double) img_size_.width);
+	double scale_y = ((double) res.height) / ((double) img_size_.height);
+	Mat scale(cv::Size(3, 3), CV_64F, 0.0);
+	scale.at<double>(0, 0) = scale_x;
+	scale.at<double>(1, 1) = scale_y;
+	scale.at<double>(2, 2) = 1.0;
+	return scale * Kl_;
+}
+
+cv::Mat Calibrate::getCameraMatrixRight(const cv::Size res) {
+	double scale_x = ((double) res.width) / ((double) img_size_.width);
+	double scale_y = ((double) res.height) / ((double) img_size_.height);
+	Mat scale(cv::Size(3, 3), CV_64F, 0.0);
+	scale.at<double>(0, 0) = scale_x;
+	scale.at<double>(1, 1) = scale_y;
+	scale.at<double>(2, 2) = 1.0;
+	return scale * Kr_;
+}
+
 void Calibrate::rectifyStereo(GpuMat &l, GpuMat &r, Stream &stream) {
 	// cv::cuda::remap() can not use same Mat for input and output
 
diff --git a/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
index 4561b90a79129dd5a0d46d9d54bd005147d766a7..39ec301a98d8419cc19dfb0bb60747e20a6327ff 100644
--- a/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
@@ -49,15 +49,11 @@ class Calibrate : public ftl::Configurable {
 
 	void updateCalibration(const ftl::rgbd::Camera &p);
 	
-	/**
-	 * Get the camera matrix. Used to convert disparity map back to depth and
-	 * a 3D point cloud.
-	 */
+	// Get disparity to depth matrix.
 	const cv::Mat &getQ() const { return Q_; }
 
-	const cv::Mat &getCameraMatrixLeft() { return Kl_; }
-	const cv::Mat &getCameraMatrixRight() { return Kr_; }
-	const cv::Mat &getCameraMatrix() { return getCameraMatrixLeft(); }
+	cv::Mat getCameraMatrixLeft(const cv::Size res);
+	cv::Mat getCameraMatrixRight(const cv::Size res);
 
 private:
 	void _updateIntrinsics();
diff --git a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
index 84d1e574b8e7aff91c774a1035c75280cd8ac03c..e8eff732be9120de3cdc9efc2b25003e8635a65f 100644
--- a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
@@ -8,7 +8,6 @@
 #include "ftl/operators/opticalflow.hpp"
 #endif
 
-
 #include "ftl/operators/smoothing.hpp"
 #include "ftl/operators/colours.hpp"
 #include "ftl/operators/normals.hpp"
@@ -69,14 +68,35 @@ void StereoVideoSource::init(const string &file) {
 		lsrc_ = ftl::create<LocalSource>(host_, "feed");
 	}
 
-	cv::Size size = cv::Size(lsrc_->width(), lsrc_->height());
+	color_size_ = cv::Size(lsrc_->width(), lsrc_->height());
 	frames_ = std::vector<Frame>(2);
 
-	calib_ = ftl::create<Calibrate>(host_, "calibration", size, stream_);
+	pipeline_input_ = ftl::config::create<ftl::operators::Graph>(host_, "input");
+	#ifdef HAVE_OPTFLOW
+	pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow");
+	#endif
+
+	pipeline_depth_ = ftl::config::create<ftl::operators::Graph>(host_, "disparity");
+	depth_size_ = cv::Size(	pipeline_depth_->value("width", color_size_.width),
+							pipeline_depth_->value("height", color_size_.height));
+
+	pipeline_depth_->append<ftl::operators::FixstarsSGM>("algorithm");
+	#ifdef HAVE_OPTFLOW
+	pipeline_depth_->append<ftl::operators::OpticalFlowTemporalSmoothing>("optflow_filter");
+	#endif
+	pipeline_depth_->append<ftl::operators::DisparityBilateralFilter>("bilateral_filter");
+	pipeline_depth_->append<ftl::operators::DisparityToDepth>("calculate_depth");
+	pipeline_depth_->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
+	pipeline_depth_->append<ftl::operators::Normals>("normals");  // Estimate surface normals
+	pipeline_depth_->append<ftl::operators::CrossSupport>("cross");
+	pipeline_depth_->append<ftl::operators::DiscontinuityMask>("discontinuity_mask");
+	pipeline_depth_->append<ftl::operators::AggreMLS>("mls");  // Perform MLS (using smoothing channel)
+
+	calib_ = ftl::create<Calibrate>(host_, "calibration", color_size_, stream_);
 	if (!calib_->isCalibrated()) LOG(WARNING) << "Cameras are not calibrated!";
 
 	// Generate camera parameters from camera matrix
-	cv::Mat K = calib_->getCameraMatrix();
+	cv::Mat K = calib_->getCameraMatrixLeft(depth_size_);
 	params_ = {
 		K.at<double>(0,0),	// Fx
 		K.at<double>(1,1),	// Fy
@@ -126,49 +146,34 @@ void StereoVideoSource::init(const string &file) {
 	mask_l_gpu.download(mask_l);
 	mask_l_ = (mask_l == 0);
 	
-	pipeline_input_ = ftl::config::create<ftl::operators::Graph>(host_, "input");
-	#ifdef HAVE_OPTFLOW
-	pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow");
-	#endif
-
-	pipeline_depth_ = ftl::config::create<ftl::operators::Graph>(host_, "disparity");
-	pipeline_depth_->append<ftl::operators::FixstarsSGM>("algorithm");
-
-	#ifdef HAVE_OPTFLOW
-	pipeline_depth_->append<ftl::operators::OpticalFlowTemporalSmoothing>("optflow_filter");
-	#endif
-	pipeline_depth_->append<ftl::operators::DisparityBilateralFilter>("bilateral_filter");
-	pipeline_depth_->append<ftl::operators::DisparityToDepth>("calculate_depth");
-	pipeline_depth_->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
-	pipeline_depth_->append<ftl::operators::Normals>("normals");  // Estimate surface normals
-	pipeline_depth_->append<ftl::operators::CrossSupport>("cross");
-	pipeline_depth_->append<ftl::operators::DiscontinuityMask>("discontinuity_mask");
-	pipeline_depth_->append<ftl::operators::AggreMLS>("mls");  // Perform MLS (using smoothing channel)
-
 	LOG(INFO) << "StereoVideo source ready...";
 	ready_ = true;
 }
 
 ftl::rgbd::Camera StereoVideoSource::parameters(Channel chan) {
+	cv::Mat K;
+	
 	if (chan == Channel::Right) {
-		cv::Mat q = calib_->getCameraMatrixRight();
-		ftl::rgbd::Camera params = {
-			q.at<double>(0,0),	// Fx
-			q.at<double>(1,1),	// Fy
-			-q.at<double>(0,2),	// Cx
-			-q.at<double>(1,2),	// Cy
-			(unsigned int)lsrc_->width(),
-			(unsigned int)lsrc_->height(),
-			0.0f,	// 0m min
-			15.0f,	// 15m max
-			1.0 / calib_->getQ().at<double>(3,2), // Baseline
-			0.0f  // doffs
-		};
-		return params;
-		//params_.doffs = -calib_->getQ().at<double>(3,3) * params_.baseline;
+		K = calib_->getCameraMatrixRight(depth_size_);
 	} else {
-		return params_;
+		K = calib_->getCameraMatrixLeft(depth_size_);
 	}
+
+	// TODO: remove hardcoded values (min/max)
+	ftl::rgbd::Camera params = {
+		K.at<double>(0,0),	// Fx
+		K.at<double>(1,1),	// Fy
+		-K.at<double>(0,2),	// Cx
+		-K.at<double>(1,2),	// Cy
+		(unsigned int) depth_size_.width,
+		(unsigned int) depth_size_.height,
+		0.0f,	// 0m min
+		15.0f,	// 15m max
+		1.0 / calib_->getQ().at<double>(3,2), // Baseline
+		0.0f  // doffs
+	};
+	
+	return params;
 }
 
 bool StereoVideoSource::capture(int64_t ts) {
@@ -205,8 +210,31 @@ bool StereoVideoSource::compute(int n, int b) {
 	}
 
 	if (chan == Channel::Depth) {
-		pipeline_depth_->apply(frame, frame, host_, cv::cuda::StreamAccessor::getStream(stream_));	
+		// stereo algorithms assume input same size as output
+		bool resize = (depth_size_ != color_size_);
+
+		cv::cuda::GpuMat& left = frame.get<cv::cuda::GpuMat>(Channel::Left);
+		cv::cuda::GpuMat& right = frame.get<cv::cuda::GpuMat>(Channel::Right);
+
+		if (left.empty() || right.empty()) {
+			return false;
+		}
+
+		if (resize) {
+			cv::cuda::swap(fullres_left_, left);
+			cv::cuda::swap(fullres_right_, right);
+			cv::cuda::resize(fullres_left_, left, depth_size_, 0, 0, cv::INTER_CUBIC, stream_);
+			cv::cuda::resize(fullres_right_, right, depth_size_, 0, 0, cv::INTER_CUBIC, stream_);
+		}
+
+		pipeline_depth_->apply(frame, frame, host_, cv::cuda::StreamAccessor::getStream(stream_));
 		stream_.waitForCompletion();
+		
+		if (resize) {
+			cv::cuda::swap(fullres_left_, left);
+			cv::cuda::swap(fullres_right_, right);
+		}
+
 		host_->notify(timestamp_,
 						frame.get<cv::cuda::GpuMat>(Channel::Left),
 						frame.get<cv::cuda::GpuMat>(Channel::Depth));
diff --git a/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
index 78fcdbcf809eeae0d5de6da30b99ce3dc07f9214..9532e618889e78da51e1e152673195000e93be7f 100644
--- a/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
@@ -17,9 +17,7 @@ class Disparity;
 
 /**
  * RGBD source from either a stereo video file with left + right images, or
- * direct from two camera devices. A variety of algorithms are included for
- * calculating disparity, before converting to depth.  Calibration of the images
- * is also performed.
+ * direct from two camera devices. 
  */
 class StereoVideoSource : public detail::Source {
 	public:
@@ -32,15 +30,21 @@ class StereoVideoSource : public detail::Source {
 	bool retrieve();
 	bool compute(int n, int b);
 	bool isReady();
-	Camera parameters(ftl::codecs::Channel chan);
+	Camera parameters(ftl::codecs::Channel chan) override;
 
 	private:
 	LocalSource *lsrc_;
 	Calibrate *calib_;
 
+	cv::Size color_size_;
+	cv::Size depth_size_;
+
 	ftl::operators::Graph *pipeline_input_;
 	ftl::operators::Graph *pipeline_depth_;
 
+	cv::cuda::GpuMat fullres_left_;
+	cv::cuda::GpuMat fullres_right_;
+
 	bool ready_;
 	
 	cv::cuda::Stream stream_;
diff --git a/components/rgbd-sources/src/streamer.cpp b/components/rgbd-sources/src/streamer.cpp
index e05e7faf9305ac0a3ede6dade21596bfe8251db7..3ca2bade8ddb4355682a0002beb74749d62a3ffb 100644
--- a/components/rgbd-sources/src/streamer.cpp
+++ b/components/rgbd-sources/src/streamer.cpp
@@ -193,7 +193,7 @@ void Streamer::add(Source *src) {
 			if (spkt.channel == Channel::Calibration) {
 				// Calibration changed, so lets re-check the bitrate presets
 				const auto &params = src->parameters();
-				s->hq_bitrate = ftl::codecs::findPreset(params.width, params.height);
+				s->hq_bitrate = ftl::codecs::kPresetBest;
 			}
 
 			//LOG(INFO) << "RAW CALLBACK";
@@ -294,11 +294,15 @@ void Streamer::_addClient(const string &source, int N, int rate, const ftl::UUID
 	for (auto &client : s->clients) {
 		// If already listening, just update chunk counters
 		if (client.peerid == peer) {
+			// Allow for same client but different quality (beyond threshold)
+			if ((client.preset < kQualityThreshold && rate >= kQualityThreshold) ||
+				(client.preset >= kQualityThreshold && rate < kQualityThreshold)) continue;
+				
 			client.txmax = N;
 			client.txcount = 0;
 
 			// Possible switch from high quality to low quality encoding or vice versa
-			if (client.preset < kQualityThreshold && rate >= kQualityThreshold) {
+			/*if (client.preset < kQualityThreshold && rate >= kQualityThreshold) {
 				s->hq_count--;
 				s->lq_count++;
 				if (s->lq_encoder_c1) s->lq_encoder_c1->reset();
@@ -308,7 +312,8 @@ void Streamer::_addClient(const string &source, int N, int rate, const ftl::UUID
 				s->lq_count--;
 				if (s->hq_encoder_c1) s->hq_encoder_c1->reset();
 				if (s->hq_encoder_c2) s->hq_encoder_c2->reset();
-			}
+				break;
+			}*/
 
 			client.preset = rate;
 			return;
@@ -459,20 +464,32 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 				auto *enc1 = src->hq_encoder_c1;
 				auto *enc2 = src->hq_encoder_c2;
 
-				// Important to send channel 2 first if needed...
-				// Receiver only waits for channel 1 by default
-				// TODO: Each encode could be done in own thread
-				if (hasChan2) {
-					// TODO: Stagger the reset between nodes... random phasing
-					if (fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc2->reset();
+				MUTEX mtx;
+				std::condition_variable cv;
+				bool chan2done = false;
 
-					auto chan = fs.sources[j]->getChannel();
-
-					enc2->encode(fs.frames[j].get<cv::cuda::GpuMat>(chan), src->hq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
-						_transmitPacket(src, blk, chan, hasChan2, Quality::High);
+				if (hasChan2) {
+					ftl::pool.push([this,&fs,enc2,src,hasChan2,&cv,j,&chan2done](int id) {
+						// TODO: Stagger the reset between nodes... random phasing
+						if (fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc2->reset();
+
+						auto chan = fs.sources[j]->getChannel();
+
+						try {
+							enc2->encode(fs.frames[j].get<cv::cuda::GpuMat>(chan), src->hq_bitrate, [this,src,hasChan2,chan,&cv,&chan2done](const ftl::codecs::Packet &blk){
+								_transmitPacket(src, blk, chan, hasChan2, Quality::High);
+								chan2done = true;
+								cv.notify_one();
+							});
+						} catch (std::exception &e) {
+							LOG(ERROR) << "Exception in encoder: " << e.what();
+							chan2done = true;
+							cv.notify_one();
+						}
 					});
 				} else {
 					if (enc2) enc2->reset();
+					chan2done = true;
 				}
 
 				// TODO: Stagger the reset between nodes... random phasing
@@ -480,6 +497,10 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 				enc1->encode(fs.frames[j].get<cv::cuda::GpuMat>(Channel::Colour), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
 					_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::High);
 				});
+
+				// Ensure both channels have been completed.
+				std::unique_lock<std::mutex> lk(mtx);
+				cv.wait(lk, [&chan2done]{ return chan2done; });
 			}
 		}
 
@@ -512,51 +533,6 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
 				});
 			}
 		}
-
-		// Do we need to do low quality encoding?
-		/*if (src->lq_count > 0) {
-			if (!src->lq_encoder_c1) src->lq_encoder_c1 = ftl::codecs::allocateLQEncoder();
-			if (!src->lq_encoder_c2) src->lq_encoder_c2 = ftl::codecs::allocateLQEncoder();
-
-			// Do we have the resources to do a LQ encoding?
-			if (src->lq_encoder_c1 && src->lq_encoder_c2) {
-				const auto *enc1 = src->lq_encoder_c1;
-				const auto *enc2 = src->lq_encoder_c2;
-
-				// Do entire frame as single step
-				if (!enc1->useBlocks() || !enc2->useBlocks()) {
-					ftl::pool.push([this,&fs,j,src](int id) {
-						_encodeLQAndTransmit(src, fs.channel1[j], fs.channel2[j], -1);
-						std::unique_lock<std::mutex> lk(job_mtx_);
-						--jobs_;
-						if (jobs_ == 0) job_cv_.notify_one();
-					});
-
-					jobs_++;
-				// Or divide frame into blocks and encode each
-				} else {
-					// Create jobs for each chunk
-					for (int i=0; i<chunk_count_; ++i) {
-						// Add chunk job to thread pool
-						ftl::pool.push([this,&fs,j,i,src](int id) {
-							int chunk = i;
-							try {
-								_encodeLQAndTransmit(src, fs.channel1[j], fs.channel2[j], chunk);
-							} catch(...) {
-								LOG(ERROR) << "Encode Exception: " << chunk;
-							}
-
-							//src->jobs--;
-							std::unique_lock<std::mutex> lk(job_mtx_);
-							--jobs_;
-							if (jobs_ == 0) job_cv_.notify_one();
-						});
-					}
-
-					jobs_ += chunk_count_;
-				}
-			}
-		}*/
 	}
 
 	/*std::unique_lock<std::mutex> lk(job_mtx_);
@@ -619,219 +595,3 @@ void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::StreamPacke
 		++c;
 	}
 }
-
-/*void Streamer::_encodeHQAndTransmit(StreamSource *src, const cv::Mat &c1, const cv::Mat &c2, int block) {
-	bool hasChan2 = (!c2.empty() && src->src->getChannel() != ftl::rgbd::kChanNone);
-
-	LOG(INFO) << "Encode HQ: " << block;
-
-	vector<unsigned char> c1buff;
-	vector<unsigned char> c2buff;
-
-	if (block == -1) {
-		src->hq_encoder_c1->encode(c1, c1buff, src->hq_bitrate, false);
-		if (hasChan2) src->hq_encoder_c2->encode(c2, c2buff, src->hq_bitrate, false);
-	} else {
-		//bool delta = (chunk+src->frame) % 8 > 0;  // Do XOR or not
-		int chunk_width = c1.cols / chunk_dim_;
-		int chunk_height = c1.rows / chunk_dim_;
-
-		// Build chunk heads
-		int cx = (block % chunk_dim_) * chunk_width;
-		int cy = (block / chunk_dim_) * chunk_height;
-		cv::Rect roi(cx,cy,chunk_width,chunk_height);
-		//vector<unsigned char> rgb_buf;
-		cv::Mat chunkRGB = c1(roi);
-		src->hq_encoder_c1->encode(chunkRGB, c1buff, src->hq_bitrate, false);
-
-		if (hasChan2) {
-			cv::Mat chunkDepth = c2(roi);
-			src->hq_encoder_c2->encode(chunkDepth, c2buff, src->hq_bitrate, false);
-		}
-	}
-
-	// Lock to prevent clients being added / removed
-	SHARED_LOCK(src->mutex,lk);
-	auto c = src->clients.begin();
-	while (c != src->clients.end()) {
-		const int b = (*c).bitrate;
-		if (b >= kQualityThreshold) continue; // Not a HQ request
-
-		try {
-			// TODO:(Nick) Send pose
-			short pre_transmit_latency = short(ftl::timer::get_time() - frame_no_);
-			if (!net_->send((*c).peerid, (*c).uri, frame_no_, pre_transmit_latency, uint8_t(src->hq_bitrate), block, c1buff, c2buff)) {
-				// Send failed so mark as client stream completed
-				(*c).txcount = (*c).txmax;
-			} else {
-				++(*c).txcount;
-				//LOG(INFO) << "SENT CHUNK : " << frame_no_ << "-" << chunk;
-			}
-		} catch(...) {
-			(*c).txcount = (*c).txmax;
-		}
-		++c;
-	}
-}
-
-void Streamer::_encodeLQAndTransmit(StreamSource *src, const cv::Mat &c1, const cv::Mat &c2, int block) {
-	bool hasChan2 = (!c2.empty() && src->src->getChannel() != ftl::rgbd::kChanNone);
-
-	LOG(INFO) << "Encode LQ: " << block;
-
-	vector<unsigned char> c1buff;
-	vector<unsigned char> c2buff;
-
-	if (block == -1) {
-		src->lq_encoder_c1->encode(c1, c1buff, src->lq_bitrate, false);
-		if (hasChan2) src->lq_encoder_c2->encode(c2, c2buff, src->lq_bitrate, false);
-	} else {
-		//bool delta = (chunk+src->frame) % 8 > 0;  // Do XOR or not
-		int chunk_width = c1.cols / chunk_dim_;
-		int chunk_height = c1.rows / chunk_dim_;
-
-		// Build chunk heads
-		int cx = (block % chunk_dim_) * chunk_width;
-		int cy = (block / chunk_dim_) * chunk_height;
-		cv::Rect roi(cx,cy,chunk_width,chunk_height);
-		//vector<unsigned char> rgb_buf;
-		cv::Mat chunkRGB = c1(roi);
-		//cv::resize(chunkRGB, downrgb, cv::Size(ABRController::getColourWidth(b) / chunk_dim_, ABRController::getColourHeight(b) / chunk_dim_));
-
-		src->lq_encoder_c1->encode(chunkRGB, c1buff, src->lq_bitrate, false);
-
-		if (hasChan2) {
-			cv::Mat chunkDepth = c2(roi);
-			//cv::resize(chunkDepth, tmp, cv::Size(ABRController::getDepthWidth(b) / chunk_dim_, ABRController::getDepthHeight(b) / chunk_dim_), 0, 0, cv::INTER_NEAREST);
-			src->lq_encoder_c2->encode(chunkDepth, c2buff, src->lq_bitrate, false);
-		}
-	}
-
-	// Lock to prevent clients being added / removed
-	SHARED_LOCK(src->mutex,lk);
-	auto c = src->clients.begin();
-	while (c != src->clients.end()) {
-		const int b = (*c).bitrate;
-		if (b < kQualityThreshold) continue; // Not an LQ request
-
-		try {
-			// TODO:(Nick) Send pose
-			short pre_transmit_latency = short(ftl::timer::get_time() - frame_no_);
-			if (!net_->send((*c).peerid, (*c).uri, frame_no_, pre_transmit_latency, uint8_t(src->hq_bitrate), block, c1buff, c2buff)) {
-				// Send failed so mark as client stream completed
-				(*c).txcount = (*c).txmax;
-			} else {
-				++(*c).txcount;
-				//LOG(INFO) << "SENT CHUNK : " << frame_no_ << "-" << chunk;
-			}
-		} catch(...) {
-			(*c).txcount = (*c).txmax;
-		}
-		++c;
-	}
-}*/
-
-/*void Streamer::_encodeImagesAndTransmit(StreamSource *src, const cv::Mat &rgb, const cv::Mat &depth, int chunk) {
-	bool hasChan2 = (!depth.empty() && src->src->getChannel() != ftl::rgbd::kChanNone);
-
-	//bool delta = (chunk+src->frame) % 8 > 0;  // Do XOR or not
-	int chunk_width = rgb.cols / chunk_dim_;
-	int chunk_height = rgb.rows / chunk_dim_;
-
-	// Build chunk heads
-	int cx = (chunk % chunk_dim_) * chunk_width;
-	int cy = (chunk / chunk_dim_) * chunk_height;
-	cv::Rect roi(cx,cy,chunk_width,chunk_height);
-	//vector<unsigned char> rgb_buf;
-	cv::Mat chunkRGB = rgb(roi);
-	cv::Mat chunkDepth;
-	//cv::Mat chunkDepthPrev = src->prev_depth(roi);
-
-	cv::Mat d2, d3;
-	//vector<unsigned char> d_buf;
-
-	if (hasChan2) {
-		chunkDepth = depth(roi);
-		if (chunkDepth.type() == CV_32F) chunkDepth.convertTo(d2, CV_16UC1, 1000); // 16*10);
-		else d2 = chunkDepth;
-		//if (delta) d3 = (d2 * 2) - chunkDepthPrev;
-		//else d3 = d2;
-		//d2.copyTo(chunkDepthPrev);
-	}
-
-	// TODO: Verify these don't allocate memory if not needed.
-	// TODO: Reuse these buffers to reduce allocations.
-	vector<unsigned char> brgb[ftl::rgbd::detail::kMaxBitrateLevels];
-	vector<unsigned char> bdepth[ftl::rgbd::detail::kMaxBitrateLevels];
-
-	// Lock to prevent clients being added / removed
-	SHARED_LOCK(src->mutex,lk);
-	auto c = src->clients.begin();
-	while (c != src->clients.end()) {
-		const int b = (*c).bitrate;
-
-		if (brgb[b].empty()) {
-			// Max bitrate means no changes
-			if (b == 0) {
-				_encodeImageChannel1(chunkRGB, brgb[b], b);
-				if (hasChan2) _encodeImageChannel2(d2, bdepth[b], src->src->getChannel(), b);
-
-			// Otherwise must downscale and change compression params
-			} else {
-				cv::Mat downrgb, downdepth;
-				cv::resize(chunkRGB, downrgb, cv::Size(ABRController::getColourWidth(b) / chunk_dim_, ABRController::getColourHeight(b) / chunk_dim_));
-				if (hasChan2) cv::resize(d2, downdepth, cv::Size(ABRController::getDepthWidth(b) / chunk_dim_, ABRController::getDepthHeight(b) / chunk_dim_), 0, 0, cv::INTER_NEAREST);
-
-				_encodeImageChannel1(downrgb, brgb[b], b);
-				if (hasChan2) _encodeImageChannel2(downdepth, bdepth[b], src->src->getChannel(), b);
-			}
-		}
-
-		try {
-			// TODO:(Nick) Send pose
-			short pre_transmit_latency = short(ftl::timer::get_time() - frame_no_);
-			if (!net_->send((*c).peerid, (*c).uri, frame_no_, pre_transmit_latency, uint8_t(b), chunk, brgb[b], bdepth[b])) {
-				// Send failed so mark as client stream completed
-				(*c).txcount = (*c).txmax;
-			} else {
-				++(*c).txcount;
-				//LOG(INFO) << "SENT CHUNK : " << frame_no_ << "-" << chunk;
-			}
-		} catch(...) {
-			(*c).txcount = (*c).txmax;
-		}
-		++c;
-	}
-}
-
-void Streamer::_encodeImageChannel1(const cv::Mat &in, vector<unsigned char> &out, unsigned int b) {
-	vector<int> jpgparams = {cv::IMWRITE_JPEG_QUALITY, ABRController::getColourQuality(b)};
-	cv::imencode(".jpg", in, out, jpgparams);
-}
-
-bool Streamer::_encodeImageChannel2(const cv::Mat &in, vector<unsigned char> &out, ftl::codecs::Channel_t c, unsigned int b) {
-	if (c == ftl::rgbd::kChanNone) return false;  // NOTE: Should not happen
-
-	if (isFloatChannel(c) && in.type() == CV_16U && in.channels() == 1) {
-		vector<int> params = {cv::IMWRITE_PNG_COMPRESSION, ABRController::getDepthQuality(b)};
-		if (!cv::imencode(".png", in, out, params)) {
-			LOG(ERROR) << "PNG Encoding error";
-			return false;
-		}
-		return true;
-	} else if (!isFloatChannel(c) && in.type() == CV_8UC3) {
-		vector<int> params = {cv::IMWRITE_JPEG_QUALITY, ABRController::getColourQuality(b)};
-		cv::imencode(".jpg", in, out, params);
-		return true;
-	} else {
-		LOG(ERROR) << "Bad channel configuration: channel=" << c << " imagetype=" << in.type(); 
-	}
-
-	return false;
-}
-
-Source *Streamer::get(const std::string &uri) {
-	SHARED_LOCK(mutex_,slk);
-	if (sources_.find(uri) != sources_.end()) return sources_[uri]->src;
-	else return nullptr;
-}*/
diff --git a/web-service/public/js/bundle.js b/web-service/public/js/bundle.js
index 1fa935de665164a0759f8931cd672616a2457f88..f3a3487c34f38f492ca2794e39fdf3381f16b36c 100644
--- a/web-service/public/js/bundle.js
+++ b/web-service/public/js/bundle.js
@@ -1,248 +1,248 @@
 (function(){function r(e,n,t){function o(i,f){if(!n[i]){if(!e[i]){var c="function"==typeof require&&require;if(!f&&c)return c(i,!0);if(u)return u(i,!0);var a=new Error("Cannot find module '"+i+"'");throw a.code="MODULE_NOT_FOUND",a}var p=n[i]={exports:{}};e[i][0].call(p.exports,function(r){var n=e[i][1][r];return o(n||r)},p,p.exports,r,e,n,t)}return n[i].exports}for(var u="function"==typeof require&&require,i=0;i<t.length;i++)o(t[i]);return o}return r})()({1:[function(require,module,exports){
-const Peer = require('../../server/src/peer')
-const VideoConverter = require('./lib/dist/video-converter');
-
-let current_data = {};
-let peer;
-
-
-function FtlStream(element, uri, options){
-
-    const backButton = document.createElement("BUTTON")
-    backButton.innerHTML = "STOP"
-    backButton.addEventListener("click",() => {this.stop()});
-    
-    const playButton = document.createElement("BUTTON");
-    playButton.innerHTML = "PLAY";
-    playButton.addEventListener("click",() => {this.play()});
-
-    const videoTag = document.createElement("video");
-    videoTag.setAttribute("width", "640")
-    videoTag.setAttribute("height", "360")
-
-    const homeButton = document.createElement("BUTTON");
-    homeButton.innerHTML = "Home";
-    homeButton.addEventListener("click",() => {this.stop(); renderThumbnails()});
-
-    const br = document.createElement("br")
-
-    element.innerHTML = `<h1>Stream from source ${uri}</h1><br>`;
-    element.appendChild(homeButton)
-    element.appendChild(backButton)
-    element.appendChild(playButton)
-    element.appendChild(br)
-    element.appendChild(videoTag)
-
-
-    this.uri = uri
-    this.running = true
-    this.video = element.lastChild;
-    this.element = element
-
-    this.connectToStream()
-}
-
-FtlStream.prototype.connectToStream = function() {
-    const converter = new VideoConverter.default(this.video, 20, 6);
-    peer.bind(this.uri, (latency, streampckg, pckg) => {
-        if(pckg[0] === 2){
-            function decode(value){
-                converter.appendRawData(value);
-            }
-            decode(pckg[5]);
-        };
-    })
-
-    function sender(that) {
-        const _this = that
-        if(_this.running){
-            peer.send("get_stream", (_this.uri, 30, 0, _this.uri));
-            converter.play();
-            setTimeout(() => sender(that), 1000)
-        }else{
-            converter.pause()
-            setTimeout(() => sender(that), 1000)
-        }
-    }
-    // Start the transaction
-    sender(this)
-}
-
-FtlStream.prototype.stop = function() {
-    this.running = false
-}
-
- FtlStream.prototype.play = function() {
-    this.running = true
- }
-
-
-
-/**
- * Validates that the user is logged in by sending the token 
- */
-checkIfLoggedIn = async () => {
-    //     const token = window.localStorage.getItem('token')
-    //     console.log(token)
-    //     if(!token){
-    //         console.log("You need to login")
-    //         renderLogin()
-    //     }else{
-
-    //         //Check if the token is valid
-    //         const response = await fetch('http://localhost:8080/auth/validation', {
-    //             method: 'POST',
-    //             headers: {'Authorization': token}
-    //         })
-    //         console.log('RESPONSE', response)
-            
-    //         //Token is valid, show available streams
-    //         if(response.status === 200){
-    //             console.log("SUCCESS")
-                createPeer()
-                renderThumbnails()
-
-    //         }
-    //     }
-}
-
-
-
-
-createVideoPlayer = () => {
-     const containerDiv = document.getElementById('container')
-     const player = new FtlStream(containerDiv, current_data.uri, {});
-     console.log(player)
-}
-
-/***********
- * THUMBNAILS
- ***********/
-
-//Returns a list of available streams
-getAvailableStreams = async () => {
-    try{
-        const streamsInJson = await fetch(`./streams`);
-        const streams = await streamsInJson.json();
-        console.log('AVAILABLE', streams)
-        return streams;
-    }catch(err){
-        console.log(err)
-    }
-}
-
-//Creates thumbnail (image) for all available streams and adds them to div class='container'
-renderThumbnails = async () => {
-    const thumbnails = await getAvailableStreams();
-    const containerDiv = document.getElementById('container')
-    containerDiv.innerHTML = '';
-    containerDiv.innerHTML = `<button onClick="configs()">change configs</button>`
-    containerDiv.innerHTML += `<div class="ftlab-stream-thumbnails"></div>`
-    if(thumbnails.length === 0){
-        containerDiv.innerHTML = `<h3>No streams running currently</h3>`
-    }else{
-        for(var i=0; i<thumbnails.length; i++){
-            const encodedURI = encodeURIComponent(thumbnails[i])
-            current_data.uri = thumbnails[i]
-            try{
-                const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
-                if(!someData.ok){
-                    throw new Error('Image not found')
-                }
-                const myBlob = await someData.blob();
-                const objectURL = URL.createObjectURL(myBlob);
-                // containerDiv.innerHTML += createCard()
-                containerDiv.innerHTML += createCard(objectURL, i+4)
-            }catch(err){
-                console.log("Couldn't create thumbnail");
-                console.log(err) 
-            }
-        }
-    }
-}
-
-//Function to create single card
-createCard = (url, viewers) => {
-    return `<div class='ftlab-card-component' >
-                <img src='${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
-                <p>Viewers: ${viewers}</p>
-                <button onclick="createVideoPlayer()">button</button>
-            </div>`
-}
-
-
-createPeer = () => {
-    peer = "";
-    // FOR PRODUCTION
-    // const ws = new WebSocket("ws://" + location.host + ":" + (location.port == "" ? "80" : location.port) + location.pathname);
-    const ws = new WebSocket("ws://localhost:8080")
-    ws.binaryType = "arraybuffer";
-    peer = new Peer(ws)
-}
-
-webSocketTest = () => {
-    peer.send("update_cfg", "ftl://utu.fi#reconstruction_default/0/renderer/cool_effect", "true")    
-}
-
-
-
-
-closeStream = () => {
-    peer.sock.close()
-}
-
-
-
-/**
- * **************
- * CONFIGURATIONS
- * **************
- */
-
-
-current_data.configURI = "ftl://utu.fi#reconstruction_snap8/net"
-
-configs = () => {
-    const container = document.getElementById("container");
-    container.innerHTML = `<div class="ftlab-configurations"></div>`;
-    renderConfigOptions();
-}
-
-
-renderConfigOptions = () => {
-    const input = `<p>input1</p><br>ftl://utu.fi#<input type="text">`
-    const doc = document.getElementsByClassName('ftlab-configurations')[0];
-    doc.innerHTML = input;
-}
-
-/**
- * 
- */
-loadConfigs = async (str) => {
-    const configURI = encodeURIComponent(`ftl://utu.fi#reconstruction_snap8${str}`);
-    const uri = encodeURIComponent(current_data.uri)
-    const rawResp = await fetch(`./stream/config?settings=${configURI}&uri=${uri}`)
-    const response = await rawResp.json();
-    const content = JSON.parse(response);
-    container.innerHTML += `<p>${response}</p>`;
-}
-
-// current_data.configData = '{"peers": 1}';
-
-/**
- * Method to send configurations to backend 
- */
-saveConfigs = async () => {
-    let {uri, configURI, configData} = current_data
-    const rawResp = await fetch('./stream/config', {
-        method: 'POST',
-        headers: {
-            'Accept': 'application/json',
-            'Content-Type': 'application/json'
-        },
-        body: JSON.stringify({peerURI: uri, configURI, data: configData, saveToCPP: true})
-    });
-    const content = await rawResp.json();
+const Peer = require('../../server/src/peer')
+const VideoConverter = require('./lib/dist/video-converter');
+
+let current_data = {};
+let peer;
+
+
+function FtlStream(element, uri, options){
+
+    const backButton = document.createElement("BUTTON")
+    backButton.innerHTML = "STOP"
+    backButton.addEventListener("click",() => {this.stop()});
+    
+    const playButton = document.createElement("BUTTON");
+    playButton.innerHTML = "PLAY";
+    playButton.addEventListener("click",() => {this.play()});
+
+    const videoTag = document.createElement("video");
+    videoTag.setAttribute("width", "640")
+    videoTag.setAttribute("height", "360")
+
+    const homeButton = document.createElement("BUTTON");
+    homeButton.innerHTML = "Home";
+    homeButton.addEventListener("click",() => {this.stop(); renderThumbnails()});
+
+    const br = document.createElement("br")
+
+    element.innerHTML = `<h1>Stream from source ${uri}</h1><br>`;
+    element.appendChild(homeButton)
+    element.appendChild(backButton)
+    element.appendChild(playButton)
+    element.appendChild(br)
+    element.appendChild(videoTag)
+
+
+    this.uri = uri
+    this.running = true
+    this.video = element.lastChild;
+    this.element = element
+
+    this.connectToStream()
+}
+
+FtlStream.prototype.connectToStream = function() {
+    const converter = new VideoConverter.default(this.video, 20, 6);
+    peer.bind(this.uri, (latency, streampckg, pckg) => {
+        if(pckg[0] === 2){
+            function decode(value){
+                converter.appendRawData(value);
+            }
+            decode(pckg[5]);
+        };
+    })
+
+    function sender(that) {
+        const _this = that
+        if(_this.running){
+            peer.send("get_stream", (_this.uri, 30, 0, _this.uri));
+            converter.play();
+            setTimeout(() => sender(that), 1000)
+        }else{
+            converter.pause()
+            setTimeout(() => sender(that), 1000)
+        }
+    }
+    // Start the transaction
+    sender(this)
+}
+
+FtlStream.prototype.stop = function() {
+    this.running = false
+}
+
+ FtlStream.prototype.play = function() {
+    this.running = true
+ }
+
+
+
+/**
+ * Validates that the user is logged in by sending the token 
+ */
+checkIfLoggedIn = async () => {
+    //     const token = window.localStorage.getItem('token')
+    //     console.log(token)
+    //     if(!token){
+    //         console.log("You need to login")
+    //         renderLogin()
+    //     }else{
+
+    //         //Check if the token is valid
+    //         const response = await fetch('http://localhost:8080/auth/validation', {
+    //             method: 'POST',
+    //             headers: {'Authorization': token}
+    //         })
+    //         console.log('RESPONSE', response)
+            
+    //         //Token is valid, show available streams
+    //         if(response.status === 200){
+    //             console.log("SUCCESS")
+                createPeer()
+                renderThumbnails()
+
+    //         }
+    //     }
+}
+
+
+
+
+createVideoPlayer = () => {
+     const containerDiv = document.getElementById('container')
+     const player = new FtlStream(containerDiv, current_data.uri, {});
+     console.log(player)
+}
+
+/***********
+ * THUMBNAILS
+ ***********/
+
+//Returns a list of available streams
+getAvailableStreams = async () => {
+    try{
+        const streamsInJson = await fetch(`./streams`);
+        const streams = await streamsInJson.json();
+        console.log('AVAILABLE', streams)
+        return streams;
+    }catch(err){
+        console.log(err)
+    }
+}
+
+//Creates thumbnail (image) for all available streams and adds them to div class='container'
+renderThumbnails = async () => {
+    const thumbnails = await getAvailableStreams();
+    const containerDiv = document.getElementById('container')
+    containerDiv.innerHTML = '';
+    containerDiv.innerHTML = `<button onClick="configs()">change configs</button>`
+    containerDiv.innerHTML += `<div class="ftlab-stream-thumbnails"></div>`
+    if(thumbnails.length === 0){
+        containerDiv.innerHTML = `<h3>No streams running currently</h3>`
+    }else{
+        for(var i=0; i<thumbnails.length; i++){
+            const encodedURI = encodeURIComponent(thumbnails[i])
+            current_data.uri = thumbnails[i]
+            try{
+                const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
+                if(!someData.ok){
+                    throw new Error('Image not found')
+                }
+                const myBlob = await someData.blob();
+                const objectURL = URL.createObjectURL(myBlob);
+                // containerDiv.innerHTML += createCard()
+                containerDiv.innerHTML += createCard(objectURL, i+4)
+            }catch(err){
+                console.log("Couldn't create thumbnail");
+                console.log(err) 
+            }
+        }
+    }
+}
+
+//Function to create single card
+createCard = (url, viewers) => {
+    return `<div class='ftlab-card-component' >
+                <img src='${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
+                <p>Viewers: ${viewers}</p>
+                <button onclick="createVideoPlayer()">button</button>
+            </div>`
+}
+
+
+createPeer = () => {
+    peer = "";
+    // FOR PRODUCTION
+    // const ws = new WebSocket("ws://" + location.host + ":" + (location.port == "" ? "80" : location.port) + location.pathname);
+    const ws = new WebSocket("ws://localhost:8080")
+    ws.binaryType = "arraybuffer";
+    peer = new Peer(ws)
+}
+
+webSocketTest = () => {
+    peer.send("update_cfg", "ftl://utu.fi#reconstruction_default/0/renderer/cool_effect", "true")    
+}
+
+
+
+
+closeStream = () => {
+    peer.sock.close()
+}
+
+
+
+/**
+ * **************
+ * CONFIGURATIONS
+ * **************
+ */
+
+
+current_data.configURI = "ftl://utu.fi#reconstruction_snap8/net"
+
+configs = () => {
+    const container = document.getElementById("container");
+    container.innerHTML = `<div class="ftlab-configurations"></div>`;
+    renderConfigOptions();
+}
+
+
+renderConfigOptions = () => {
+    const input = `<p>input1</p><br>ftl://utu.fi#<input type="text">`
+    const doc = document.getElementsByClassName('ftlab-configurations')[0];
+    doc.innerHTML = input;
+}
+
+/**
+ * 
+ */
+loadConfigs = async (str) => {
+    const configURI = encodeURIComponent(`ftl://utu.fi#reconstruction_snap8${str}`);
+    const uri = encodeURIComponent(current_data.uri)
+    const rawResp = await fetch(`./stream/config?settings=${configURI}&uri=${uri}`)
+    const response = await rawResp.json();
+    const content = JSON.parse(response);
+    container.innerHTML += `<p>${response}</p>`;
+}
+
+// current_data.configData = '{"peers": 1}';
+
+/**
+ * Method to send configurations to backend 
+ */
+saveConfigs = async () => {
+    let {uri, configURI, configData} = current_data
+    const rawResp = await fetch('./stream/config', {
+        method: 'POST',
+        headers: {
+            'Accept': 'application/json',
+            'Content-Type': 'application/json'
+        },
+        body: JSON.stringify({peerURI: uri, configURI, data: configData, saveToCPP: true})
+    });
+    const content = await rawResp.json();
 }
 },{"../../server/src/peer":36,"./lib/dist/video-converter":9}],2:[function(require,module,exports){
 "use strict";