diff --git a/components/codecs/include/ftl/codecs/hevc.hpp b/components/codecs/include/ftl/codecs/hevc.hpp
index f658635d6f239b4aa7a21331f60f6936c517ba93..b3a32246544f3cf24a4ad09345c2f47a96eb0735 100644
--- a/components/codecs/include/ftl/codecs/hevc.hpp
+++ b/components/codecs/include/ftl/codecs/hevc.hpp
@@ -97,6 +97,10 @@ inline NALType getNALType(const std::vector<uint8_t> &data) {
 	return static_cast<NALType>((data[4] >> 1) & 0x3F);
 }
 
+// Check that the buffer begins with a 4-byte Annex B start code (0x00000001).
+// Guards against short/empty packets before indexing: without the size check,
+// data[0]..data[3] on a packet of fewer than 4 bytes is undefined behaviour.
+inline bool validNAL(const std::vector<uint8_t> &data) {
+	return data.size() >= 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1;
+}
+
 /**
  * Check the HEVC bitstream for an I-Frame. With NvPipe, all I-Frames start
  * with a VPS NAL unit so just check for this.
diff --git a/components/codecs/src/nvpipe_decoder.cpp b/components/codecs/src/nvpipe_decoder.cpp
index 93261854d88e75e79878a41dd5f6f0b71bcf6e9f..d6652549c73fa5c5d6388030c480e73f331a4a7c 100644
--- a/components/codecs/src/nvpipe_decoder.cpp
+++ b/components/codecs/src/nvpipe_decoder.cpp
@@ -37,6 +37,7 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	is_float_channel_ = is_float_frame;
 	last_definition_ = pkt.definition;
 
+	//LOG(INFO) << "DECODE OUT: " << out.rows << ", " << out.type();
 	//LOG(INFO) << "DECODE RESOLUTION: (" << (int)pkt.definition << ") " << ftl::codecs::getWidth(pkt.definition) << "x" << ftl::codecs::getHeight(pkt.definition);
 
 	// Build a decoder instance of the correct kind
@@ -49,8 +50,6 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 		if (!nv_decoder_) {
 			//LOG(INFO) << "Bitrate=" << (int)bitrate << " width=" << ABRController::getColourWidth(bitrate);
 			LOG(FATAL) << "Could not create decoder: " << NvPipe_GetError(NULL);
-		} else {
-			DLOG(INFO) << "Decoder created";
 		}
 
 		seen_iframe_ = false;
@@ -60,17 +59,19 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	tmp_.create(cv::Size(ftl::codecs::getWidth(pkt.definition),ftl::codecs::getHeight(pkt.definition)), (is_float_frame) ? CV_16U : CV_8UC4);
 
 	// Check for an I-Frame
-	if (pkt.codec == ftl::codecs::codec_t::HEVC) {
-		if (ftl::codecs::hevc::isIFrame(pkt.data)) seen_iframe_ = true;
-	} else if (pkt.codec == ftl::codecs::codec_t::H264) {
-		if (ftl::codecs::h264::isIFrame(pkt.data)) seen_iframe_ = true;
+	if (!seen_iframe_) {
+		if (pkt.codec == ftl::codecs::codec_t::HEVC) {
+			if (ftl::codecs::hevc::isIFrame(pkt.data)) seen_iframe_ = true;
+		} else if (pkt.codec == ftl::codecs::codec_t::H264) {
+			if (ftl::codecs::h264::isIFrame(pkt.data)) seen_iframe_ = true;
+		}
 	}
 
 	// No I-Frame yet so don't attempt to decode P-Frames.
 	if (!seen_iframe_) return false;
 
 	// Final checks for validity
-	if (pkt.data.size() == 0 || tmp_.empty()) {
+	if (pkt.data.size() == 0 || tmp_.size() != out.size()) { // || !ftl::codecs::hevc::validNAL(pkt.data)) {
 		LOG(ERROR) << "Failed to decode packet";
 		return false;
 	}
@@ -80,24 +81,24 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 
 	if (is_float_frame) {
 		// Is the received frame the same size as requested output?
-		if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
+		//if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
 			tmp_.convertTo(out, CV_32FC1, 1.0f/1000.0f, stream_);
-		} else {
+		/*} else {
 			LOG(WARNING) << "Resizing decoded frame from " << tmp_.size() << " to " << out.size();
 			// FIXME: This won't work on GPU
 			tmp_.convertTo(tmp_, CV_32FC1, 1.0f/1000.0f, stream_);
 			cv::cuda::resize(tmp_, out, out.size(), 0, 0, cv::INTER_NEAREST, stream_);
-		}
+		}*/
 	} else {
 		// Is the received frame the same size as requested output?
-		if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
+		//if (out.rows == ftl::codecs::getHeight(pkt.definition)) {
 			// Flag 0x1 means frame is in RGB so needs conversion to BGR
 			if (pkt.flags & 0x1) {
 				cv::cuda::cvtColor(tmp_, out, cv::COLOR_RGBA2BGR, 0, stream_);
 			} else {
 				cv::cuda::cvtColor(tmp_, out, cv::COLOR_BGRA2BGR, 0, stream_);
 			}
-		} else {
+		/*} else {
 			LOG(WARNING) << "Resizing decoded frame from " << tmp_.size() << " to " << out.size();
 			// FIXME: This won't work on GPU, plus it allocates extra memory...
 			// Flag 0x1 means frame is in RGB so needs conversion to BGR
@@ -107,7 +108,7 @@ bool NvPipeDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 				cv::cuda::cvtColor(tmp_, tmp_, cv::COLOR_BGRA2BGR, 0, stream_);
 			}
 			cv::cuda::resize(tmp_, out, out.size(), 0.0, 0.0, cv::INTER_LINEAR, stream_);
-		}
+		}*/
 	}
 
 	stream_.waitForCompletion();
diff --git a/components/codecs/src/nvpipe_encoder.cpp b/components/codecs/src/nvpipe_encoder.cpp
index 132a3209ad0849dd76f1a5f7438eba8f5655b854..86fccdefc0d91f85694b105986eb49a423cc5863 100644
--- a/components/codecs/src/nvpipe_encoder.cpp
+++ b/components/codecs/src/nvpipe_encoder.cpp
@@ -123,7 +123,7 @@ bool NvPipeEncoder::encode(const cv::cuda::GpuMat &in, definition_t odefinition,
 	pkt.data.resize(cs);
 	was_reset_ = false;
 
-	if (cs == 0) {
+	if (cs == 0 || cs >= ftl::codecs::kVideoBufferSize) {
 		LOG(ERROR) << "Could not encode video frame: " << NvPipe_GetError(nvenc_);
 		return false;
 	} else {
diff --git a/components/rgbd-sources/include/ftl/rgbd/frame.hpp b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
index e7a949600e6ba097aeda54460e83a1529851371e..8411c71a626e23216fcedac5df35e0ce49863f3b 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frame.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
@@ -223,7 +223,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, const
 		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows) {
-		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
+		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	}
@@ -256,7 +256,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, bool i
 		//LOG(INFO) << "Creating texture object";
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows || m.tex.devicePtr() != m.gpu.data) {
-		LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
+		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
 		m.tex.free();
 		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
 	}
diff --git a/components/rgbd-sources/src/abr.cpp b/components/rgbd-sources/src/abr.cpp
index c338d4725ed2ab493fd61143d24b9b7241453622..bf4b76930816c11e9a5e6f8353ddd176788afd89 100644
--- a/components/rgbd-sources/src/abr.cpp
+++ b/components/rgbd-sources/src/abr.cpp
@@ -41,7 +41,7 @@ bitrate_t ABRController::selectBitrate(const NetFrame &frame) {
 
     float actual_mbps = (float(frame.tx_size) * 8.0f * (1000.0f / float(frame.tx_latency))) / 1048576.0f;
     float min_mbps = (float(frame.tx_size) * 8.0f * (1000.0f / float(ftl::timer::getInterval()))) / 1048576.0f;
-    //LOG(INFO) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
+    if (actual_mbps < min_mbps) LOG(WARNING) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
     float ratio = actual_mbps / min_mbps;
     //LOG(INFO) << "Rate Ratio = " << frame.tx_latency;
 
diff --git a/components/rgbd-sources/src/sources/net/net.cpp b/components/rgbd-sources/src/sources/net/net.cpp
index 51ecbc01e592e217f229512e624a079e68bcff8a..3a9d4730c97ada9240a0f00bb9344f1616e37164 100644
--- a/components/rgbd-sources/src/sources/net/net.cpp
+++ b/components/rgbd-sources/src/sources/net/net.cpp
@@ -276,10 +276,37 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 		LOG(WARNING) << "Missing calibration, skipping frame";
 		return;
 	}
+
+	//LOG(INFO) << "PACKET: " << spkt.timestamp << ", " << (int)spkt.channel << ", " << (int)pkt.codec;
 	
 	const cv::Size size = cv::Size(ftl::codecs::getWidth(pkt.definition), ftl::codecs::getHeight(pkt.definition));
 	NetFrame &frame = queue_.getFrame(spkt.timestamp, size, CV_8UC3, (isFloatChannel(chan) ? CV_32FC1 : CV_8UC3));
 
+	if (timestamp_ > 0 && frame.timestamp <= timestamp_) {
+		LOG(ERROR) << "Duplicate frame - " << frame.timestamp << " received=" << int(rchan) << " uri=" << uri_;
+		return;
+	}
+
+	// Calculate how many packets to expect for this channel
+	if (frame.chunk_total[channum] == 0) {
+		frame.chunk_total[channum] = pkt.block_total;
+	}
+
+	// Capture tx time of first received chunk
+	if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
+		UNIQUE_LOCK(frame.mtx, flk);
+		if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
+			frame.tx_latency = int64_t(ttimeoff);
+		}
+	}	
+
+	++frame.chunk_count[channum];
+	if (frame.chunk_count[channum] == frame.chunk_total[channum]) ++frame.channel_count;
+	if (frame.chunk_count[channum] > frame.chunk_total[channum]) {
+		LOG(WARNING) << "Too many channel packets received, discarding";
+		return;
+	}
+
 	// Update frame statistics
 	frame.tx_size += pkt.data.size();
 
@@ -309,31 +336,8 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
 
 	// TODO:(Nick) Decode directly into double buffer if no scaling
 
-	if (timestamp_ > 0 && frame.timestamp <= timestamp_) {
-		LOG(ERROR) << "BAD DUPLICATE FRAME - " << frame.timestamp << " received=" << int(rchan) << " uri=" << uri_;
-		return;
-	}
-
-	// Calculate how many packets to expect for this channel
-	if (frame.chunk_total[channum] == 0) {
-		frame.chunk_total[channum] = pkt.block_total;
-	}
-
-	// Capture tx time of first received chunk
-	if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
-		UNIQUE_LOCK(frame.mtx, flk);
-		if (frame.chunk_count[0] == 0 && frame.chunk_count[1] == 0) {
-			frame.tx_latency = int64_t(ttimeoff);
-		}
-	}	
-
-	++frame.chunk_count[channum];
-	if (frame.chunk_count[channum] == frame.chunk_total[channum]) ++frame.channel_count;
-	if (frame.chunk_count[channum] > frame.chunk_total[channum]) LOG(ERROR) << "TOO MANY CHUNKS";
-
 	// Last chunk of both channels now received, so we are done.
 	if (frame.channel_count == spkt.channel_count) {
-		LOG(INFO) << "COMPLETED FRAME " << frame.channel_count << ", " << (int)spkt.channel_count;
 		_completeFrame(frame, now-(spkt.timestamp+frame.tx_latency));
 	}
 }