diff --git a/applications/gui/src/camera.cpp b/applications/gui/src/camera.cpp
index 4488fbeafad03b7ee5802de88905850585acd1d2..40f4757e2e30b2c0b3d8c6b2b10d823bcf7e7534 100644
--- a/applications/gui/src/camera.cpp
+++ b/applications/gui/src/camera.cpp
@@ -222,6 +222,8 @@ ftl::gui::Camera::~Camera() {
 }
 
 void ftl::gui::Camera::draw(ftl::rgbd::FrameSet &fs) {
+	if (fid_ != 255) return;
+	
 	UNIQUE_LOCK(mutex_, lk);
 	_draw(fs);
 }
diff --git a/applications/reconstruct/src/main.cpp b/applications/reconstruct/src/main.cpp
index f300b292a58ba18a1a0a4ee2e8c5e0352e4ed08f..c7cc97fcdfb440805196936dfc45322be23ac11e 100644
--- a/applications/reconstruct/src/main.cpp
+++ b/applications/reconstruct/src/main.cpp
@@ -111,6 +111,7 @@ static void run(ftl::Configurable *root) {
 	// Use other GPU if available.
 	ftl::cuda::setDevice(ftl::cuda::deviceCount()-1);
 	threadSetCUDADevice();
+	ftl::timer::setClockSlave(false);  // don't sync clocks.
 	
 
 	Universe *net = ftl::create<Universe>(root, "net");
diff --git a/components/audio/src/source.cpp b/components/audio/src/source.cpp
index cec0c2d982987ea62bb6dca0acfe6406bed294b9..0e6c4f4e3c0b15c86be8585e7f1601b7aa46c3a7 100644
--- a/components/audio/src/source.cpp
+++ b/components/audio/src/source.cpp
@@ -141,6 +141,8 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(4800
         return true;
     }); 
 
+	LOG(INFO) << "Microphone ready.";
+
 	#else  // No portaudio
 
 	active_ = false;
diff --git a/components/audio/src/speaker.cpp b/components/audio/src/speaker.cpp
index 41836b83796718f12351603fdf75faff741e2f6f..adb54aa82ab20e23eef82d3a96061793b70435dc 100644
--- a/components/audio/src/speaker.cpp
+++ b/components/audio/src/speaker.cpp
@@ -60,6 +60,8 @@ Speaker::Speaker(nlohmann::json &config) : ftl::Configurable(config), buffer_(48
 		return;
 	}
 
+	LOG(INFO) << "Speaker ready.";
+
 	#else  // No portaudio
 
 	active_ = false;
diff --git a/components/common/cpp/include/ftl/timer.hpp b/components/common/cpp/include/ftl/timer.hpp
index 97776c2c3ee9bcbb62b8d81909d87a7dc43ecd28..266278b40eaec59342f11ab0a8065fa5e329669c 100644
--- a/components/common/cpp/include/ftl/timer.hpp
+++ b/components/common/cpp/include/ftl/timer.hpp
@@ -80,6 +80,18 @@ int64_t get_time();
  */
 void setClockAdjustment(int64_t ms);
 
+/**
+ * Whether or not this machine should synchronise its clock to another machine.
+ * Should be true on capture / vision nodes and false on a reconstruction node.
+ */
+bool isClockSlave();
+
+/**
+ * Change clock slave mode. If set to true then this machine will attempt to
+ * adjust its clock to match another machine's.
+ */
+void setClockSlave(bool);
+
 /**
  * Add a timer callback with a given precision and ordering. The highest
  * precision callbacks occur within 1ms of required time and should return
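A note on the new clock API: capture / vision nodes keep the default slave mode, while a reconstruction node opts out (as `reconstruct/src/main.cpp` above now does). A minimal usage sketch, assuming a hypothetical `configureClock` helper and an `is_reconstruction_node` flag supplied by the caller:

```cpp
#include <ftl/timer.hpp>

// Hypothetical helper: a reconstruction node acts as the clock master, so it
// disables slave mode; capture nodes keep the default (slave = true).
void configureClock(bool is_reconstruction_node) {
	ftl::timer::setClockSlave(!is_reconstruction_node);

	if (!ftl::timer::isClockSlave()) {
		// This machine will not adjust its own clock to a remote master
		// (see the isClockSlave() guard added in netstream.cpp below).
	}
}
```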
diff --git a/components/common/cpp/src/timer.cpp b/components/common/cpp/src/timer.cpp
index 5765eb451ee4ca75a953775b447c4f4163ccc421..28e1911b6132b45ee438a40cd0d389c0db454558 100644
--- a/components/common/cpp/src/timer.cpp
+++ b/components/common/cpp/src/timer.cpp
@@ -25,6 +25,7 @@ static bool active = false;
 static std::atomic<int> active_jobs = 0;
 static MUTEX mtx;
 static int last_id = 0;
+static bool clock_slave = true;
 
 struct TimerJob {
 	int id;
@@ -105,6 +106,14 @@ void ftl::timer::setClockAdjustment(int64_t ms) {
 	clock_adjust += ms;
 }
 
+void ftl::timer::setClockSlave(bool s) {
+	clock_slave = s;
+}
+
+bool ftl::timer::isClockSlave() {
+	return clock_slave;
+}
+
 const TimerHandle ftl::timer::add(timerlevel_t l, const std::function<bool(int64_t ts)> &f) {
 	if (l < 0 || l >= kTimerMAXLEVEL) return {};
 
diff --git a/components/operators/src/operator.cpp b/components/operators/src/operator.cpp
index 0ca639e1e9d1f58ca644b01252bf0205414a7d57..f0e8868ab65516d7810e1177f140ef1133b39ec9 100644
--- a/components/operators/src/operator.cpp
+++ b/components/operators/src/operator.cpp
@@ -87,7 +87,7 @@ bool Graph::apply(FrameSet &in, FrameSet &out, cudaStream_t stream) {
 	}
 
 	if (stream == 0) {
-		cudaStreamSynchronize(stream_actual);
+		cudaSafeCall(cudaStreamSynchronize(stream_actual));
 		cudaSafeCall( cudaGetLastError() );
 	}
 
diff --git a/components/renderers/cpp/include/ftl/render/tri_render.hpp b/components/renderers/cpp/include/ftl/render/tri_render.hpp
index c709e818f97bc9284e75836c13209513be666f53..8e47d80e0d2e0a74f6ca9f9a1b9c7be4997f4f24 100644
--- a/components/renderers/cpp/include/ftl/render/tri_render.hpp
+++ b/components/renderers/cpp/include/ftl/render/tri_render.hpp
@@ -65,6 +65,7 @@ class Triangular : public ftl::render::Renderer {
 	void _renderDensity(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t);
 	void _renderRight(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t);
 	void _renderSecond(ftl::rgbd::Frame &out, ftl::codecs::Channel chan, const Eigen::Matrix4d &t);
+	void _render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, ftl::codecs::Channel, const Eigen::Matrix4d &t);
 
 	bool _alreadySeen() const { return last_frame_ == scene_->timestamp; }
 };
diff --git a/components/renderers/cpp/src/reprojection.cu b/components/renderers/cpp/src/reprojection.cu
index b5bac1ed14f23b3009de17ccaaba1e115417f5e2..2e342d1e2ceb903416fd7e22f668a0819127cd27 100644
--- a/components/renderers/cpp/src/reprojection.cu
+++ b/components/renderers/cpp/src/reprojection.cu
@@ -350,7 +350,7 @@ __global__ void fix_colour_kernel(
 	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
 	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-	if (x < out.width() && y < out.height()) {
+	if (x >= RADIUS && y >= RADIUS && x < out.width()-RADIUS && y < out.height()-RADIUS) {
 		const float contrib = contribs.tex2D((int)x,(int)y);
 		const float d = depth.tex2D(x,y);
 
diff --git a/components/renderers/cpp/src/tri_render.cpp b/components/renderers/cpp/src/tri_render.cpp
index f221f16ec3e4d505b6b4ba7ab96fd8ca7557237b..cb858313c249da53d55ea7693243151e08bdc58f 100644
--- a/components/renderers/cpp/src/tri_render.cpp
+++ b/components/renderers/cpp/src/tri_render.cpp
@@ -232,6 +232,11 @@ void Triangular::__reprojectChannel(ftl::rgbd::Frame &output, ftl::codecs::Chann
 			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
 		}*/
 
+		if (!f.hasChannel(in)) {
+			LOG(ERROR) << "Reprojecting unavailable channel";
+			return;
+		}
+
 		auto transform = MatrixConversion::toCUDA(f.getPose().cast<float>().inverse() * t.cast<float>().inverse()) * params_.m_viewMatrixInverse;
 		auto transformR = MatrixConversion::toCUDA(f.getPose().cast<float>().inverse()).getFloat3x3();
 
@@ -555,7 +560,7 @@ void Triangular::_postprocessColours(ftl::rgbd::Frame &out) {
 			params_.camera,
 			stream_
 		);
-	} else {
+	} else if (out.hasChannel(Channel::Depth) && out.hasChannel(Channel::Colour) && temp_.hasChannel(Channel::Contribution)) {
 		ftl::cuda::fix_bad_colour(
 			out.getTexture<float>(Channel::Depth),
 			out.getTexture<uchar4>(Channel::Colour),
@@ -625,10 +630,7 @@ void Triangular::_renderSecond(ftl::rgbd::Frame &out, ftl::codecs::Channel chan,
 	_renderChannel(out, chan, chan, t, stream_);
 }
 
-bool Triangular::render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel chan, const Eigen::Matrix4d &t) {
-	scene_ = &in;
-	if (scene_->frames.size() == 0) return false;
-
+void Triangular::_render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel chan, const Eigen::Matrix4d &t) {
 	const auto &camera = out.getLeftCamera();
 	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream_);
 	//cudaSafeCall(cudaSetDevice(scene_->getCUDADevice()));
@@ -649,9 +651,13 @@ bool Triangular::render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel
 			camera, pose, stream_);
 	}
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+
 	// Render source specific debug info into colour channels
 	_preprocessColours();
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+
 	if (mesh_) {
 		// Render depth channel using triangles
 		_mesh(out, t, stream_);
@@ -660,13 +666,19 @@ bool Triangular::render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel
 		_dibr(out, t, stream_);
 	}
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+
 	// Reprojection of colours onto surface
 	auto main_channel = (scene_->frames[0].hasChannel(Channel::ColourHighRes)) ? Channel::ColourHighRes : Channel::Colour;
 	_renderChannel(out, main_channel, Channel::Colour, t, stream_);
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+
 	// Debug colour info relating to the rendering process
 	_postprocessColours(out);
 
+	cudaSafeCall(cudaStreamSynchronize(stream_));
+
 	// Support rendering of a second channel without redoing all the work
 	switch(chan) {
 	case Channel::None			:
@@ -677,8 +689,30 @@ bool Triangular::render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel
 	case Channel::Right			: _renderRight(out, t); break;
 	default						: _renderSecond(out, chan, t);
 	}
+}
+
+bool Triangular::render(ftl::rgbd::FrameSet &in, ftl::rgbd::Frame &out, Channel chan, const Eigen::Matrix4d &t) {
+	if (scene_) {
+		LOG(WARNING) << "Already rendering...";
+		return false;
+	}
+	scene_ = &in;
+	if (scene_->frames.size() == 0) {
+		scene_ = nullptr;
+		return false;
+	}
+
+	bool success = true;
+
+	try {
+		_render(in, out, chan, t);
+		cudaSafeCall(cudaStreamSynchronize(stream_));
+	} catch (std::exception &e) {
+		LOG(ERROR) << "Exception in render: " << e.what();
+		success = false;
+	}
 
-	cudaSafeCall(cudaStreamSynchronize(stream_));
 	last_frame_ = scene_->timestamp;
-	return true;
+	scene_ = nullptr;
+	return success;
 }
diff --git a/components/rgbd-sources/include/ftl/rgbd/camera.hpp b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
index 8108b738e57aefb70ca53c76200c1db5dce703bf..82a981d736e97ed42f403e62ef67f2f3a1dd2909 100644
--- a/components/rgbd-sources/include/ftl/rgbd/camera.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
@@ -54,8 +54,9 @@ struct __align__(16) Camera {
 template <> __device__
 inline float2 ftl::rgbd::Camera::camToScreen<float2>(const float3 &pos) const {
 	return make_float2(
-			pos.x*fx/pos.z - cx,			
-			pos.y*fy/pos.z - cy);
+		static_cast<float>(pos.x*fx/pos.z - cx),			
+		static_cast<float>(pos.y*fy/pos.z - cy)
+	);
 }
 
 template <> __device__
@@ -72,8 +73,8 @@ inline uint2 ftl::rgbd::Camera::camToScreen<uint2>(const float3 &pos) const {
 
 __device__
 inline float3 ftl::rgbd::Camera::screenToCam(uint ux, uint uy, float depth) const {
-	const float x = ((float)ux+cx) / fx;
-	const float y = ((float)uy+cy) / fy;
+	const float x = static_cast<float>(((float)ux+cx) / fx);
+	const float y = static_cast<float>(((float)uy+cy) / fy);
 	return make_float3(depth*x, depth*y, depth);
 }
 
diff --git a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
index 6a2efde2510ac23a0a762962bb81c8f5f8dd3b63..e23a42d691414fee9096aabe07ec71442b8f3aab 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
@@ -110,7 +110,17 @@ class Builder : public Generator {
 	/**
 	 * Add a new frame at a given timestamp.
 	 */
-	void push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &f);
+	//void push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &f);
+
+	/**
+	 * Instead of pushing a frame, find or create a direct reference to one.
+	 */
+	ftl::rgbd::Frame &get(int64_t timestamp, size_t ix);
+
+	/**
+	 * Mark a frame as completed.
+	 */
+	void completed(int64_t ts, size_t ix);
 
 	void setName(const std::string &name);
 
diff --git a/components/rgbd-sources/src/frameset.cpp b/components/rgbd-sources/src/frameset.cpp
index 70e6c0c47439d59ebc513a7f3850f0313c70d683..e98b5677cb48004c181c614ae31aed310460ed4a 100644
--- a/components/rgbd-sources/src/frameset.cpp
+++ b/components/rgbd-sources/src/frameset.cpp
@@ -84,7 +84,7 @@ Builder::~Builder() {
 }
 
 
-void Builder::push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &frame) {
+/*void Builder::push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &frame) {
 	if (timestamp <= 0 || ix >= kMaxFramesInSet) return;
 
 	UNIQUE_LOCK(mutex_, lk);
@@ -120,6 +120,54 @@ void Builder::push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &frame) {
 		LOG(ERROR) << "Too many frames received for given timestamp: " << timestamp << " (source " << ix << ")";
 	}
 	fs->mask |= (1 << ix);
+}*/
+
+ftl::rgbd::Frame &Builder::get(int64_t timestamp, size_t ix) {
+	if (timestamp <= 0 || ix >= kMaxFramesInSet) throw ftl::exception("Invalid frame timestamp or index");
+
+	UNIQUE_LOCK(mutex_, lk);
+
+	//LOG(INFO) << "BUILDER PUSH: " << timestamp << ", " << ix << ", " << size_;
+
+	// Size is determined by largest frame index received... note that size
+	// cannot therefore reduce.
+	if (ix >= size_) {
+		size_ = ix+1;
+		states_.resize(size_);
+	}
+	//states_[ix] = frame.origin();
+
+	auto *fs = _findFrameset(timestamp);
+
+	if (!fs) {
+		// Add new frameset
+		fs = _addFrameset(timestamp);
+		if (!fs) throw ftl::exception("Could not add frameset");
+	}
+
+	if (fs->frames.size() < size_) fs->frames.resize(size_);
+
+	//lk.unlock();
+	//SHARED_LOCK(fs->mtx, lk2);
+
+	//frame.swapTo(ftl::codecs::kAllChannels, fs->frames[ix]);
+
+	return fs->frames[ix];
+}
+
+void Builder::completed(int64_t ts, size_t ix) {
+	auto *fs = _findFrameset(ts);
+	if (fs && ix < fs->frames.size()) {
+		if (fs->mask & (1 << ix)) {
+			LOG(WARNING) << "Frame completed multiple times: " << ts << " (source " << ix << ")";
+			return;
+		}
+		states_[ix] = fs->frames[ix].origin();
+		fs->mask |= (1 << ix);
+		++fs->count;
+	} else {
+		LOG(ERROR) << "Completing frame that does not exist: " << ts << ":" << ix;
+	}
 }
 
 size_t Builder::size() {
@@ -141,6 +189,7 @@ void Builder::onFrameSet(const std::function<bool(ftl::rgbd::FrameSet &)> &cb) {
 	main_id_ = ftl::timer::add(ftl::timer::kTimerMain, [this,cb](int64_t ts) {
 		//if (jobs_ > 0) LOG(ERROR) << "SKIPPING TIMER JOB " << ts;
 		if (jobs_ > 0) return true;
+		if (size_ == 0) return true;
 		jobs_++;
 
 		// Find a previous frameset and specified latency and do the sync
diff --git a/components/rgbd-sources/src/group.cpp b/components/rgbd-sources/src/group.cpp
index 5739d0396fb2f554dc2c58ed35c0e90e0ca88688..0f9673bfea671b11dd70cfb17a7e66591c953ce1 100644
--- a/components/rgbd-sources/src/group.cpp
+++ b/components/rgbd-sources/src/group.cpp
@@ -15,6 +15,7 @@ using std::vector;
 using std::chrono::milliseconds;
 using std::this_thread::sleep_for;
 using ftl::codecs::Channel;
+using ftl::codecs::Channels;
 
 Group::Group() : pipeline_(nullptr) {
 	jobs_ = 0;
@@ -48,7 +49,10 @@ void Group::addSource(ftl::rgbd::Source *src) {
 		if (pipeline_) {
 			pipeline_->apply(frame, frame, 0);
 		}
-		builder_.push(timestamp, ix, frame);
+
+		frame.swapTo(Channels<0>::All(), builder_.get(timestamp,ix));
+		builder_.completed(timestamp, ix);
+		//builder_.push(timestamp, ix, frame);
 	});
 }
 
diff --git a/components/streams/src/netstream.cpp b/components/streams/src/netstream.cpp
index 95a4ca9414b1e5e924963fd64594103d116780fe..cb0a648764f846dbb01c9f28cbeace88ddfd606f 100644
--- a/components/streams/src/netstream.cpp
+++ b/components/streams/src/netstream.cpp
@@ -320,7 +320,7 @@ bool Net::_processRequest(ftl::net::Peer &p, const ftl::codecs::Packet &pkt) {
 	}
 
 	// Sync clocks!
-	if (p.id() == time_peer_) {
+	if (ftl::timer::isClockSlave() && p.id() == time_peer_) {
 		auto start = std::chrono::high_resolution_clock::now();
 		int64_t mastertime;
 
diff --git a/components/streams/src/receiver.cpp b/components/streams/src/receiver.cpp
index 93ead346533981ce430d44fecb209566dce6887d..26d9c794746b5a2fee14014b7c5ef2ef129fa536 100644
--- a/components/streams/src/receiver.cpp
+++ b/components/streams/src/receiver.cpp
@@ -146,7 +146,7 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 	const ftl::codecs::Channel rchan = spkt.channel;
 	const unsigned int channum = (unsigned int)spkt.channel;
-	InternalVideoStates &iframe = _getVideoFrame(spkt);
+	InternalVideoStates &ividstate = _getVideoFrame(spkt);
 
 	auto [tx,ty] = ftl::codecs::chooseTileConfig(pkt.frame_count);
 	int width = ftl::codecs::getWidth(pkt.definition);
@@ -155,14 +155,14 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 	//LOG(INFO) << " CODEC = " << (int)pkt.codec << " " << (int)pkt.flags << " " << (int)spkt.channel;
 	//LOG(INFO) << "Decode surface: " << (width*tx) << "x" << (height*ty);
 
-	auto &surface = iframe.surface[static_cast<int>(spkt.channel)];
+	auto &surface = ividstate.surface[static_cast<int>(spkt.channel)];
 
 	// Allocate a decode surface, this is a tiled image to be split later
 	surface.create(height*ty, width*tx, ((isFloatChannel(spkt.channel)) ? ((pkt.flags & 0x2) ? CV_16UC4 : CV_16U) : CV_8UC4));
 
 	// Find or create the decoder
-	_createDecoder(iframe, channum, pkt);
-	auto *decoder = iframe.decoders[channum];
+	_createDecoder(ividstate, channum, pkt);
+	auto *decoder = ividstate.decoders[channum];
 	if (!decoder) {
 		LOG(ERROR) << "No frame decoder available";
 		return;
@@ -193,29 +193,32 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 	// Now split the tiles from surface into frames, doing colour conversions
 	// at the same time.
 	for (int i=0; i<pkt.frame_count; ++i) {
-		InternalVideoStates &frame = _getVideoFrame(spkt,i);
+		InternalVideoStates &vidstate = _getVideoFrame(spkt,i);
+		auto &frame = builder_.get(spkt.timestamp, spkt.frame_number+i);
+
+		if (!frame.origin()) frame.setOrigin(&vidstate.state);
 
-		if (frame.frame.hasChannel(spkt.channel)) {
+		if (frame.hasChannel(spkt.channel)) {
 			// FIXME: Is this a corruption in recording or in playback?
 			// Seems to occur in same place in ftl file, one channel is missing
-			LOG(WARNING) << "Previous frame not complete: " << frame.timestamp;
+			LOG(WARNING) << "Previous frame not complete: " << spkt.timestamp;
 		}
 
 		{
 			// This ensures that if previous frames are unfinished then they
 			// are discarded.
-			UNIQUE_LOCK(frame.mutex, lk);
+			/*UNIQUE_LOCK(vidstate.mutex, lk);
 			if (frame.timestamp != spkt.timestamp && frame.timestamp != -1) {
 				frame.frame.reset();
 				frame.completed.clear();
 				LOG(WARNING) << "Frames out-of-phase by: " << spkt.timestamp - frame.timestamp;
 			}
-			frame.timestamp = spkt.timestamp;
+			frame.timestamp = spkt.timestamp;*/
 		}
 
 		// Add channel to frame and allocate memory if required
 		const cv::Size size = cv::Size(width, height);
-		frame.frame.create<cv::cuda::GpuMat>(spkt.channel).create(size, (isFloatChannel(rchan) ? CV_32FC1 : CV_8UC4));
+		frame.getBuffer<cv::cuda::GpuMat>(spkt.channel).create(size, (isFloatChannel(rchan) ? CV_32FC1 : CV_8UC4));
 
 		cv::Rect roi((i % tx)*width, (i / tx)*height, width, height);
 		cv::cuda::GpuMat sroi = surface(roi);
@@ -233,54 +236,61 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 			}
 
 			if (apply_Y_filter) ftl::cuda::smooth_y(sroi, cvstream);
-			ftl::cuda::vuya_to_depth(frame.frame.get<cv::cuda::GpuMat>(spkt.channel), sroi, 16.0f, cvstream);
+			ftl::cuda::vuya_to_depth(frame.getBuffer<cv::cuda::GpuMat>(spkt.channel), sroi, 16.0f, cvstream);
 		} else if (isFloatChannel(rchan)) {
-			sroi.convertTo(frame.frame.get<cv::cuda::GpuMat>(spkt.channel), CV_32FC1, 1.0f/1000.0f, cvstream);
+			sroi.convertTo(frame.getBuffer<cv::cuda::GpuMat>(spkt.channel), CV_32FC1, 1.0f/1000.0f, cvstream);
 		} else {
-			cv::cuda::cvtColor(sroi, frame.frame.get<cv::cuda::GpuMat>(spkt.channel), cv::COLOR_RGBA2BGRA, 0, cvstream);
+			cv::cuda::cvtColor(sroi, frame.getBuffer<cv::cuda::GpuMat>(spkt.channel), cv::COLOR_RGBA2BGRA, 0, cvstream);
 		}
 	}
 
-	Packet tmppkt = pkt;
-	iframe.frame.pushPacket(spkt.channel, tmppkt);
-
 	// Must ensure all processing is finished before completing a frame.
 	cudaSafeCall(cudaStreamSynchronize(decoder->stream()));
 
 	for (int i=0; i<pkt.frame_count; ++i) {
-		InternalVideoStates &frame = _getVideoFrame(spkt,i);
+		InternalVideoStates &vidstate = _getVideoFrame(spkt,i);
+		auto &frame = builder_.get(spkt.timestamp, spkt.frame_number+i);
 
 		auto sel = stream_->selected(spkt.frameSetID());
+
+		frame.create<cv::cuda::GpuMat>(spkt.channel);
+
+		if (i == 0) {
+			Packet tmppkt = pkt;
+			frame.pushPacket(spkt.channel, tmppkt);
+		}
 		
-		UNIQUE_LOCK(frame.mutex, lk);
-		if (frame.timestamp == spkt.timestamp) {
-			frame.completed += spkt.channel;
+		UNIQUE_LOCK(vidstate.mutex, lk);
+		//if (frame.timestamp == spkt.timestamp) {
+			//frame.completed += spkt.channel;
 			
 			// Complete if all requested channels are found
-			if ((frame.completed & sel) == sel) {
-				timestamp_ = frame.timestamp;
+			if ((frame.getChannels() & sel) == sel) {
+				timestamp_ = spkt.timestamp;
+				//frame.reset.clear();
 
 				//LOG(INFO) << "BUILDER PUSH: " << timestamp_ << ", " << spkt.frameNumber() << ", " << (int)pkt.frame_count;
 
-				if (frame.state.getLeft().width == 0) {
+				if (vidstate.state.getLeft().width == 0) {
 					LOG(WARNING) << "Missing calibration for frame";
 				}
 
 				// TODO: Have multiple builders for different framesets.
-				builder_.push(frame.timestamp, spkt.frameNumber()+i, frame.frame);
+				//builder_.push(frame.timestamp, spkt.frameNumber()+i, frame.frame);
+				builder_.completed(spkt.timestamp, spkt.frame_number+i);
 
 				// Check for any state changes and send them back
-				if (frame.state.hasChanged(Channel::Pose)) injectPose(stream_, frame.frame, frame.timestamp, spkt.frameNumber()+i);
-				if (frame.state.hasChanged(Channel::Calibration)) injectCalibration(stream_, frame.frame, frame.timestamp, spkt.frameNumber()+i);
-				if (frame.state.hasChanged(Channel::Calibration2)) injectCalibration(stream_, frame.frame, frame.timestamp, spkt.frameNumber()+i, true);
+				if (vidstate.state.hasChanged(Channel::Pose)) injectPose(stream_, frame, spkt.timestamp, spkt.frameNumber()+i);
+				if (vidstate.state.hasChanged(Channel::Calibration)) injectCalibration(stream_, frame, spkt.timestamp, spkt.frameNumber()+i);
+				if (vidstate.state.hasChanged(Channel::Calibration2)) injectCalibration(stream_, frame, spkt.timestamp, spkt.frameNumber()+i, true);
 
-				frame.frame.reset();
-				frame.completed.clear();
-				frame.timestamp = -1;
+				//frame.reset();
+				//frame.completed.clear();
+				//frame.timestamp = -1;
 			}
-		} else {
-			LOG(ERROR) << "Frame timestamps mistmatch";
-		}
+		//} else {
+		//	LOG(ERROR) << "Frame timestamps mistmatch";
+		//}
 	}
 }
 
diff --git a/components/streams/src/stream.cpp b/components/streams/src/stream.cpp
index 332c9215f381b161d332fb213adeadaf76d1cb1b..2710dfdd2add91de17924885c06ad457bb2f78d5 100644
--- a/components/streams/src/stream.cpp
+++ b/components/streams/src/stream.cpp
@@ -1,5 +1,8 @@
 #include <ftl/streams/stream.hpp>
 
+#define LOGURU_WITH_STREAMS 1
+#include <loguru.hpp>
+
 using ftl::stream::Muxer;
 using ftl::stream::Broadcast;
 using ftl::stream::Intercept;
@@ -153,7 +156,12 @@ int Muxer::_lookup(int sid, int ssid) {
 void Muxer::_notify(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 	SHARED_LOCK(mutex_, lk);
 	available(spkt.frameSetID()) += spkt.channel;
-	if (cb_) cb_(spkt, pkt);  // spkt.frame_number < 255 && 
+
+	try {
+		if (cb_) cb_(spkt, pkt);  // spkt.frame_number < 255 && 
+	} catch (std::exception &e) {
+		LOG(ERROR) << "Exception in packet handler: " << e.what();
+	}
 }
 
 // ==== Broadcaster ============================================================
diff --git a/components/structures/include/ftl/data/frame.hpp b/components/structures/include/ftl/data/frame.hpp
index 14f1c1e25ac2d5085ff6fb81368515221dada391..4016b2f70d7d266b71205e4bd0986734396dc131 100644
--- a/components/structures/include/ftl/data/frame.hpp
+++ b/components/structures/include/ftl/data/frame.hpp
@@ -161,7 +161,10 @@ public:
 	template <typename T> void get(ftl::codecs::Channel channel, T &params) const;
 
 	/**
-	 * Method to get reference to the channel content.
+	 * Method to get a reference to the channel content. The channel must
+	 * already have been created or this will throw an exception. See
+	 * `getBuffer` for access before creation.
+	 * 
 	 * @param	Channel type
 	 * @return	Reference to channel data
 	 * 
@@ -169,6 +172,19 @@ public:
 	 */
 	template <typename T> T& get(ftl::codecs::Channel channel);
 
+	/**
+	 * Method to get reference to the channel content. Unlike `get`, the channel
+	 * must not already exist as this is intended as a pre-create step that
+	 * allocates memory and populates the buffer. `create` must then be called
+	 * to make the channel available.
+	 * 
+	 * @param	Channel type
+	 * @return	Reference to channel data
+	 * 
+	 * The buffer only becomes available via `get` once `create` has been called.
+	 */
+	template <typename T> T& getBuffer(ftl::codecs::Channel channel);
+
 	/**
 	 * Wrapper accessor function to get frame pose.
 	 */
@@ -240,6 +256,8 @@ public:
 	 */
 	STATE *origin() const { return origin_; }
 
+	//ftl::codecs::Channels<BASE> completed;
+
 	typedef STATE State;
 
 protected:
@@ -346,6 +364,25 @@ T& ftl::data::Frame<BASE,N,STATE,DATA>::get(ftl::codecs::Channel channel) {
 	return getData(channel).template as<T>();
 }
 
+template <int BASE, int N, typename STATE, typename DATA>
+// cppcheck-suppress *
+template <typename T>
+T& ftl::data::Frame<BASE,N,STATE,DATA>::getBuffer(ftl::codecs::Channel channel) {
+	if (channel == ftl::codecs::Channel::None) {
+		throw ftl::exception("Attempting to get channel 'None'");
+	}
+
+	if (channels_.has(channel)) {
+		throw ftl::exception(ftl::Formatter() << "Cannot getBuffer on existing channel: " << (int)channel);
+	}
+
+	if (static_cast<int>(channel) < BASE || static_cast<int>(channel) >= BASE+32) {
+		throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
+	}
+
+	return getData(channel).template make<T>();
+}
+
 template <int BASE, int N, typename STATE, typename DATA>
 // cppcheck-suppress *
 template <typename T>
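As used in `receiver.cpp` above, the intended call order is: allocate and fill via `getBuffer`, then publish the channel with `create`. A minimal sketch, assuming `frame` is an rgbd frame and `size` is the target `cv::Size`:

```cpp
// Fill the backing store before the channel officially exists...
auto &buf = frame.getBuffer<cv::cuda::GpuMat>(ftl::codecs::Channel::Depth);
buf.create(size, CV_32FC1);
// ... decode or write into `buf` here ...

// ...then make the channel available; get<T>() is valid only after this.
frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Depth);
```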