From a5217c4d4e9addb9e32fc2390933c0a84165df03 Mon Sep 17 00:00:00 2001
From: Nicolas Pope <nicolas.pope@utu.fi>
Date: Tue, 28 Jul 2020 10:25:40 +0300
Subject: [PATCH] Resolves #343: GUI and Frame Refactor

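The old GUI (applications/gui) and the standalone calibration applications are
no longer built; they are replaced by a rewritten applications/gui2 (split into
modules, views and widgets) and a new applications/reconstruct2 front end, with
calibration logic moved into components/calibration. The frame refactor drops
ftl/data/frame.hpp, frameset.hpp and framestate.hpp in favour of the
new_frame / new_frameset types backed by ftl::data::Pool, and the stereovideo
source keeps only the new rectification code. Streams gain builder, feed and
renderer components (screen and OpenVR), and the CI build switches to Ninja
and deploys ftl-gui2.

A rough sketch of the frame API that the SDK stream code below is migrated to
(all names are taken from the diff; this is not a complete, buildable example):

    // Frames now come from a pool and are grouped into shared framesets.
    ftl::data::Pool pool(2, 5);
    auto fs = std::make_shared<ftl::data::FrameSet>(
        &pool, ftl::data::FrameID(0, 0), ftl::timer::get_time());

    fs->resize(sourceId + 1);                 // replaces frames.emplace_back()
    if (fs->frames[sourceId].status() == ftl::data::FrameStatus::CREATED) {
        fs->frames[sourceId].store();         // frames are stored before use
    }

    // Typed access goes through cast<>(), and setters return references.
    auto &frame = fs->frames[sourceId].cast<ftl::rgbd::Frame>();
    frame.setLeft() = cam;                    // was frame.setLeft(cam)
    frame.setPose() = pose.cast<double>();    // was frame.setPose(pose)

    // Senders are given the individually changed channels, not the whole set.
    for (auto &c : fs->firstFrame().changed()) {
        sender->post(*fs, c.first);
    }
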
---
 .gitlab-ci.yml                                |   11 +-
 CMakeLists.txt                                |   67 +-
 Doxyfile                                      |    4 +-
 SDK/C/src/streams.cpp                         |  127 +-
 applications/calibration-multi/src/main.cpp   |   72 +-
 .../calibration-multi/src/multicalibrate.cpp  |  210 +--
 .../calibration-multi/src/visibility.hpp      |   10 +-
 applications/calibration/src/lens.cpp         |   32 +-
 applications/ftl2mkv/src/main.cpp             |   18 +-
 applications/gui/src/config_window.cpp        |    6 +-
 applications/gui/src/config_window.hpp        |    2 +-
 applications/gui2/CMakeLists.txt              |   77 ++
 applications/gui2/README.md                   |   50 +
 applications/gui2/src/inputoutput.cpp         |   43 +
 applications/gui2/src/inputoutput.hpp         |   50 +
 applications/gui2/src/main.cpp                |  209 +++
 applications/gui2/src/module.hpp              |   33 +
 applications/gui2/src/modules.hpp             |   11 +
 applications/gui2/src/modules/addsource.cpp   |   78 ++
 applications/gui2/src/modules/addsource.hpp   |   43 +
 .../src/modules/calibration/calibration.cpp   |  381 ++++++
 .../src/modules/calibration/calibration.hpp   |  484 +++++++
 .../src/modules/calibration/extrinsic.cpp     |  420 ++++++
 .../src/modules/calibration/intrinsic.cpp     |  391 ++++++
 .../gui2/src/modules/calibration/stereo.cpp   |  366 ++++++
 applications/gui2/src/modules/camera.cpp      |  454 +++++++
 applications/gui2/src/modules/camera.hpp      |  101 ++
 applications/gui2/src/modules/config.cpp      |   30 +
 applications/gui2/src/modules/config.hpp      |   32 +
 applications/gui2/src/modules/statistics.cpp  |   98 ++
 applications/gui2/src/modules/statistics.hpp  |   56 +
 applications/gui2/src/modules/themes.cpp      |  103 ++
 applications/gui2/src/modules/themes.hpp      |   16 +
 applications/gui2/src/modules/thumbnails.cpp  |   76 ++
 applications/gui2/src/modules/thumbnails.hpp  |   33 +
 applications/gui2/src/screen.cpp              |  216 ++++
 applications/gui2/src/screen.hpp              |  165 +++
 applications/gui2/src/view.cpp                |   10 +
 applications/gui2/src/view.hpp                |   37 +
 applications/gui2/src/views/addsource.cpp     |  274 ++++
 applications/gui2/src/views/addsource.hpp     |   41 +
 .../src/views/calibration/extrinsicview.cpp   |  589 +++++++++
 .../src/views/calibration/extrinsicview.hpp   |   93 ++
 .../src/views/calibration/intrinsicview.cpp   |  629 +++++++++
 .../src/views/calibration/intrinsicview.hpp   |   75 ++
 .../gui2/src/views/calibration/stereoview.cpp |  511 ++++++++
 .../gui2/src/views/calibration/stereoview.hpp |   71 +
 .../src/views/calibration/visualization.hpp   |   85 ++
 .../gui2/src/views/calibration/widgets.cpp    |  165 +++
 .../gui2/src/views/calibration/widgets.hpp    |   35 +
 applications/gui2/src/views/camera.cpp        |  508 ++++++++
 applications/gui2/src/views/camera.hpp        |   44 +
 applications/gui2/src/views/camera3d.cpp      |  138 ++
 applications/gui2/src/views/camera3d.hpp      |   51 +
 applications/gui2/src/views/config.cpp        |  356 +++++
 applications/gui2/src/views/config.hpp        |   36 +
 applications/gui2/src/views/statistics.cpp    |  106 ++
 applications/gui2/src/views/statistics.hpp    |   29 +
 applications/gui2/src/views/thumbnails.cpp    |  211 +++
 applications/gui2/src/views/thumbnails.hpp    |   50 +
 applications/gui2/src/widgets/combobox.cpp    |  103 ++
 applications/gui2/src/widgets/combobox.hpp    |   95 ++
 applications/gui2/src/widgets/imageview.cpp   |  626 +++++++++
 applications/gui2/src/widgets/imageview.hpp   |  247 ++++
 applications/gui2/src/widgets/leftbutton.cpp  |  121 ++
 applications/gui2/src/widgets/leftbutton.hpp  |   23 +
 applications/gui2/src/widgets/popupbutton.cpp |  118 ++
 applications/gui2/src/widgets/popupbutton.hpp |   52 +
 applications/gui2/src/widgets/soundctrl.cpp   |  105 ++
 applications/gui2/src/widgets/soundctrl.hpp   |   50 +
 applications/gui2/src/widgets/window.hpp      |   23 +
 applications/reconstruct2/CMakeLists.txt      |   21 +
 applications/reconstruct2/src/main.cpp        |  147 +++
 applications/tools/CMakeLists.txt             |    3 +-
 .../tools/middlebury_gen/CMakeLists.txt       |    1 +
 .../tools/middlebury_gen/src/main.cpp         |   24 +-
 .../tools/simple_viewer/CMakeLists.txt        |   12 +
 applications/tools/simple_viewer/main.cpp     |  208 +++
 applications/vision/CMakeLists.txt            |    4 +-
 applications/vision/src/main.cpp              |  185 ++-
 applications/vision/src/middlebury.cpp        |  301 -----
 applications/vision/src/streamer.cpp          |   64 -
 applications/vision/src/sync.cpp              |   32 -
 cmake/FindPylon.cmake                         |   10 +
 cmake/Findglog.cmake                          |    2 +-
 components/audio/CMakeLists.txt               |    1 -
 components/audio/include/ftl/audio/audio.hpp  |   10 +
 components/audio/include/ftl/audio/buffer.hpp |    7 +-
 components/audio/include/ftl/audio/frame.hpp  |   36 +-
 .../audio/include/ftl/audio/frameset.hpp      |    4 +-
 components/audio/include/ftl/audio/source.hpp |   26 +-
 .../audio/include/ftl/audio/speaker.hpp       |    3 +
 components/audio/src/portaudio.cpp            |    6 +-
 components/audio/src/software_decoder.cpp     |   13 +-
 components/audio/src/software_encoder.cpp     |   20 +-
 components/audio/src/source.cpp               |   76 +-
 components/audio/src/speaker.cpp              |   49 +-
 components/calibration/CMakeLists.txt         |   10 +-
 .../calibration/include/ftl/calibration.hpp   |    4 +
 .../include/ftl/calibration/extrinsic.hpp     |  244 ++++
 .../include/ftl/calibration/object.hpp        |   52 +
 .../include/ftl/calibration/optimize.hpp      |  107 +-
 .../include/ftl/calibration/parameters.hpp    |   84 +-
 .../include/ftl/calibration/structures.hpp    |  119 ++
 .../include/ftl/calibration/visibility.hpp    |  109 ++
 components/calibration/src/extrinsic.cpp      |  719 +++++++++++
 components/calibration/src/object.cpp         |  159 +++
 components/calibration/src/optimize.cpp       |  440 +++++--
 components/calibration/src/parameters.cpp     |  167 +--
 components/calibration/src/structures.cpp     |  234 ++++
 components/calibration/src/visibility.cpp     |  307 +++++
 components/calibration/test/CMakeLists.txt    |   27 +-
 .../calibration/test/test_extrinsic.cpp       |   48 +
 components/calibration/test/test_helper.cpp   |  139 ++
 .../calibration/test/test_parameters.cpp      |   33 +-
 components/codecs/CMakeLists.txt              |    9 +
 .../codecs/include/ftl/codecs/channels.hpp    |   13 +-
 .../codecs/include/ftl/codecs/codecs.hpp      |    5 +-
 .../codecs/include/ftl/codecs/packet.hpp      |   47 +-
 .../codecs/include/ftl/codecs/touch.hpp       |   35 +
 components/codecs/src/channels.cpp            |  127 +-
 components/codecs/src/depth_convert.cu        |    7 -
 components/codecs/src/nvidia_decoder.cpp      |    9 +-
 components/codecs/src/nvidia_encoder.cpp      |   25 +-
 components/codecs/src/opencv_decoder.cpp      |    4 +-
 components/codecs/src/reader.cpp              |    2 +-
 components/codecs/test/CMakeLists.txt         |   18 +-
 components/codecs/test/nvidia_codec_unit.cpp  |    2 +-
 components/codecs/test/opencv_codec_unit.cpp  |    1 -
 components/common/cpp/CMakeLists.txt          |    9 +
 .../common/cpp/include/ftl/configurable.hpp   |   48 +-
 .../common/cpp/include/ftl/configuration.hpp  |   24 +-
 .../common/cpp/include/ftl/cuda_operators.hpp |   12 +-
 .../common/cpp/include/ftl/exception.hpp      |    2 +-
 components/common/cpp/include/ftl/handle.hpp  |   76 +-
 components/common/cpp/include/ftl/threads.hpp |    2 +
 components/common/cpp/include/ftl/timer.hpp   |   46 +-
 .../common/cpp/include/ftl/transactional.hpp  |   48 +
 components/common/cpp/include/ftl/uri.hpp     |   28 +-
 .../cpp/include/ftl/utility/msgpack.hpp       |  105 +-
 .../common/cpp/include/ftl/utility/string.hpp |   12 +
 components/common/cpp/src/configurable.cpp    |   36 +-
 components/common/cpp/src/configuration.cpp   |  200 ++-
 components/common/cpp/src/cuda_common.cpp     |    4 +-
 components/common/cpp/src/timer.cpp           |  104 +-
 components/common/cpp/src/uri.cpp             |   44 +-
 .../common/cpp/test/configurable_unit.cpp     |    6 +-
 components/common/cpp/test/msgpack_unit.cpp   |   24 +-
 components/common/cpp/test/timer_unit.cpp     |   25 +-
 components/common/cpp/test/uri_unit.cpp       |   12 +
 components/control/cpp/src/master.cpp         |   24 +-
 components/net/cpp/CMakeLists.txt             |    2 +
 .../net/cpp/include/ftl/net/dispatcher.hpp    |   21 +-
 .../net/cpp/include/ftl/net/listener.hpp      |    3 +
 components/net/cpp/include/ftl/net/peer.hpp   |   11 +-
 .../net/cpp/include/ftl/net/universe.hpp      |  128 +-
 components/net/cpp/src/dispatcher.cpp         |    6 +-
 components/net/cpp/src/listener.cpp           |    2 +-
 components/net/cpp/src/peer.cpp               |   98 +-
 components/net/cpp/src/universe.cpp           |   68 +-
 components/operators/CMakeLists.txt           |    4 +
 .../include/ftl/operators/smoothing.hpp       |   12 +-
 components/operators/src/aruco.cpp            |   17 +-
 components/operators/src/clipping.cpp         |   14 +-
 components/operators/src/colours.cpp          |   18 +-
 components/operators/src/depth.cpp            |   22 +-
 components/operators/src/detectandtrack.cpp   |   14 +-
 .../src/disparity/bilateral_filter.cpp        |    2 +-
 .../operators/src/disparity/fixstars_sgm.cpp  |   16 +-
 .../operators/src/disparity/libstereo.cpp     |    2 +-
 .../src/disparity/optflow_smoothing.cpp       |    8 +-
 components/operators/src/fusion/mvmls.cpp     |   37 +-
 components/operators/src/gt_analysis.cpp      |    4 +-
 components/operators/src/mask.cpp             |    7 +-
 components/operators/src/normals.cpp          |    9 +-
 components/operators/src/nvopticalflow.cpp    |    2 +-
 components/operators/src/operator.cpp         |   18 +-
 components/operators/src/poser.cpp            |   27 +-
 components/operators/src/segmentation.cpp     |    9 +-
 components/operators/src/smoothing.cpp        |   62 +-
 components/operators/src/weighting.cpp        |   11 +-
 components/renderers/cpp/CMakeLists.txt       |    6 +
 .../renderers/cpp/include/ftl/cuda/touch.hpp  |   28 +
 .../cpp/include/ftl/render/CUDARender.hpp     |   21 +-
 .../cpp/include/ftl/render/overlay.hpp        |    4 +-
 .../cpp/include/ftl/render/renderer.hpp       |    6 +-
 .../cpp/include/ftl/utility/gltexture.hpp     |   57 +
 components/renderers/cpp/src/CUDARender.cpp   |  165 ++-
 components/renderers/cpp/src/colouriser.cpp   |   10 +-
 components/renderers/cpp/src/dibr.cu          |    9 +-
 components/renderers/cpp/src/gltexture.cpp    |  188 +++
 components/renderers/cpp/src/overlay.cpp      | 1132 ++++++++--------
 components/renderers/cpp/src/reprojection.cu  |   15 +-
 components/renderers/cpp/src/screen.cu        |    6 +
 components/renderers/cpp/src/touch.cu         |   50 +
 components/rgbd-sources/CMakeLists.txt        |   13 +-
 .../include/ftl/cb_segmentation.hpp           |  117 --
 .../rgbd-sources/include/ftl/rgbd/camera.hpp  |   16 +
 .../include/ftl/rgbd/capabilities.hpp         |   37 +
 .../rgbd-sources/include/ftl/rgbd/format.hpp  |    2 +-
 .../rgbd-sources/include/ftl/rgbd/frame.hpp   |  345 ++---
 .../include/ftl/rgbd/frameset.hpp             |  173 +--
 .../rgbd-sources/include/ftl/rgbd/group.hpp   |    6 +-
 .../rgbd-sources/include/ftl/rgbd/source.hpp  |  107 +-
 components/rgbd-sources/src/basesource.hpp    |    9 +-
 components/rgbd-sources/src/camera.cpp        |   18 +
 .../rgbd-sources/src/cb_segmentation.cpp      |  216 ----
 components/rgbd-sources/src/frame.cpp         |  277 ++--
 components/rgbd-sources/src/init.cpp          |   16 +
 components/rgbd-sources/src/source.cpp        |  127 +-
 .../sources/middlebury/middlebury_source.cpp  |    8 +-
 .../sources/middlebury/middlebury_source.hpp  |    2 +-
 .../sources/realsense/realsense_source.cpp    |  101 +-
 .../sources/realsense/realsense_source.hpp    |   14 +-
 .../sources/screencapture/screencapture.cpp   |  179 ++-
 .../sources/screencapture/screencapture.hpp   |   16 +-
 .../src/sources/stereovideo/calibrate.cpp     |  334 -----
 .../src/sources/stereovideo/calibrate.hpp     |  241 ----
 .../src/sources/stereovideo/device.hpp        |   16 +-
 .../src/sources/stereovideo/opencv.cpp        |   70 +-
 .../src/sources/stereovideo/opencv.hpp        |   21 +-
 .../src/sources/stereovideo/pylon.cpp         |  184 ++-
 .../src/sources/stereovideo/pylon.hpp         |   14 +-
 .../src/sources/stereovideo/rectification.cpp |  208 +++
 .../src/sources/stereovideo/rectification.hpp |  100 ++
 .../src/sources/stereovideo/stereovideo.cpp   |  410 +++---
 .../src/sources/stereovideo/stereovideo.hpp   |   27 +-
 components/rgbd-sources/test/CMakeLists.txt   |   11 -
 components/rgbd-sources/test/source_unit.cpp  |    4 +
 components/streams/CMakeLists.txt             |   12 +-
 .../streams/include/ftl/streams/builder.hpp   |  185 +++
 .../streams/include/ftl/streams/feed.hpp      |  222 +++-
 .../include/ftl/streams/filestream.hpp        |   10 +-
 .../streams/include/ftl/streams/injectors.hpp |    1 +
 .../streams/include/ftl/streams/netstream.hpp |   12 +-
 .../streams/include/ftl/streams/receiver.hpp  |   49 +-
 .../streams/include/ftl/streams/renderer.hpp  |   41 +
 .../streams/include/ftl/streams/sender.hpp    |   44 +-
 .../streams/include/ftl/streams/stream.hpp    |   86 +-
 components/streams/src/baserender.hpp         |   37 +
 components/streams/src/builder.cpp            |  440 +++++++
 components/streams/src/feed.cpp               | 1142 +++++++++++++++++
 components/streams/src/filestream.cpp         |  120 +-
 components/streams/src/injectors.cpp          |   10 +-
 components/streams/src/netstream.cpp          |  142 +-
 components/streams/src/receiver.cpp           |  385 ++++--
 components/streams/src/renderer.cpp           |   74 ++
 .../streams/src/renderers/collisions.cpp      |   73 ++
 .../streams/src/renderers/collisions.hpp      |   18 +
 .../streams/src/renderers/openvr_render.cpp   |  448 +++++++
 .../streams/src/renderers/openvr_render.hpp   |   64 +
 .../streams/src/renderers/screen_render.cpp   |  205 +++
 .../streams/src/renderers/screen_render.hpp   |   43 +
 components/streams/src/sender.cpp             |  472 +++++--
 components/streams/src/stream.cpp             |  162 ++-
 components/streams/test/CMakeLists.txt        |   55 +
 components/streams/test/builder_unit.cpp      |  201 +++
 components/streams/test/feed_unit.cpp         |   34 +
 components/streams/test/filestream_unit.cpp   |   43 +-
 components/streams/test/receiver_unit.cpp     |  411 ++++--
 components/streams/test/recsend_unit.cpp      |  179 +++
 components/streams/test/sender_unit.cpp       |  379 +++++-
 components/streams/test/stream_unit.cpp       |   45 +-
 components/structures/CMakeLists.txt          |   12 +-
 .../structures/include/ftl/data/channels.hpp  |   22 +-
 .../structures/include/ftl/data/creators.hpp  |   67 +
 .../structures/include/ftl/data/frame.hpp     |  567 --------
 .../structures/include/ftl/data/framepool.hpp |   31 +-
 .../structures/include/ftl/data/frameset.hpp  |  244 ----
 .../include/ftl/data/framestate.hpp           |  302 -----
 .../structures/include/ftl/data/messages.hpp  |   30 +
 .../structures/include/ftl/data/new_frame.hpp |  548 +++++++-
 .../include/ftl/data/new_frameset.hpp         |  151 +++
 components/structures/src/creators.cpp        |   44 +
 components/structures/src/frameset.cpp        |  144 +++
 components/structures/src/new_frame.cpp       |  308 ++++-
 components/structures/src/pool.cpp            |   52 +-
 components/structures/test/CMakeLists.txt     |   17 +-
 .../structures/test/frame_example_1.cpp       |  178 ++-
 components/structures/test/frame_unit.cpp     |  313 +++--
 components/structures/test/pool_unit.cpp      |  121 +-
 env                                           |    1 +
 lib/libsgm/CMakeLists.txt                     |   29 +-
 lib/libsgm/include/libsgm_config.h            |    8 +-
 lib/libsgm/src/CMakeLists.txt                 |   75 +-
 lib/libstereo/CMakeLists.txt                  |    1 +
 lib/libstereo/test/CMakeLists.txt             |    6 +
 web-service/public/js/bundle.js               |   20 +-
 web-service/public/js/index.js                |   16 +-
 web-service/server/src/index.js               |   60 +-
 290 files changed, 24173 insertions(+), 6572 deletions(-)
 create mode 100644 applications/gui2/CMakeLists.txt
 create mode 100644 applications/gui2/README.md
 create mode 100644 applications/gui2/src/inputoutput.cpp
 create mode 100644 applications/gui2/src/inputoutput.hpp
 create mode 100644 applications/gui2/src/main.cpp
 create mode 100644 applications/gui2/src/module.hpp
 create mode 100644 applications/gui2/src/modules.hpp
 create mode 100644 applications/gui2/src/modules/addsource.cpp
 create mode 100644 applications/gui2/src/modules/addsource.hpp
 create mode 100644 applications/gui2/src/modules/calibration/calibration.cpp
 create mode 100644 applications/gui2/src/modules/calibration/calibration.hpp
 create mode 100644 applications/gui2/src/modules/calibration/extrinsic.cpp
 create mode 100644 applications/gui2/src/modules/calibration/intrinsic.cpp
 create mode 100644 applications/gui2/src/modules/calibration/stereo.cpp
 create mode 100644 applications/gui2/src/modules/camera.cpp
 create mode 100644 applications/gui2/src/modules/camera.hpp
 create mode 100644 applications/gui2/src/modules/config.cpp
 create mode 100644 applications/gui2/src/modules/config.hpp
 create mode 100644 applications/gui2/src/modules/statistics.cpp
 create mode 100644 applications/gui2/src/modules/statistics.hpp
 create mode 100644 applications/gui2/src/modules/themes.cpp
 create mode 100644 applications/gui2/src/modules/themes.hpp
 create mode 100644 applications/gui2/src/modules/thumbnails.cpp
 create mode 100644 applications/gui2/src/modules/thumbnails.hpp
 create mode 100644 applications/gui2/src/screen.cpp
 create mode 100644 applications/gui2/src/screen.hpp
 create mode 100644 applications/gui2/src/view.cpp
 create mode 100644 applications/gui2/src/view.hpp
 create mode 100644 applications/gui2/src/views/addsource.cpp
 create mode 100644 applications/gui2/src/views/addsource.hpp
 create mode 100644 applications/gui2/src/views/calibration/extrinsicview.cpp
 create mode 100644 applications/gui2/src/views/calibration/extrinsicview.hpp
 create mode 100644 applications/gui2/src/views/calibration/intrinsicview.cpp
 create mode 100644 applications/gui2/src/views/calibration/intrinsicview.hpp
 create mode 100644 applications/gui2/src/views/calibration/stereoview.cpp
 create mode 100644 applications/gui2/src/views/calibration/stereoview.hpp
 create mode 100644 applications/gui2/src/views/calibration/visualization.hpp
 create mode 100644 applications/gui2/src/views/calibration/widgets.cpp
 create mode 100644 applications/gui2/src/views/calibration/widgets.hpp
 create mode 100644 applications/gui2/src/views/camera.cpp
 create mode 100644 applications/gui2/src/views/camera.hpp
 create mode 100644 applications/gui2/src/views/camera3d.cpp
 create mode 100644 applications/gui2/src/views/camera3d.hpp
 create mode 100644 applications/gui2/src/views/config.cpp
 create mode 100644 applications/gui2/src/views/config.hpp
 create mode 100644 applications/gui2/src/views/statistics.cpp
 create mode 100644 applications/gui2/src/views/statistics.hpp
 create mode 100644 applications/gui2/src/views/thumbnails.cpp
 create mode 100644 applications/gui2/src/views/thumbnails.hpp
 create mode 100644 applications/gui2/src/widgets/combobox.cpp
 create mode 100644 applications/gui2/src/widgets/combobox.hpp
 create mode 100644 applications/gui2/src/widgets/imageview.cpp
 create mode 100644 applications/gui2/src/widgets/imageview.hpp
 create mode 100644 applications/gui2/src/widgets/leftbutton.cpp
 create mode 100644 applications/gui2/src/widgets/leftbutton.hpp
 create mode 100644 applications/gui2/src/widgets/popupbutton.cpp
 create mode 100644 applications/gui2/src/widgets/popupbutton.hpp
 create mode 100644 applications/gui2/src/widgets/soundctrl.cpp
 create mode 100644 applications/gui2/src/widgets/soundctrl.hpp
 create mode 100644 applications/gui2/src/widgets/window.hpp
 create mode 100644 applications/reconstruct2/CMakeLists.txt
 create mode 100644 applications/reconstruct2/src/main.cpp
 create mode 100644 applications/tools/simple_viewer/CMakeLists.txt
 create mode 100644 applications/tools/simple_viewer/main.cpp
 delete mode 100644 applications/vision/src/middlebury.cpp
 delete mode 100644 applications/vision/src/streamer.cpp
 delete mode 100644 applications/vision/src/sync.cpp
 create mode 100644 components/calibration/include/ftl/calibration/extrinsic.hpp
 create mode 100644 components/calibration/include/ftl/calibration/object.hpp
 create mode 100644 components/calibration/include/ftl/calibration/structures.hpp
 create mode 100644 components/calibration/include/ftl/calibration/visibility.hpp
 create mode 100644 components/calibration/src/extrinsic.cpp
 create mode 100644 components/calibration/src/object.cpp
 create mode 100644 components/calibration/src/structures.cpp
 create mode 100644 components/calibration/src/visibility.cpp
 create mode 100644 components/calibration/test/test_extrinsic.cpp
 create mode 100644 components/calibration/test/test_helper.cpp
 create mode 100644 components/codecs/include/ftl/codecs/touch.hpp
 create mode 100644 components/common/cpp/include/ftl/transactional.hpp
 create mode 100644 components/common/cpp/include/ftl/utility/string.hpp
 create mode 100644 components/renderers/cpp/include/ftl/cuda/touch.hpp
 create mode 100644 components/renderers/cpp/include/ftl/utility/gltexture.hpp
 create mode 100644 components/renderers/cpp/src/gltexture.cpp
 create mode 100644 components/renderers/cpp/src/touch.cu
 delete mode 100644 components/rgbd-sources/include/ftl/cb_segmentation.hpp
 create mode 100644 components/rgbd-sources/include/ftl/rgbd/capabilities.hpp
 delete mode 100644 components/rgbd-sources/src/cb_segmentation.cpp
 create mode 100644 components/rgbd-sources/src/init.cpp
 delete mode 100644 components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
 delete mode 100644 components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
 create mode 100644 components/rgbd-sources/src/sources/stereovideo/rectification.cpp
 create mode 100644 components/rgbd-sources/src/sources/stereovideo/rectification.hpp
 create mode 100644 components/streams/include/ftl/streams/builder.hpp
 create mode 100644 components/streams/include/ftl/streams/renderer.hpp
 create mode 100644 components/streams/src/baserender.hpp
 create mode 100644 components/streams/src/builder.cpp
 create mode 100644 components/streams/src/feed.cpp
 create mode 100644 components/streams/src/renderer.cpp
 create mode 100644 components/streams/src/renderers/collisions.cpp
 create mode 100644 components/streams/src/renderers/collisions.hpp
 create mode 100644 components/streams/src/renderers/openvr_render.cpp
 create mode 100644 components/streams/src/renderers/openvr_render.hpp
 create mode 100644 components/streams/src/renderers/screen_render.cpp
 create mode 100644 components/streams/src/renderers/screen_render.hpp
 create mode 100644 components/streams/test/builder_unit.cpp
 create mode 100644 components/streams/test/feed_unit.cpp
 create mode 100644 components/streams/test/recsend_unit.cpp
 create mode 100644 components/structures/include/ftl/data/creators.hpp
 delete mode 100644 components/structures/include/ftl/data/frame.hpp
 delete mode 100644 components/structures/include/ftl/data/frameset.hpp
 delete mode 100644 components/structures/include/ftl/data/framestate.hpp
 create mode 100644 components/structures/include/ftl/data/messages.hpp
 create mode 100644 components/structures/include/ftl/data/new_frameset.hpp
 create mode 100644 components/structures/src/creators.cpp
 create mode 100644 components/structures/src/frameset.cpp
 create mode 100644 env

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ec02c8aa0..778d11c15 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -6,7 +6,7 @@
 
 variables:
   GIT_SUBMODULE_STRATEGY: recursive
-  CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DPORTAUDIO_DIR="D:/Build/portaudio" -DEigen3_DIR="C:/Program Files (x86)/Eigen3/share/eigen3/cmake" -DOpenCV_DIR="D:/Build/opencv/build/install" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.2" -DWITH_OPENVR=TRUE -DOPENVR_DIR="D:/Build/OpenVRSDK" -DWITH_CERES=FALSE'
+  CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DCeres_DIR="D:/Program Files/Ceres" -DPORTAUDIO_DIR="D:/Build/portaudio" -DPYLON_DIR="D:/Program Files/pylon6/Development" -DEigen3_DIR="C:/Program Files (x86)/Eigen3/share/eigen3/cmake" -DOpenCV_DIR="D:/Build/opencv/build/install" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.2" -DWITH_OPENVR=TRUE -DOPENVR_DIR="D:/Build/OpenVRSDK" -DWITH_CERES=TRUE'
 
 stages:
  - all
@@ -28,9 +28,9 @@ linux:
   script:
     - mkdir build
     - cd build
-    - cmake .. -DWITH_OPTFLOW=TRUE -DUSE_CPPCHECK=FALSE -DBUILD_CALIBRATION=TRUE -DWITH_CERES=TRUE -DCMAKE_BUILD_TYPE=Release -DCPACK_GENERATOR=DEB
-    - make
-    - make package
+    - /snap/bin/cmake .. -GNinja -DCMAKE_CXX_FLAGS="-fdiagnostics-color" -DWITH_OPTFLOW=TRUE -DUSE_CPPCHECK=FALSE -DBUILD_CALIBRATION=TRUE -DWITH_CERES=TRUE -DCMAKE_BUILD_TYPE=Release -DCPACK_GENERATOR=DEB
+    - ninja
+    - ninja package
     - ctest --output-on-failure
     - cd ../SDK/Python
     - python3 -m unittest discover test
@@ -63,8 +63,7 @@ webserver-deploy:
     - rmdir /q /s "%DEPLOY_DIR%/%CI_COMMIT_REF_SLUG%"
     - mkdir "%DEPLOY_DIR%/%CI_COMMIT_REF_SLUG%"
     - 'copy "applications\vision\Release\ftl-vision.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
-    - 'copy "applications\calibration\Release\ftl-calibrate.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
-    - 'copy "applications\gui\Release\ftl-gui.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
+    - 'copy "applications\gui2\Release\ftl-gui2.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
 
 windows-vision:
   except:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4df38ff5c..25fd1d0b5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.16.0)
 include (CheckIncludeFile)
 include (CheckIncludeFileCXX)
 include (CheckFunctionExists)
@@ -16,6 +16,7 @@ option(WITH_OPENVR "Build with OpenVR support" OFF)
 option(WITH_OPUS "Use Opus audio compression" ON)
 option(WITH_FIXSTARS "Use Fixstars libSGM" ON)
 option(WITH_CERES "Use Ceres solver" ON)
+option(WITH_SDK "Build the C shared SDK" ON)
 option(USE_CPPCHECK "Apply cppcheck during build" ON)
 option(BUILD_VISION "Enable the vision component" ON)
 option(BUILD_RECONSTRUCT "Enable the reconstruction component" ON)
@@ -163,22 +164,17 @@ add_subdirectory(lib/libstereo)
 include_directories(lib/libstereo/include)
 set_property(TARGET libstereo PROPERTY FOLDER "dependencies")
 
-#
-
-if (WITH_FIXSTARS)
-	set(HAVE_LIBSGM true)
-	add_subdirectory(lib/libsgm)
-	include_directories(lib/libsgm/include)
-	set_property(TARGET sgm PROPERTY FOLDER "dependencies")
-else()
-	add_library(sgm INTERFACE)
-endif()
-
 # ==== Ceres ===================================================================
 
 if (WITH_CERES)
+	#find_package(glog QUIET)
 	find_package(Ceres REQUIRED)
 	set(HAVE_CERES true)
+
+	if (WIN32)
+		# Hack to fix missing includes
+		set_property(TARGET ceres PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${Ceres_DIR}/../include)
+	endif()
 else()
 	add_library(ceres INTERFACE)
 endif()
@@ -237,6 +233,7 @@ if (BUILD_GUI)
 	set(NANOGUI_BUILD_EXAMPLE OFF CACHE BOOL " " FORCE)
 	set(NANOGUI_BUILD_PYTHON  OFF CACHE BOOL " " FORCE)
 	set(NANOGUI_INSTALL       OFF CACHE BOOL " " FORCE)
+	set(NANOGUI_EIGEN_INCLUDE_DIR ${EIGEN_INCLUDE_DIR} CACHE STRING " " FORCE)
 
 	# Add the configurations from nanogui
 	add_subdirectory(ext/nanogui)
@@ -305,16 +302,22 @@ endif()
 check_language(CUDA)
 if (CUDA_TOOLKIT_ROOT_DIR)
 enable_language(CUDA)
-set(CMAKE_CUDA_FLAGS "-Xcompiler -fPIC")
+
+if (NOT WIN32)
+	set(CMAKE_CUDA_FLAGS "-Xcompiler -fPIC")
+endif()
+set(CMAKE_CUDA_ARCHITECTURES 61)
 set(CMAKE_CUDA_FLAGS_DEBUG "--gpu-architecture=compute_61 -g -DDEBUG -D_DEBUG")
 set(CMAKE_CUDA_FLAGS_RELEASE "--gpu-architecture=compute_61")
 set(HAVE_CUDA TRUE)
 include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
 
 # Some kind of fix for nvcc and -pthread problem on Linux
+if (NOT WIN32)
 set_property(TARGET Threads::Threads
 				 PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler -pthread>
 													"$<$<NOT:$<COMPILE_LANGUAGE:CUDA>>:-pthread>")
+endif()
 
 endif ()
 
@@ -388,14 +391,15 @@ include(ftl_paths)
 
 if (WIN32) # TODO(nick) Should do based upon compiler (VS)
 	add_definitions(-DWIN32)
-	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++17")
+	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP4 /std:c++17 /wd4996")
 	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /DFTL_DEBUG /Wall")
 	set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2")
 	set(OS_LIBS "")
 else()
 	add_definitions(-DUNIX)
-	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -fPIC -msse3 -Wall")
-	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG -pg")
+	# -fdiagnostics-color
+	set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -fPIC -msse3 -Wall -Werror=unused-result -Werror=return-type")
+	set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -pg")
 	set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -mfpmath=sse")
 	set(OS_LIBS "dl")
 endif()
@@ -429,9 +433,18 @@ set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 #	add_library(nvpipe INTERFACE)
 #endif()
 
+if (WITH_FIXSTARS)
+	set(HAVE_LIBSGM true)
+	add_subdirectory(lib/libsgm)
+	include_directories(lib/libsgm/include)
+	set_property(TARGET sgm PROPERTY FOLDER "dependencies")
+else()
+	add_library(sgm INTERFACE)
+endif()
+
 
 add_subdirectory(components/common/cpp)
-add_subdirectory(applications/calibration)
+#add_subdirectory(applications/calibration)
 add_subdirectory(components/codecs)
 add_subdirectory(components/structures)
 add_subdirectory(components/net)
@@ -448,8 +461,10 @@ add_subdirectory(components/calibration)
 add_subdirectory(applications/tools)
 
 # SDK only compiles on linux currently
-if (NOT WIN32)
-	add_subdirectory(SDK/C)
+if (WITH_SDK)
+	if (NOT WIN32)
+		add_subdirectory(SDK/C)
+	endif()
 endif()
 
 if (HAVE_AVFORMAT)
@@ -465,21 +480,13 @@ if (BUILD_VISION)
 	add_subdirectory(applications/vision)
 endif()
 
-if (BUILD_CALIBRATION)
-	if (NOT HAVE_CERES)
-		message(ERROR "Ceres is required")
-	endif()
-
-	add_subdirectory(applications/calibration-ceres)
-	add_subdirectory(applications/calibration-multi)
-endif()
-
 if (BUILD_RECONSTRUCT)
-	add_subdirectory(applications/reconstruct)
+	add_subdirectory(applications/reconstruct2)
 endif()
 
 if (HAVE_NANOGUI)
-	add_subdirectory(applications/gui)
+	#add_subdirectory(applications/gui)
+	add_subdirectory(applications/gui2)
 endif()
 
 ### Generate Build Configuration Files =========================================
diff --git a/Doxyfile b/Doxyfile
index bf8029964..25c7f15d1 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -441,7 +441,7 @@ EXTRACT_ALL            = YES
 # be included in the documentation.
 # The default value is: NO.
 
-EXTRACT_PRIVATE        = NO
+EXTRACT_PRIVATE        = YES
 
 # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
 # scope will be included in the documentation.
@@ -2306,7 +2306,7 @@ UML_LIMIT_NUM_FIELDS   = 10
 # The default value is: NO.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
-TEMPLATE_RELATIONS     = NO
+TEMPLATE_RELATIONS     = YES
 
 # If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
 # YES then doxygen will generate a graph for each documented file showing the
diff --git a/SDK/C/src/streams.cpp b/SDK/C/src/streams.cpp
index 4e449b24b..e2a48c9bf 100644
--- a/SDK/C/src/streams.cpp
+++ b/SDK/C/src/streams.cpp
@@ -8,6 +8,7 @@
 #include <ftl/operators/operator.hpp>
 #include <ftl/operators/disparity.hpp>
 #include <ftl/operators/mvmls.hpp>
+#include <ftl/data/framepool.hpp>
 
 #include <opencv2/imgproc.hpp>
 
@@ -18,6 +19,8 @@ static ftlError_t last_error = FTLERROR_OK;
 static ftl::Configurable *root = nullptr;
 
 struct FTLStream {
+	FTLStream() : pool(2,5) {}
+
 	bool readonly;
 	ftl::stream::Sender *sender;
 	ftl::stream::Stream *stream;
@@ -27,8 +30,8 @@ struct FTLStream {
 
 	ftl::operators::Graph *pipelines;
 
-	std::vector<ftl::rgbd::FrameState> video_states;
-	ftl::rgbd::FrameSet video_fs;
+	ftl::data::Pool pool;
+	std::shared_ptr<ftl::rgbd::FrameSet> video_fs;
 };
 
 ftlError_t ftlGetLastStreamError(ftlStream_t stream) {
@@ -39,7 +42,7 @@ static void createFileWriteStream(FTLStream *s, const ftl::URI &uri) {
 	if (!root) {
 		int argc = 1;
 		const char *argv[] = {"SDK",0};
-		root = ftl::configure(argc, const_cast<char**>(argv), "sdk_default");
+		root = ftl::configure(argc, const_cast<char**>(argv), "sdk_default", {});
 	}
 
 	auto *fs = ftl::create<ftl::stream::File>(root, "ftlfile");
@@ -62,12 +65,11 @@ ftlStream_t ftlCreateWriteStream(const char *uri) {
 	s->stream = nullptr;
 	s->sender = nullptr;
 	s->pipelines = nullptr;
-	s->video_fs.id = 0;
-	s->video_fs.count = 0;
-	s->video_fs.mask = 0;
+	s->video_fs = std::make_shared<ftl::data::FrameSet>(&s->pool, ftl::data::FrameID(0,0), ftl::timer::get_time());
+	s->video_fs->count = 0;
+	s->video_fs->mask = 0;
 	s->interval = 40;
-	s->video_fs.frames.reserve(32);
-	s->video_states.resize(32);
+	//s->video_fs->frames.reserve(32);
 	s->has_fresh_data = false;
 
 	switch (u.getScheme()) {
@@ -87,7 +89,7 @@ ftlStream_t ftlCreateWriteStream(const char *uri) {
 	}
 	last_error = FTLERROR_OK;
 
-	s->video_fs.timestamp = ftl::timer::get_time();
+	//s->video_fs.timestamp = ftl::timer::get_time();
 
 	return s;
 }
@@ -106,10 +108,10 @@ ftlError_t ftlImageWrite(
 		return FTLERROR_STREAM_INVALID_PARAMETER;
 	if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
 		return FTLERROR_STREAM_BAD_CHANNEL;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (!data) return FTLERROR_STREAM_NO_DATA;
-	if (stream->video_fs.hasChannel(static_cast<ftl::codecs::Channel>(channel)))
+	if (stream->video_fs->hasChannel(static_cast<ftl::codecs::Channel>(channel)))
 		return FTLERROR_STREAM_DUPLICATE;
 
 	stream->sender->set("codec", 1);
@@ -117,9 +119,9 @@ ftlError_t ftlImageWrite(
 	stream->sender->set("lossless", true);
 
 	try {
-		auto &frame = stream->video_fs.frames[sourceId];
+		auto &frame = stream->video_fs->frames[sourceId];
 		auto &img = frame.create<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
-		auto &intrin = frame.getLeft();
+		auto &intrin = frame.cast<ftl::rgbd::Frame>().getLeft();
 
 		LOG(INFO) << "INTRIN: " << intrin.width << "x" << intrin.height << " for " << sourceId << ", " << (int)channel;
 
@@ -155,10 +157,10 @@ ftlError_t ftlImageWrite(
 		if (tmp2.empty()) return FTLERROR_STREAM_NO_DATA;
 		img.upload(tmp2);
 
-		ftl::codecs::Channels<0> channels;
-		if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs.id)) channels = stream->stream->selected(stream->video_fs.id);
+		std::unordered_set<ftl::codecs::Channel> channels;
+		if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs->id().frameset())) channels = stream->stream->selected(stream->video_fs->id().frameset());
 		channels += static_cast<ftl::codecs::Channel>(channel);
-		stream->stream->select(stream->video_fs.id, channels, true);
+		stream->stream->select(stream->video_fs->id().frameset(), channels, true);
 
 	} catch (const std::exception &e) {
 		return FTLERROR_UNKNOWN;
@@ -174,14 +176,18 @@ ftlError_t ftlIntrinsicsWriteLeft(ftlStream_t stream, int32_t sourceId, int32_t
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
 
-	while (stream->video_fs.frames.size() <= static_cast<unsigned int>(sourceId)) {
-		stream->video_fs.frames.emplace_back();
-	}
+	//while (stream->video_fs->frames.size() < static_cast<unsigned int>(sourceId)) {
+		stream->video_fs->resize(static_cast<unsigned int>(sourceId)+1);
+	//}
 
-	if (stream->video_fs.hasFrame(sourceId)) {
+	if (stream->video_fs->hasFrame(sourceId)) {
 		return FTLERROR_STREAM_DUPLICATE;
 	}
 
+	if (stream->video_fs->frames[sourceId].status() == ftl::data::FrameStatus::CREATED) {
+		stream->video_fs->frames[sourceId].store();
+	}
+
 	ftl::rgbd::Camera cam;
 	cam.fx = f;
 	cam.fy = f;
@@ -193,12 +199,12 @@ ftlError_t ftlIntrinsicsWriteLeft(ftlStream_t stream, int32_t sourceId, int32_t
 	cam.maxDepth = maxDepth;
 	cam.baseline = baseline;
 	cam.doffs = 0.0f;
-	stream->video_fs.mask |= 1 << sourceId;
-	stream->video_fs.count++;
-	if (!stream->video_fs.frames[sourceId].origin()) {
-		stream->video_fs.frames[sourceId].setOrigin(&stream->video_states[sourceId]);
-	}
-	stream->video_fs.frames[sourceId].setLeft(cam);
+	stream->video_fs->mask |= 1 << sourceId;
+	stream->video_fs->count++;
+	//if (!stream->video_fs->frames[sourceId].origin()) {
+	//	stream->video_fs.frames[sourceId].setOrigin(&stream->video_states[sourceId]);
+	//}
+	stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>().setLeft() = cam;
 	stream->has_fresh_data = true;
 
 	return FTLERROR_OK;
@@ -209,7 +215,7 @@ ftlError_t ftlIntrinsicsWriteRight(ftlStream_t stream, int32_t sourceId, int32_t
 		return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 
 	ftl::rgbd::Camera cam;
@@ -223,7 +229,7 @@ ftlError_t ftlIntrinsicsWriteRight(ftlStream_t stream, int32_t sourceId, int32_t
 	cam.maxDepth = maxDepth;
 	cam.baseline = baseline;
 	cam.doffs = 0.0f;
-	stream->video_fs.frames[sourceId].setRight(cam);
+	stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>().setRight() = cam;
 	stream->has_fresh_data = true;
 
 	return FTLERROR_OK;
@@ -234,15 +240,15 @@ ftlError_t ftlPoseWrite(ftlStream_t stream, int32_t sourceId, const float *data)
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (!data) return FTLERROR_STREAM_NO_DATA;
 
 	Eigen::Matrix4f pose;
 	for (int i=0; i<16; ++i) pose.data()[i] = data[i];
 
-	auto &frame = stream->video_fs.frames[sourceId];
-	frame.setPose(pose.cast<double>());
+	auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
+	frame.setPose() = pose.cast<double>();
 
 	return FTLERROR_OK;
 }
@@ -252,16 +258,16 @@ ftlError_t ftlRemoveOcclusion(ftlStream_t stream, int32_t sourceId, ftlChannel_t
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
 		return FTLERROR_STREAM_BAD_CHANNEL;
-	if (!stream->video_fs.frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
+	if (!stream->video_fs->frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
 		return FTLERROR_STREAM_BAD_CHANNEL;
 
-	auto &frame = stream->video_fs.frames[sourceId];
+	auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
 	//auto &mask = frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Mask);
-	auto &depth = frame.get<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
+	auto &depth = frame.set<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
 	auto &intrin = frame.getLeft();
 
 	cv::Mat depthR(intrin.height, intrin.width, CV_32F, const_cast<float*>(data), pitch);
@@ -277,14 +283,14 @@ ftlError_t ftlMaskOcclusion(ftlStream_t stream, int32_t sourceId, ftlChannel_t c
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
 		return FTLERROR_STREAM_BAD_CHANNEL;
-	if (!stream->video_fs.frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
+	if (!stream->video_fs->frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
 		return FTLERROR_STREAM_BAD_CHANNEL;
 
-	auto &frame = stream->video_fs.frames[sourceId];
+	auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
 	auto &mask = frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Mask);
 	auto &depth = frame.get<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
 	auto &intrin = frame.getLeft();
@@ -330,10 +336,10 @@ ftlError_t ftlSelect(ftlStream_t stream, ftlChannel_t channel) {
 	if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
 		return FTLERROR_STREAM_BAD_CHANNEL;
 
-	ftl::codecs::Channels<0> channels;
-	if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs.id)) channels = stream->stream->selected(stream->video_fs.id);
+	std::unordered_set<ftl::codecs::Channel> channels;
+	if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs->id().frameset())) channels = stream->stream->selected(stream->video_fs->id().frameset());
 	channels += static_cast<ftl::codecs::Channel>(channel);
-	stream->stream->select(stream->video_fs.id, channels, true);
+	stream->stream->select(stream->video_fs->id().frameset(), channels, true);
 	return FTLERROR_OK;
 }
 
@@ -354,26 +360,17 @@ ftlError_t ftlNextFrame(ftlStream_t stream) {
 	try {
 		cudaSetDevice(0);
 		if (stream->pipelines) {
-			stream->pipelines->apply(stream->video_fs, stream->video_fs, 0);
+			stream->pipelines->apply(*stream->video_fs, *stream->video_fs, 0);
+		}
+
+		for (auto &c : stream->video_fs->firstFrame().changed()) {
+			stream->sender->post(*stream->video_fs, c.first);
 		}
-		stream->sender->post(stream->video_fs);
 	} catch (const std::exception &e) {
 		return FTLERROR_STREAM_ENCODE_FAILED;
 	}
 
-	// Reset the frameset.
-	for (size_t i=0; i<stream->video_fs.frames.size(); ++i) {
-		if (!stream->video_fs.hasFrame(i)) continue;
-
-		auto &f = stream->video_fs.frames[i];
-		f.reset();
-		f.setOrigin(&stream->video_states[i]);
-	}
-
-	// FIXME: These should be reset each time
-	//stream->video_fs.count = 0;
-	//stream->video_fs.mask = 0;
-	stream->video_fs.timestamp += stream->interval;
+	stream->video_fs = std::make_shared<ftl::data::FrameSet>(&stream->pool, ftl::data::FrameID(0,0), stream->video_fs->timestamp()+stream->interval);
 	stream->has_fresh_data = false;
 	return FTLERROR_OK;
 }
@@ -389,9 +386,11 @@ ftlError_t ftlDestroyStream(ftlStream_t stream) {
 			try {
 				cudaSetDevice(0);
 				if (stream->pipelines) {
-					stream->pipelines->apply(stream->video_fs, stream->video_fs, 0);
+					stream->pipelines->apply(*stream->video_fs, *stream->video_fs, 0);
+				}
+				for (auto &c : stream->video_fs->firstFrame().changed()) {
+					stream->sender->post(*stream->video_fs, c.first);
 				}
-				stream->sender->post(stream->video_fs);
 			} catch (const std::exception &e) {
 				err = FTLERROR_STREAM_ENCODE_FAILED;
 				LOG(ERROR) << "Sender exception: " << e.what();
@@ -418,12 +417,12 @@ ftlError_t ftlSetPropertyString(ftlStream_t stream, int32_t sourceId, const char
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
 	if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
 
-	stream->video_fs.frames[sourceId].set(std::string(prop), std::string(value));
+	//stream->video_fs.frames[sourceId].set(std::string(prop), std::string(value));
 	return FTLERROR_OK;
 }
 
@@ -432,12 +431,12 @@ ftlError_t ftlSetPropertyInt(ftlStream_t stream, int32_t sourceId, const char *p
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
 	if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
 
-	stream->video_fs.frames[sourceId].set(std::string(prop), value);
+	//stream->video_fs.frames[sourceId].set(std::string(prop), value);
 	return FTLERROR_OK;
 }
 
@@ -446,11 +445,11 @@ ftlError_t ftlSetPropertyFloat(ftlStream_t stream, int32_t sourceId, const char
 	if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
 	if (sourceId < 0 || sourceId >= 32)
 		return FTLERROR_STREAM_INVALID_PARAMETER;
-	if (!stream->video_fs.hasFrame(sourceId))
+	if (!stream->video_fs->hasFrame(sourceId))
 		return FTLERROR_STREAM_NO_INTRINSICS;
 	if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
 	if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
 
-	stream->video_fs.frames[sourceId].set(std::string(prop), value);
+	//stream->video_fs.frames[sourceId].set(std::string(prop), value);
 	return FTLERROR_OK;
 }
diff --git a/applications/calibration-multi/src/main.cpp b/applications/calibration-multi/src/main.cpp
index 46e2122b2..ddd41ceb1 100644
--- a/applications/calibration-multi/src/main.cpp
+++ b/applications/calibration-multi/src/main.cpp
@@ -116,7 +116,7 @@ void visualizeCalibration(	MultiCameraCalibrationNew &calib, Mat &out,
 		Scalar(64, 255, 64),
 		Scalar(64, 255, 64),
 	};
-	
+
 	vector<int> markers = {cv::MARKER_SQUARE, cv::MARKER_DIAMOND};
 
 	for (size_t c = 0; c < rgb.size(); c++) {
@@ -211,7 +211,7 @@ void calibrateRPC(	ftl::net::Universe* net,
 		Mat P1, P2, Q;
 		Mat R1, R2;
 		Mat R_c1c2, T_c1c2;
-		
+
 		calculateTransform(R[c], t[c], R[c + 1], t[c + 1], R_c1c2, T_c1c2);
 		cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, params.alpha);
 
@@ -275,12 +275,12 @@ std::vector<cv::Point2d> findCalibrationTarget(const cv::Mat &im, const cv::Mat
 
 	if (corners.size() > 2) { LOG(ERROR) << "Too many ArUco tags in image"; }
 	if (corners.size() != 2) { return {}; }
-	
+
 	const size_t ncorners = 4;
 	const size_t ntags = ids.size();
 
 	std::vector<cv::Point2d> points;
-	
+
 	if (ids[0] == 1) {
 		std::swap(ids[0], ids[1]);
 		std::swap(corners[0], corners[1]);
@@ -314,7 +314,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 
 	net->start();
 	net->waitConnections();
-	
+
 	ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
 	ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
 	gen->setStream(stream);
@@ -330,10 +330,10 @@ void runCameraCalibration(	ftl::Configurable* root,
 		nstream->set("uri", s);
 		nstreams.push_back(nstream);
 		stream->add(nstream);
-		
+
 		++count;
 	}
-	
+
 	const size_t n_sources = nstreams.size();
 	const size_t n_cameras = n_sources * 2;
 	size_t reference_camera = 0;
@@ -349,7 +349,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 		if (fs.frames.size() != (rgb_new.size()/2)) {
 			// nstreams.size() == (rgb_new.size()/2)
 			LOG(ERROR)	<< "frames.size() != nstreams.size(), "
-						<< fs.frames.size() << " != " << (rgb_new.size()/2); 
+						<< fs.frames.size() << " != " << (rgb_new.size()/2);
 		}
 
 		UNIQUE_LOCK(mutex, CALLBACK);
@@ -370,28 +370,28 @@ void runCameraCalibration(	ftl::Configurable* root,
 
 				auto idx = stream->originStream(0, i);
 				CHECK(idx >= 0) << "negative index";
-				
+
 				fs.frames[i].download(Channel::Left+Channel::Right);
 				Mat &left = fs.frames[i].get<Mat>(Channel::Left);
 				Mat &right = fs.frames[i].get<Mat>(Channel::Right);
-				
+
 				/*
-				// note: also returns empty sometimes 
+				// note: also returns empty sometimes
 				fs.frames[i].upload(Channel::Left+Channel::Right);
 				Mat left, right;
 				fs.frames[i].get<cv::cuda::GpuMat>(Channel::Left).download(left);
 				fs.frames[i].get<cv::cuda::GpuMat>(Channel::Right).download(right);
 				*/
-				
+
 				CHECK(!left.empty() && !right.empty());
 
 				cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
 				cv::cvtColor(right, rgb_new[2*idx+1], cv::COLOR_BGRA2BGR);
-				
+
 				camera_parameters[2*idx] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getLeftCamera()),
-					rgb_new[2*idx].size(), Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height));
+					Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height), rgb_new[2*idx].size());
 				camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
-					rgb_new[2*idx].size(), Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height));
+					Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height), rgb_new[2*idx].size());
 
 				if (res.empty()) res = rgb_new[2*idx].size();
 			}
@@ -410,7 +410,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 
 	stream->begin();
 	ftl::timer::start(false);
-	
+
 	while(true) {
 		if (!res.empty()) {
 			params.size = res;
@@ -455,7 +455,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 	while(calib.getMinVisibility() < static_cast<size_t>(n_views)) {
 		loop:
 		cv::waitKey(10);
-		
+
 		while (true) {
 			if (new_frames) {
 				UNIQUE_LOCK(mutex, LOCK);
@@ -465,7 +465,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 			}
 			cv::waitKey(10);
 		}
-		
+
 		for (Mat &im : rgb) {
 			if (im.empty()) {
 				LOG(ERROR) << "EMPTY";
@@ -488,10 +488,10 @@ void runCameraCalibration(	ftl::Configurable* root,
 				n_found++;
 			}
 		}
-		
+
 		if (n_found >= min_visible) {
 			calib.addPoints(points, visible);
-			
+
 			if (save_input) {
 				for (size_t i = 0; i < n_cameras; i++) {
 					cv::imwrite(path + std::to_string(i) + "_" + std::to_string(iter) + ".jpg", rgb[i]);
@@ -508,13 +508,13 @@ void runCameraCalibration(	ftl::Configurable* root,
 						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
 				}
 			}
-			
+
 			// index
 			cv::putText(rgb[i],
 						"Camera " + std::to_string(i),
 						Point2i(10, 30),
 						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-			
+
 			// resolution
 			cv::putText(rgb[i],
 						"[" + std::to_string(rgb[i].size().width) + "x" + std::to_string(rgb[i].size().height) + "]",
@@ -542,7 +542,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 		cv::imshow("Cameras", show);
 	}
 	cv::destroyWindow("Cameras");
-	
+
 	for (size_t i = 0; i < nstreams.size(); i++) {
 		while(true) {
 			try {
@@ -558,7 +558,7 @@ void runCameraCalibration(	ftl::Configurable* root,
 			catch (...) {}
 		}
 	}
-	
+
 	Mat out;
 	vector<Mat> map1, map2;
 	vector<cv::Rect> roi;
@@ -614,7 +614,7 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 
 	net->start();
 	net->waitConnections();
-	
+
 	ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
 	ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
 	gen->setStream(stream);
@@ -630,10 +630,10 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 		nstream->set("uri", s);
 		nstreams.push_back(nstream);
 		stream->add(nstream);
-		
+
 		++count;
 	}
-	
+
 	const size_t n_sources = nstreams.size();
 	const size_t n_cameras = n_sources * 2;
 	size_t reference_camera = 0;
@@ -649,7 +649,7 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 		if (fs.frames.size() != (rgb_new.size()/2)) {
 			// nstreams.size() == (rgb_new.size()/2)
 			LOG(ERROR)	<< "frames.size() != nstreams.size(), "
-						<< fs.frames.size() << " != " << (rgb_new.size()/2); 
+						<< fs.frames.size() << " != " << (rgb_new.size()/2);
 		}
 
 		UNIQUE_LOCK(mutex, CALLBACK);
@@ -670,11 +670,11 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 
 				auto idx = stream->originStream(0, i);
 				CHECK(idx >= 0) << "negative index";
-				
+
 				fs.frames[i].download(Channel::Left+Channel::Right);
 				Mat &left = fs.frames[i].get<Mat>(Channel::Left);
 				Mat &right = fs.frames[i].get<Mat>(Channel::Right);
-				
+
 				CHECK(!left.empty() && !right.empty());
 
 				cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
@@ -684,7 +684,7 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 					rgb_new[2*idx].size(), Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height));
 				camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
 					rgb_new[2*idx].size(), Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height));
-				
+
 				if (res.empty()) res = rgb_new[2*idx].size();*/
 			}
 		}
@@ -726,7 +726,7 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 	calib.object_points_ = calibration_target;
 
 	cv::FileStorage fs(path + filename, cv::FileStorage::READ);
-	fs["resolution"] >> params.size; 
+	fs["resolution"] >> params.size;
 	params.idx_cameras.resize(n_cameras);
 	std::iota(params.idx_cameras.begin(), params.idx_cameras.end(), 0);
 	calib.loadInput(path + filename, params.idx_cameras);
@@ -798,7 +798,7 @@ void runCameraCalibrationPath(	ftl::Configurable* root,
 int main(int argc, char **argv) {
 	auto options = ftl::config::read_options(&argv, &argc);
 	auto root = ftl::configure(argc, argv, "registration_default");
-	
+
 	// run calibration from saved input?
 	const bool load_input = root->value<bool>("load_input", false);
 	// should calibration input be saved
@@ -822,7 +822,7 @@ int main(int argc, char **argv) {
 	const string registration_file = root->value<string>("registration_file", FTL_LOCAL_CONFIG_ROOT "/registration.json");
 	// location where extrinsic calibration files saved
 	const string output_directory = root->value<string>("output_directory", "./");
-	
+
 	const bool offline = root->value<bool>("offline", false);
 
 	CalibrationParams params;
@@ -833,7 +833,7 @@ int main(int argc, char **argv) {
 	params.output_path = output_directory;
 	params.registration_file = registration_file;
 	params.reference_camera = ref_camera;
-	
+
 	LOG(INFO)	<< "\n"
 				<< "\n"
 				<< "\n                save_input: " << (int) save_input
@@ -858,4 +858,4 @@ int main(int argc, char **argv) {
 	}
 
 	return 0;
-}
\ No newline at end of file
+}
diff --git a/applications/calibration-multi/src/multicalibrate.cpp b/applications/calibration-multi/src/multicalibrate.cpp
index 3aaea4d06..090cce9da 100644
--- a/applications/calibration-multi/src/multicalibrate.cpp
+++ b/applications/calibration-multi/src/multicalibrate.cpp
@@ -26,15 +26,15 @@ using std::pair;
 using std::make_pair;
 
 double CalibrationTarget::estimateScale(vector<Point3d> points) {
-	
-	// 1. calculate statistics 
-	// 2. reject possible outliers 
+
+	// 1. calculate statistics
+	// 2. reject possible outliers
 	// 3. calculate scale factor
 
 	double f = 0.0;
 	double S = 0.0;
 	double m = 0.0;
-	
+
 	vector<double> d(points.size() / 2, 0.0);
 
 	for (size_t i = 0; i < points.size(); i += 2) {
@@ -87,7 +87,7 @@ double CalibrationTarget::estimateScale(vector<Point3d> points) {
 MultiCameraCalibrationNew::MultiCameraCalibrationNew(
 			size_t n_cameras, size_t reference_camera, Size resolution,
 			CalibrationTarget target, int fix_intrinsics) :
-		
+
 	target_(target),
 	visibility_graph_(n_cameras),
 	is_calibrated_(false),
@@ -122,7 +122,6 @@ Mat MultiCameraCalibrationNew::getCameraMat(size_t idx) {
 	return K;
 }
 
-
 Mat MultiCameraCalibrationNew::getCameraMatNormalized(size_t idx, double scale_x, double scale_y)
 {
 	Mat K = getCameraMat(idx);
@@ -135,7 +134,7 @@ Mat MultiCameraCalibrationNew::getCameraMatNormalized(size_t idx, double scale_x
 	scale.at<double>(0, 0) = scale_x;
 	scale.at<double>(1, 1) = scale_y;
 	scale.at<double>(2, 2) = 1.0;
-	
+
 	return (scale * K);
 }
 
@@ -212,18 +211,18 @@ void MultiCameraCalibrationNew::loadInput(const std::string &filename, const vec
 	fs["visible"] >> visible;
 	fs["resolution"] >> resolution_;
 	fs.release();
-	
+
 	vector<size_t> cameras;
 	if (cameras_in.size() == 0) {
 		cameras.resize(K.size());
 		size_t i = 0;
 		for (auto &c : cameras) { c = i++; }
-	} 
+	}
 	else {
 		cameras.reserve(cameras_in.size());
 		for (auto &c : cameras_in) { cameras.push_back(c); }
 	}
-	
+
 	n_cameras_ = cameras.size();
 
 	points2d_.resize(n_cameras_);
@@ -262,7 +261,7 @@ void MultiCameraCalibrationNew::loadInput(const std::string &filename, const vec
 		}
 	}
 	reset();
-	
+
 	DCHECK(points2d_.size() == n_cameras_);
 	DCHECK(points2d_.size() == visible_.size());
 	size_t len = visible_[0].size();
@@ -296,14 +295,14 @@ bool MultiCameraCalibrationNew::isValid(size_t idx) {
 }
 
 vector<Point2d> MultiCameraCalibrationNew::getPoints(size_t camera, size_t idx) {
-	return vector<Point2d> (points2d_[camera].begin() + idx * (target_.n_points), 
+	return vector<Point2d> (points2d_[camera].begin() + idx * (target_.n_points),
 							points2d_[camera].begin() + idx * (target_.n_points + 1));
 }
 
 
 void MultiCameraCalibrationNew::updatePoints3D(size_t camera, Point3d new_point,
 		size_t idx, const Mat &R, const Mat &t) {
-	
+
 	int &f = inlier_[camera][idx];
 	Point3d &point = points3d_[camera][idx];
 	new_point = transformPoint(new_point, R, t);
@@ -315,7 +314,7 @@ void MultiCameraCalibrationNew::updatePoints3D(size_t camera, Point3d new_point,
 		//			would most likely suggest very bad triangulation (sync? wrong match?)
 		// 			instead store all triangulations and handle outliers
 		//			(perhaps inverse variance weighted mean?)
-		
+
 		if (euclideanDistance(point, new_point) > 10.0) {
 			LOG(ERROR) << "bad value (skipping) " << "(" << point << " vs " << new_point << ")";
 			f = -1;
@@ -333,7 +332,7 @@ void MultiCameraCalibrationNew::updatePoints3D(size_t camera, Point3d new_point,
 
 void MultiCameraCalibrationNew::updatePoints3D(size_t camera, vector<Point3d> points,
 		vector<size_t> idx, const Mat &R, const Mat &t) {
-	
+
 	for (size_t i = 0; i < idx.size(); i++) {
 		updatePoints3D(camera, points[i], idx[i], R, t);
 	}
@@ -341,16 +340,16 @@ void MultiCameraCalibrationNew::updatePoints3D(size_t camera, vector<Point3d> po
 
 void MultiCameraCalibrationNew::getVisiblePoints(
 		vector<size_t> cameras, vector<vector<Point2d>> &points, vector<size_t> &idx) {
-	
+
 	size_t n_points_total = points2d_[0].size();
 	DCHECK(cameras.size() <= n_cameras_);
 	DCHECK(n_points_total % target_.n_points == 0);
-	
+
 	idx.clear();
 	idx.reserve(n_points_total);
 	points.clear();
 	points.resize(cameras.size(), {});
-	
+
 	for (size_t i = 0; i < n_points_total; i += target_.n_points) {
 		bool visible_all = true;
 
@@ -359,7 +358,7 @@ void MultiCameraCalibrationNew::getVisiblePoints(
 				visible_all &= isVisible(c, i + j);
 			}
 		}
-		
+
 		if (!visible_all) { continue; }
 
 		for (size_t j = 0; j < target_.n_points; j++) {
@@ -378,7 +377,7 @@ void MultiCameraCalibrationNew::getVisiblePoints(
 }
 
 double MultiCameraCalibrationNew::calibratePair(size_t camera_from, size_t camera_to, Mat &rmat, Mat &tvec) {
-	
+
 
 	vector<size_t> idx;
 	vector<Point2d> points1, points2;
@@ -394,146 +393,19 @@ double MultiCameraCalibrationNew::calibratePair(size_t camera_from, size_t camer
 
 	// cameras possibly lack line of sight?
 	DCHECK(points1.size() > 8);
-	
+
 	Mat &K1 = K_[camera_from];
 	Mat &K2 = K_[camera_to];
-	/*
-	vector<uchar> inliers;
-	Mat F, E;
-	F = cv::findFundamentalMat(points1, points2, fm_method_, fm_ransac_threshold_, fm_confidence_, inliers);
-
-	if (F.empty())
-	{
-		LOG(ERROR) << "Fundamental matrix estimation failed. Possibly degenerate configuration?";
-		return INFINITY;
-	}
-
-	E = K2.t() * F * K1;
-
-	// Only include inliers
-	if (fm_method_ == cv::FM_LMEDS || fm_method_ == cv::FM_RANSAC) {
-		vector<Point2d> inliers1, inliers2;
-		vector<size_t> inliers_idx;
-
-		inliers1.reserve(points1.size());
-		inliers2.reserve(points1.size());
-		inliers_idx.reserve(points1.size());
-
-		for (size_t i = 0; i < inliers.size(); i += target_.n_points) {
-			bool inlier = true;
-			
-			for (size_t j = 0; j < target_.n_points; j++) {
-				inlier &= inliers[i+j];
-			}
-
-			if (inlier) {
-				inliers1.insert(inliers1.end(), points1.begin() + i, points1.begin() + i + target_.n_points);
-				inliers2.insert(inliers2.end(), points2.begin() + i, points2.begin() + i + target_.n_points);
-				inliers_idx.insert(inliers_idx.end(), idx.begin() + i, idx.begin() + i + target_.n_points);
-			}
-		}
-		
-		LOG(INFO) << "Total points: " << points1.size() << ", inliers: " << inliers1.size();
-		double ratio_good_points = (double) inliers1.size() / (double) points1.size();
-		if (ratio_good_points < 0.66) {
-			// TODO: ... 
-			LOG(WARNING) << "Over 1/3 of points rejected!";
-			if (ratio_good_points < 0.33) { LOG(FATAL) << "Over 2/3 points rejected!"; }
-		}
-		
-		DCHECK(inliers1.size() == inliers_idx.size());
-		DCHECK(inliers2.size() == inliers_idx.size());
-
-		std::swap(inliers1, points1);
-		std::swap(inliers2, points2);
-		std::swap(inliers_idx, idx);
-	}
-	
-	// Estimate initial rotation matrix and translation vector and triangulate
-	// points (in camera 1 coordinate system).
 
 	Mat R1, R2, t1, t2;
 	R1 = Mat::eye(Size(3, 3), CV_64FC1);
 	t1 = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
 
 	vector<Point3d> points3d;
-	// Convert homogeneous coordinates 
-	{
-		Mat points3dh;
-		recoverPose(E, points1, points2, K1, K2, R2, t2, 1000.0, points3dh);
-		points3d.reserve(points3dh.cols);
-
-		for (int col = 0; col < points3dh.cols; col++) {
-			Point3d p = Point3d(points3dh.at<double>(0, col),
-								points3dh.at<double>(1, col),
-								points3dh.at<double>(2, col))
-								/ points3dh.at<double>(3, col);
-			points3d.push_back(p);
-		}
-	}
-	DCHECK(points3d.size() == points1.size());
-
-	// Estimate and apply scale factor
-	{
-		double scale = ftl::calibration::optimizeScale(object_points_, points3d);
-		t1 = t1 * scale;
-		t2 = t2 * scale;
-	}
-
-	// Reprojection error before BA
-	{
-		// SBA should report squared mean error
-		const double err1 = reprojectionError(points3d, points1, K1, R1, t1);
-		const double err2 = reprojectionError(points3d, points2, K2, R2, t2);
-		
-		if (abs(err1 - err2) > 2.0) {
-			LOG(INFO) << "Initial reprojection error (camera " << camera_from << "): " << err1;
-			LOG(INFO) << "Initial reprojection error (camera " << camera_to << "): " << err2;
-		}
-		LOG(INFO)	<< "Initial reprojection error (" << camera_from << ", " << camera_to << "): "
-					<< sqrt(err1 * err1 + err2 * err2);
-		
-	}
-	
-	// Bundle Adjustment
-	
-	double err = INFINITY;
-	{
-		auto cameras = vector<ftl::calibration::Camera> {
-			ftl::calibration::Camera(K1, dist_coeffs_[camera_from], R1, t1),
-			ftl::calibration::Camera(K2, dist_coeffs_[camera_to], R2, t2)
-		};
-
-		ftl::calibration::BundleAdjustment ba;
-		ba.addCameras(cameras);
-		for (size_t i = 0; i < points3d.size(); i++) {
-			ba.addPoint({points1[i], points2[i]}, points3d[i]);
-		}
 
-		ftl::calibration::BundleAdjustment::Options options;
-		options.fix_camera_extrinsic = {0};
-		options.optimize_intrinsic = false;
-
-		ba.run(options);
-		// TODO: ... 
-		err = sqrt(ba.reprojectionError(0) * ba.reprojectionError(1));
-
-		R2 = cameras[1].rmat();
-		t2 = Mat(cameras[1].tvec());
-	}
-
-	calculateTransform(R2, t2, R1, t1, rmat, tvec);
-	*/
-	
-	Mat R1, R2, t1, t2;
-	R1 = Mat::eye(Size(3, 3), CV_64FC1);
-	t1 = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
-	
-	vector<Point3d> points3d;
-	
 	double err = ftl::calibration::computeExtrinsicParameters(K1, dist_coeffs_[camera_from], K2, dist_coeffs_[camera_to], points1, points2, object_points_, R2, t2, points3d);
 	calculateTransform(R2, t2, R1, t1, rmat, tvec);
-	
+
 
 	// Store and average 3D points for both cameras (skip garbage)
 	if (err < 10.0) {
@@ -598,7 +470,7 @@ double MultiCameraCalibrationNew::getReprojectionError(size_t c_from, size_t c_t
 }
 
 double MultiCameraCalibrationNew::getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &t) {
-	
+
 	vector<Point2d> points2d;
 	vector<Point3d> points3d;
 
@@ -615,7 +487,7 @@ double MultiCameraCalibrationNew::getReprojectionErrorOptimized(size_t c_from, c
 double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 	if (reference_camera != -1) {
 		DCHECK(reference_camera >= 0 && reference_camera < static_cast<int>(n_cameras_));
-		reference_camera_ = reference_camera; 
+		reference_camera_ = reference_camera;
 	}
 
 	for (const auto &K : K_) {
@@ -623,17 +495,17 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 	}
 
 	reset(); // remove all old calibration results
-	map<pair<size_t, size_t>, pair<Mat, Mat>> transformations; 
-	
+	map<pair<size_t, size_t>, pair<Mat, Mat>> transformations;
+
 	// All cameras should be calibrated pairwise; otherwise all possible 3D
 	// points are not necessarily triangulated
 
 	auto paths = visibility_graph_.findShortestPaths(reference_camera_);
-	
+
 	for (size_t c1 = 0; c1 < n_cameras_; c1++) {
 	for (size_t c2 = c1; c2 < n_cameras_; c2++) {
 		if (c1 == c2) {
-			transformations[make_pair(c1, c2)] = 
+			transformations[make_pair(c1, c2)] =
 				make_pair(Mat::eye(Size(3, 3), CV_64FC1),
 				Mat(Size(1, 3), CV_64FC1, Scalar(0.0))
 			);
@@ -691,9 +563,9 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 		DCHECK(R_[c].size() == Size(3, 3));
 		DCHECK(t_[c].size() == Size(1, 3));*/
 	}
-	
+
 	calculateMissingPoints3D();
-	
+
 	for (size_t c_from = 0; c_from < n_cameras_; c_from++) {
 		if (c_from == reference_camera_) continue;
 		Mat R, t;
@@ -705,7 +577,7 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 	double err;
 	{
 		auto cameras = vector<ftl::calibration::Camera>();
-		
+
 		for (size_t i = 0; i < n_cameras_; i++) {
 			calculateInverse(R_[i], t_[i], R_[i], t_[i]);
 			cameras.push_back(ftl::calibration::Camera(K_[i], dist_coeffs_[i], R_[i], t_[i]));
@@ -715,7 +587,7 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 		ba.addCameras(cameras);
 
 		for (size_t i = 0; i < points3d_optimized_.size(); i++) {
-			
+
 			auto &p = points3d_optimized_[i];
 			DCHECK(!isnanl(p.x) && !isnanl(p.y) && !isnanl(p.z));
 
@@ -723,9 +595,9 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 			for (size_t c = 0; c < n_cameras_; c++) {
 				if (isVisible(c, i) && isValid(c, i)) { count++; }
 			}
-			
+
 			if (count < 2) continue;
-			
+
 			vector<bool> visible(n_cameras_);
 			vector<Point2d> points2d(n_cameras_);
 
@@ -748,7 +620,7 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 		options.fix_camera_extrinsic = {reference_camera};
 		options.verbose = true;
 		options.max_iter = 500;
-		
+
 		err = ba.reprojectionError();
 		ba.run(options);
 
@@ -767,7 +639,7 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 		calculateInverse(R_[c_from], t_[c_from], R, t);
 		LOG(INFO)	<< "Error (RMS) after BA, cameras " << reference_camera_ << " and " << c_from << ": "
 					<< getReprojectionErrorOptimized(c_from, K_[c_from], R, t);
-	
+
 	}
 
 	is_calibrated_ = true;
@@ -775,7 +647,7 @@ double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
 }
 
 void MultiCameraCalibrationNew::projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points) {
-	
+
 }
 
 void MultiCameraCalibrationNew::projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points) {
@@ -783,7 +655,7 @@ void MultiCameraCalibrationNew::projectPointsOptimized(size_t camera_dst, size_t
 
 	points.clear();
 	size_t i = target_.n_points * idx;
-	
+
 	if (!isValid(i)) return;
 
 	Point3d p1(points3d_optimized_[i]);
@@ -792,12 +664,12 @@ void MultiCameraCalibrationNew::projectPointsOptimized(size_t camera_dst, size_t
 	if (!std::isfinite(p1.x) || !std::isfinite(p2.x)) {
 		// DEBUG: should not happen
 		LOG(ERROR) << "Bad point! (no valid triangulation)";
-		return; 
+		return;
 	}
-	
+
 	Mat R, tvec, rvec;
 	calculateTransform(R_[reference_camera_], t_[reference_camera_], R_[camera_dst], t_[camera_dst], R, tvec);
-	
+
 	cv::Rodrigues(R, rvec);
 	cv::projectPoints(	vector<Point3d> { p1, p2 },
 						rvec, tvec, K_[camera_dst], dist_coeffs_[camera_dst], points);
@@ -812,4 +684,4 @@ void MultiCameraCalibrationNew::getCalibration(vector<Mat> &R, vector<Mat> &t) {
 		R_[i].copyTo(R[i]);
 		t_[i].copyTo(t[i]);
 	}
-}
\ No newline at end of file
+}
diff --git a/applications/calibration-multi/src/visibility.hpp b/applications/calibration-multi/src/visibility.hpp
index 0ac8d3a80..569e51c1b 100644
--- a/applications/calibration-multi/src/visibility.hpp
+++ b/applications/calibration-multi/src/visibility.hpp
@@ -20,10 +20,10 @@ public:
 	 * @brief	For all cameras, find shortest (optimal) paths to reference
 	 * 			camera
 	 * @param	Id of reference camera
-	 * 
+	 *
 	 * Calculates shortest path in weighted graph using Dijkstra's
 	 * algorithm. Weights are inverse of views between cameras (nodes)
-	 * 
+	 *
 	 * @todo	Add constant weight for each edge (prefer less edges)
 	 */
 	vector<vector<pair<int, int>>> findShortestPaths(int reference);
@@ -32,11 +32,11 @@ public:
 	void deleteEdge(int camera1, int camera2);
 
 	int getOptimalCamera();
-	
+
 	/** @brief Returns the smallest visibility count (any camera)
 	 */
 	int getMinVisibility();
-	
+
 	/** @brief Returns the visibility camera's value
 	 */
 	int getViewsCount(int camera);
@@ -51,6 +51,6 @@ protected:
 
 private:
 	int n_cameras_;		// @brief number of cameras
-	Mat visibility_;	// @brief adjacency matrix
+	cv::Mat visibility_;	// @brief adjacency matrix
 	vector<int> count_;
 };
diff --git a/applications/calibration/src/lens.cpp b/applications/calibration/src/lens.cpp
index 72694ebac..ef00ba177 100644
--- a/applications/calibration/src/lens.cpp
+++ b/applications/calibration/src/lens.cpp
@@ -62,7 +62,7 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 
 	// assume no tangential and thin prism distortion and only estimate first
 	// three radial distortion coefficients
-	
+
 	int calibrate_flags = 	cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5 | cv::CALIB_FIX_K6 |
 							cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_S1_S2_S3_S4 | cv::CALIB_FIX_ASPECT_RATIO;
 
@@ -76,13 +76,13 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 		camera_matrix.clear();
 		vector<Mat> tmp;
 		Size tmp_size;
-		
+
 		loadIntrinsics(filename_intrinsics, camera_matrix, tmp, tmp_size);
 		CHECK(camera_matrix.size() == static_cast<unsigned int>(n_cameras));
 
 		if ((tmp_size != image_size) && (!tmp_size.empty())) {
 			for (Mat &K : camera_matrix) {
-				K = ftl::calibration::scaleCameraMatrix(K, image_size, tmp_size);
+				K = ftl::calibration::scaleCameraMatrix(K, tmp_size, image_size);
 			}
 		}
 
@@ -104,13 +104,13 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 			LOG(ERROR) << "Could not open camera device";
 			return;
 		}
-		camera.set(cv::CAP_PROP_FRAME_WIDTH, image_size.width); 
+		camera.set(cv::CAP_PROP_FRAME_WIDTH, image_size.width);
 		camera.set(cv::CAP_PROP_FRAME_HEIGHT, image_size.height);
 	}
 
 	vector<vector<vector<Vec2f>>> image_points(n_cameras);
 	vector<vector<vector<Vec3f>>> object_points(n_cameras);
-	
+
 	vector<Mat> img(n_cameras);
 	vector<Mat> img_display(n_cameras);
 	vector<int> count(n_cameras, 0);
@@ -124,7 +124,7 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 	std::atomic<bool> ready = false;
 	auto capture = std::thread(
 		[n_cameras, delay, &m, &ready, &count, &calib, &img, &image_points, &object_points]() {
-		
+
 		vector<Mat> tmp(n_cameras);
 		while(true) {
 			if (!ready) {
@@ -138,7 +138,7 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 				img[c].copyTo(tmp[c]);
 			}
 			m.unlock();
-			
+
 			for (int c = 0; c < n_cameras; c++) {
 				vector<Vec2f> points;
 				if (calib.findPoints(tmp[c], points)) {
@@ -168,13 +168,13 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 			ready = true;
 			m.unlock();
 		}
-		
+
 		for (int c = 0; c < n_cameras; c++) {
 			img[c].copyTo(img_display[c]);
 			m.lock();
 
 			if (image_points[c].size() > 0) {
-				
+
 				for (auto &points : image_points[c]) {
 					calib.drawCorners(img_display[c], points);
 				}
@@ -192,13 +192,13 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 	}
 
 	cv::destroyAllWindows();
-	
+
 	//bool calib_ok = true;
 
 	for (int c = 0; c < n_cameras; c++) {
 		LOG(INFO) << "Calculating intrinsic parameters for camera " << std::to_string(c);
 		vector<Mat> rvecs, tvecs;
-		
+
 		double rms = cv::calibrateCamera(
 							object_points[c], image_points[c],
 							image_size, camera_matrix[c], dist_coeffs[c],
@@ -208,9 +208,9 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 		LOG(INFO) << "final reprojection RMS error: " << rms;
 
 		if (!ftl::calibration::validate::distortionCoefficients(dist_coeffs[c], image_size)) {
-			LOG(ERROR)	<< "Calibration failed: invalid distortion coefficients:\n" 
+			LOG(ERROR)	<< "Calibration failed: invalid distortion coefficients:\n"
 						<< dist_coeffs[c];
-			
+
 			LOG(WARNING) << "Estimating only intrinsic parameters for camera " << std::to_string(c);
 
 			dist_coeffs[c] = Mat(Size(8, 1), CV_64FC1, cv::Scalar(0.0));
@@ -228,7 +228,7 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 		// TODO: check for valid aperture width/height; do not run if not valid values
 		cv::calibrationMatrixValues(camera_matrix[c], image_size, aperture_width, aperture_height,
 									fovx, fovy, focal_length, principal_point, aspect_ratio);
-		
+
 		LOG(INFO) << "";
 		LOG(INFO) << "            fovx (deg): " << fovx;
 		LOG(INFO) << "            fovy (deg): " << fovy;
@@ -243,13 +243,13 @@ void ftl::calibration::intrinsic(map<string, string> &opt) {
 
 	saveIntrinsics(filename_intrinsics, camera_matrix, dist_coeffs, image_size);
 	LOG(INFO) << "intrinsic parameters saved to: " << filename_intrinsics;
-	
+
 	vector<Mat> map1(n_cameras), map2(n_cameras);
 	for (int c = 0; c < n_cameras; c++) {
 		cv::initUndistortRectifyMap(camera_matrix[c], dist_coeffs[c], Mat::eye(3,3, CV_64F), camera_matrix[c],
 									image_size, CV_16SC2, map1[c], map2[c]);
 	}
-	
+
 	while (cv::waitKey(25) != 27) {
 		for (auto &camera : cameras ) {	camera.grab(); }
 		for (int c = 0; c < n_cameras; c++) {
diff --git a/applications/ftl2mkv/src/main.cpp b/applications/ftl2mkv/src/main.cpp
index 0ebf1a0bf..1d8dd4cef 100644
--- a/applications/ftl2mkv/src/main.cpp
+++ b/applications/ftl2mkv/src/main.cpp
@@ -17,7 +17,7 @@ extern "C" {
 using ftl::codecs::codec_t;
 using ftl::codecs::Channel;
 
-static AVStream *add_video_stream(AVFormatContext *oc, const ftl::codecs::Packet &pkt)
+static AVStream *add_video_stream(AVFormatContext *oc, const ftl::codecs::Packet &pkt, const ftl::rgbd::Camera &cam)
 {
     //AVCodecContext *c;
     AVStream *st;
@@ -47,9 +47,9 @@ static AVStream *add_video_stream(AVFormatContext *oc, const ftl::codecs::Packet
 	//st->nb_frames = 0;
 	st->codecpar->codec_id = codec_id;
 	st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
-	st->codecpar->width = ftl::codecs::getWidth(pkt.definition);
+	st->codecpar->width = cam.width; //ftl::codecs::getWidth(pkt.definition);
 	//if (pkt.flags & ftl::codecs::kFlagStereo) st->codecpar->width *= 2;
-	st->codecpar->height = ftl::codecs::getHeight(pkt.definition);
+	st->codecpar->height = cam.height; //ftl::codecs::getHeight(pkt.definition);
 	st->codecpar->format = AV_PIX_FMT_NV12;
 	st->codecpar->bit_rate = 4000000;
 
@@ -111,6 +111,7 @@ int main(int argc, char **argv) {
 	AVFormatContext *oc;
 	AVStream *video_st[10][2] = {nullptr};
 
+	// TODO: Remove in newer versions
 	av_register_all();
 
 	fmt = av_guess_format(NULL, outputfile.c_str(), NULL);
@@ -127,7 +128,11 @@ int main(int argc, char **argv) {
         return -1;
     }
     oc->oformat = fmt;
+
+	// TODO: Use URL in newer versions
 	snprintf(oc->filename, sizeof(oc->filename), "%s", outputfile.c_str());
+	//oc->url = (char*)av_malloc(outputfile.size()+1);
+	//snprintf(oc->url, outputfile.size()+1, "%s", outputfile.c_str());
 
 	/* open the output file, if needed */
     if (!(fmt->flags & AVFMT_NOFILE)) {
@@ -149,6 +154,11 @@ int main(int argc, char **argv) {
 	// TODO: In future, find a better way to discover number of streams...
 	// Read entire file to find all streams before reading again to write data
 	bool res = r.read(90000000000000, [&first_ts,&current_stream,&current_channel,&r,&video_st,oc](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		// Extract calibration information to determine frame resolution
+		if (spkt.channel == ftl::codecs::Channel::Calibration) {
+			// FIXME: Implement this!!
+		}
+
         if (spkt.channel != static_cast<ftl::codecs::Channel>(current_channel) && current_channel != -1) return;
         if (spkt.frame_number == current_stream || current_stream == 255) {
 
@@ -161,7 +171,7 @@ int main(int argc, char **argv) {
 			if (spkt.timestamp < first_ts) first_ts = spkt.timestamp;
 
 			if (video_st[spkt.frame_number][(spkt.channel == Channel::Left) ? 0 : 1] == nullptr) {
-				video_st[spkt.frame_number][(spkt.channel == Channel::Left) ? 0 : 1] = add_video_stream(oc, pkt);
+				video_st[spkt.frame_number][(spkt.channel == Channel::Left) ? 0 : 1] = add_video_stream(oc, pkt, {0});  // FIXME: Use real calib data
 			}
 		}
 	});
diff --git a/applications/gui/src/config_window.cpp b/applications/gui/src/config_window.cpp
index 6dd1b4d8a..91dfc19ce 100644
--- a/applications/gui/src/config_window.cpp
+++ b/applications/gui/src/config_window.cpp
@@ -1,4 +1,5 @@
 #include "config_window.hpp"
+#include "../screen.hpp"
 
 #include <nanogui/layout.h>
 #include <nanogui/label.h>
@@ -190,7 +191,7 @@ void ConfigWindow::_addElements(nanogui::FormHelper *form, const std::string &su
 			});
 		} else if (i.value().is_object()) {
 			string key = i.key();
-		
+
 			// Checking the URI with exists() prevents unloaded local configurations from being shown.
 			if (suri.find('#') != string::npos && exists(suri+string("/")+key)) {
 				form->addButton(key, [this,suri,key]() {
@@ -213,7 +214,8 @@ void ConfigWindow::_buildForm(const std::string &suri) {
 	FormHelper *form = new FormHelper(this->screen());
 	//form->setWindow(this);
 	form->addWindow(Vector2i(100,50), uri.getFragment());
-	form->window()->setTheme(theme());
+	auto* theme = dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_light");
+	form->window()->setTheme(theme);
 
 	_addElements(form, suri);
 
diff --git a/applications/gui/src/config_window.hpp b/applications/gui/src/config_window.hpp
index a7acd1171..06f989e0b 100644
--- a/applications/gui/src/config_window.hpp
+++ b/applications/gui/src/config_window.hpp
@@ -21,7 +21,7 @@ class ConfigWindow : public nanogui::Window {
 
 	private:
 	ftl::ctrl::Master *ctrl_;
-	
+
 	void _buildForm(const std::string &uri);
 	void _addElements(nanogui::FormHelper *form, const std::string &suri);
 	bool exists(const std::string &uri);
diff --git a/applications/gui2/CMakeLists.txt b/applications/gui2/CMakeLists.txt
new file mode 100644
index 000000000..a4c7beace
--- /dev/null
+++ b/applications/gui2/CMakeLists.txt
@@ -0,0 +1,77 @@
+# Need to include staged files and libs
+#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
+#include_directories(${PROJECT_BINARY_DIR})
+
+function(add_gui_module NAME)
+	get_filename_component(FULLPATH "src/modules/${NAME}.cpp" ABSOLUTE)
+	if (EXISTS ${FULLPATH})
+		list(APPEND GUI2SRC "src/modules/${NAME}.cpp")
+	endif()
+
+	get_filename_component(FULLPATH "src/views/${NAME}.cpp" ABSOLUTE)
+	if (EXISTS ${FULLPATH})
+		list(APPEND GUI2SRC "src/views/${NAME}.cpp")
+	endif()
+
+	set(GUI2SRC ${GUI2SRC} PARENT_SCOPE)
+endfunction()
+
+set(GUI2SRC
+	src/main.cpp
+	src/inputoutput.cpp
+	src/screen.cpp
+	src/view.cpp
+	src/widgets/soundctrl.cpp
+	src/widgets/popupbutton.cpp
+	src/widgets/imageview.cpp
+	src/widgets/combobox.cpp
+	src/widgets/leftbutton.cpp
+)
+
+add_gui_module("themes")
+add_gui_module("statistics")
+add_gui_module("config")
+add_gui_module("camera")
+add_gui_module("camera3d")
+add_gui_module("thumbnails")
+add_gui_module("addsource")
+
+if (WITH_CERES)
+	list(APPEND GUI2SRC
+		src/modules/calibration/calibration.cpp
+		src/modules/calibration/extrinsic.cpp
+		src/modules/calibration/intrinsic.cpp
+		src/modules/calibration/stereo.cpp
+		src/views/calibration/widgets.cpp
+		src/views/calibration/extrinsicview.cpp
+		src/views/calibration/intrinsicview.cpp
+		src/views/calibration/stereoview.cpp
+	)
+endif()
+
+if (HAVE_OPENVR)
+	add_gui_module("cameravr")
+endif()
+
+# Various preprocessor definitions have been generated by NanoGUI
+add_definitions(${NANOGUI_EXTRA_DEFS})
+
+# On top of adding the path to nanogui/include, you may need extras
+include_directories(${NANOGUI_EXTRA_INCS})
+
+add_executable(ftl-gui2 ${GUI2SRC})
+
+target_include_directories(ftl-gui2 PUBLIC
+	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+	$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/ext/nanogui/include>
+	$<INSTALL_INTERFACE:include>
+	PRIVATE src)
+
+#if (CUDA_FOUND)
+#set_property(TARGET ftl-gui2 PROPERTY CUDA_SEPARABLE_COMPILATION ON)
+#endif()
+
+#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(ftl-gui2 ftlcommon ftldata ftlctrl ftlrgbd ftlstreams ftlrender Threads::Threads ${OpenCV_LIBS} openvr ftlnet nanogui ${NANOGUI_EXTRA_LIBS} ceres)
+
+target_precompile_headers(ftl-gui2 REUSE_FROM ftldata)
diff --git a/applications/gui2/README.md b/applications/gui2/README.md
new file mode 100644
index 000000000..054581884
--- /dev/null
+++ b/applications/gui2/README.md
@@ -0,0 +1,50 @@
+GUI
+
+NanoGUI-based graphical user interface.
+
+General:
+ * Do not modify the GUI outside the GUI thread (main). Modifications must be
+   done in GUI callbacks or draw().
+ * Expensive processing should be moved out of the GUI thread (draw() and
+   callbacks).
+ * A module only needs to implement the Module interface. Each module is
+   expected to be loaded only once.
+
+Classes
+
+Screen
+ * Implements the main screen: toolbar and view.
+ * Interface for registering new modules.
+ * Interface for adding/removing buttons.
+ * Interface for setting the active View. The inactive view is removed and
+   destroyed if no other references remain.
+ * Note: the toolbar could be a module, but other modules likely assume it is
+   always available anyway.
+ * Centralized access to Nanogui::Themes and custom non-theme colors.
+
+Module (controller)
+ * The GUI module class wraps pointers for io, config and net. Initialization
+   should add the necessary buttons to Screen (see the sketch at the end of
+   this file).
+ * Builds the necessary callbacks to process data from InputOutput to the view.
+   Note: if a callback passes data to a view, the callback handle should be
+   owned by the view, or the Module has to keep a nanogui reference to the
+   View. Also note that the View destructor is called when the active view is
+   replaced.
+
+View
+ * The active view is the main window; only one view can be active at a time.
+ * Button callbacks (e.g. those registered by module init) may change the
+   active view.
+ * Destroyed when the view is changed. Object lifetime can be used to remove
+   callbacks from InputOutput (TODO: only one active callback supported at the
+   moment).
+ * Implementations do not have to inherit from View. Popup/Window/Widget... can
+   be used to implement UI components available from any mode (config, record).
+ * Receives all unprocessed keyboard events.
+
+InputOutput
+ * Contains pointers to all required FTL objects (network/rendering/feed/...).
+ * Owns the audio Speaker instance.
+
+NanoGUI notes:
+ * If disposing a Window in a widget destructor, the window->parent() reference
+   count must be checked and dispose() called only if refCount > 0 (otherwise a
+   segfault occurs at exit).
+ * NanoGUI does not dispose popup windows automatically. See the above point if
+   using the destructor for cleanup.
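+
+Example
+
+A minimal module sketch (illustrative only; it assumes the Module and Screen
+interfaces described above, and MyModule and the icon constant are placeholder
+names):
+
+    #include "../module.hpp"  // when placed under src/modules/
+
+    class MyModule : public ftl::gui2::Module {
+    public:
+        using Module::Module;
+
+        virtual void init() override {
+            // add a toolbar button to the main screen
+            auto *button = screen->addButton(ENTYPO_ICON_COG);
+            button->setTooltip("My Module");
+            button->setVisible(true);
+        }
+
+        virtual void update(double delta) override {
+            // runs before draw(); keep this cheap, no heavy processing here
+        }
+    };
+
+    // Loaded once at startup, e.g. loadModule<MyModule>("mymodule") in main.cpp.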
diff --git a/applications/gui2/src/inputoutput.cpp b/applications/gui2/src/inputoutput.cpp
new file mode 100644
index 000000000..9349b2329
--- /dev/null
+++ b/applications/gui2/src/inputoutput.cpp
@@ -0,0 +1,43 @@
+#include <loguru.hpp>
+#include <nlohmann/json.hpp>
+#include <ftl/codecs/shapes.hpp>
+#include <ftl/streams/filestream.hpp>
+
+#include "inputoutput.hpp"
+
+using ftl::gui2::InputOutput;
+
+using ftl::codecs::Channel;
+
+InputOutput::InputOutput(ftl::Configurable *root, ftl::net::Universe *net) :
+		net_(net) {
+
+	master_ = std::unique_ptr<ftl::ctrl::Master>(new ftl::ctrl::Master(root, net));
+	master_->onLog([](const ftl::ctrl::LogEvent &e){
+		const int v = e.verbosity;
+		switch (v) {
+		case -2:	LOG(ERROR) << "Remote log: " << e.message; break;
+		case -1:	LOG(WARNING) << "Remote log: " << e.message; break;
+		case 0:		LOG(INFO) << "Remote log: " << e.message; break;
+		}
+	});
+
+	//net_->onConnect([this](ftl::net::Peer *p) {
+		//ftl::pool.push([this](int id) {
+			// FIXME: Find a better option than waiting here.
+			// Wait to make sure streams have started properly.
+			//std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+			//_updateCameras(screen_->net()->findAll<string>("list_streams"));
+		//});
+	//});
+
+	feed_ = std::unique_ptr<ftl::stream::Feed>
+		(ftl::create<ftl::stream::Feed>(root, "feed", net));
+
+	speaker_ = std::unique_ptr<ftl::audio::Speaker>
+		(ftl::create<ftl::audio::Speaker>(root, "speaker"));
+
+	//auto* f = feed_->filter({ftl::codecs::Channel::Colour, ftl::codecs::Channel::Depth});
+	//feed_->render(f, Eigen::Matrix4d::Identity());
+}
diff --git a/applications/gui2/src/inputoutput.hpp b/applications/gui2/src/inputoutput.hpp
new file mode 100644
index 000000000..e80b75461
--- /dev/null
+++ b/applications/gui2/src/inputoutput.hpp
@@ -0,0 +1,50 @@
+#pragma once
+
+#include <memory>
+#include <mutex>
+#include <array>
+
+#include <ftl/handle.hpp>
+#include <ftl/configuration.hpp>
+#include <ftl/net/universe.hpp>
+#include <ftl/master.hpp>
+
+#include <ftl/streams/stream.hpp>
+#include <ftl/streams/receiver.hpp>
+#include <ftl/streams/feed.hpp>
+
+#include <ftl/streams/filestream.hpp>
+#include <ftl/audio/speaker.hpp>
+
+#include <ftl/data/new_frame.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/data/framepool.hpp>
+
+
+namespace ftl {
+namespace gui2 {
+
+class InputOutput {
+public:
+	InputOutput(ftl::Configurable *config, ftl::net::Universe *net);
+	InputOutput(const InputOutput&) = delete;
+	void operator=(const InputOutput&) = delete;
+
+	ftl::Handle addCallback(const std::function<bool(const ftl::data::FrameSetPtr&)>&);
+
+	ftl::net::Universe* net() const;
+	ftl::ctrl::Master* master() const { return master_.get(); }
+	ftl::stream::Feed* feed() const { return feed_.get(); }
+	ftl::audio::Speaker* speaker() const { return speaker_.get(); }
+
+private:
+	ftl::net::Universe* net_;
+	std::unique_ptr<ftl::stream::Feed> feed_;
+	std::unique_ptr<ftl::ctrl::Master> master_;
+	std::unique_ptr<ftl::audio::Speaker> speaker_;
+
+
+};
+
+}
+}
diff --git a/applications/gui2/src/main.cpp b/applications/gui2/src/main.cpp
new file mode 100644
index 000000000..2367bd03f
--- /dev/null
+++ b/applications/gui2/src/main.cpp
@@ -0,0 +1,209 @@
+#include <memory>
+
+#include <loguru.hpp>
+#include <nlohmann/json.hpp>
+
+#include <ftl/configuration.hpp>
+#include <ftl/net/universe.hpp>
+#include <ftl/net_configurable.hpp>
+#include <ftl/rgbd.hpp>
+
+#include <nanogui/nanogui.h>
+
+#include <cuda_gl_interop.h>
+
+#include "inputoutput.hpp"
+#include "module.hpp"
+#include "screen.hpp"
+
+#include "modules.hpp"
+
+#ifdef HAVE_PYLON
+#include <pylon/PylonIncludes.h>
+#endif
+
+using std::unique_ptr;
+using std::make_unique;
+
+/**
+ * FTL Graphical User Interface
+ * Single screen, loads configuration and sets up networking and input/output.
+ * Loads required modules to gui.
+ */
+class FTLGui {
+public:
+	FTLGui(int argc, char **argv);
+	~FTLGui();
+
+	template<typename T>
+	T* loadModule(const std::string &name);
+	void mainloop();
+
+private:
+	std::unique_ptr<ftl::Configurable> root_;
+	std::unique_ptr<ftl::net::Universe> net_;
+	std::unique_ptr<ftl::gui2::InputOutput> io_;
+
+	nanogui::ref<ftl::gui2::Screen> screen_;
+};
+
+template<typename T>
+T* FTLGui::loadModule(const std::string &name) {
+	return screen_->addModule<T>(name, root_.get(), screen_.get(), io_.get());
+}
+
+FTLGui::FTLGui(int argc, char **argv) {
+	using namespace ftl::gui2;
+
+	screen_ = new Screen();
+
+	int cuda_device;
+	cudaSafeCall(cudaGetDevice(&cuda_device));
+	//cudaSafeCall(cudaGLSetGLDevice(cuda_device));
+
+	root_ = unique_ptr<ftl::Configurable>(ftl::configure(argc, argv, "gui_default"));
+	net_ = unique_ptr<ftl::net::Universe>(ftl::create<ftl::net::Universe>(root_.get(), "net"));
+	io_ = make_unique<ftl::gui2::InputOutput>(root_.get(), net_.get());
+
+	net_->start();
+	net_->waitConnections();
+
+	loadModule<Themes>("themes");
+	loadModule<ThumbnailsController>("home")->activate();
+	loadModule<Camera>("camera");
+	loadModule<ConfigCtrl>("configwindow");
+	loadModule<Statistics>("statistics");
+#ifdef HAVE_CERES
+	loadModule<Calibration>("calibration");
+#endif
+	auto *adder = loadModule<AddCtrl>("adder");
+
+	for (int c = 1; c < argc; c++) {
+		std::string path(argv[c]);
+		try {
+			io_->feed()->add(path);
+			LOG(INFO) << "Add: " << path;
+		}
+		catch (const ftl::exception&) {
+			LOG(ERROR) << "Could not add: " << path;
+		}
+	}
+
+	if (io_->feed()->listSources().size() == 0) {
+		adder->show();
+	}
+
+	net_->onDisconnect([this](ftl::net::Peer *p) {
+		if (p->status() != ftl::net::Peer::kConnected) {
+			screen_->showError("Connection Failed", std::string("Could not connect to network peer: ") + p->getURI());
+		} else {
+			screen_->showError("Disconnection", std::string("Network peer disconnected: ") + p->getURI());
+		}
+	});
+
+	net_->onError([this](ftl::net::Peer *, const ftl::net::Error &err) {
+
+	});
+}
+
+FTLGui::~FTLGui() {
+	net_->shutdown();
+}
+
+void FTLGui::mainloop() {
+	// implements similar main loop as nanogui::mainloop()
+
+	ftl::timer::start();
+
+	screen_->setVisible(true);
+	screen_->drawAll();
+
+	float last_draw_time = 0.0f;
+
+	while (ftl::running) {
+		if (!screen_->visible()) {
+			ftl::running = false;
+		}
+		else if (glfwWindowShouldClose(screen_->glfwWindow())) {
+			screen_->setVisible(false);
+			ftl::running = false;
+		}
+		else {
+			float now = float(glfwGetTime());
+			float delta = now - last_draw_time;
+
+			// Generate poses and render a virtual frame here
+			// at full FPS (25 without VR and 90 with VR currently)
+			//screen_->render();
+
+			io_->feed()->render();
+
+			// Only draw the GUI at 25fps
+			if (delta >= 0.04f) {
+				last_draw_time = now;
+				screen_->drawAll();
+			}
+		}
+
+		// Wait for mouse/keyboard or empty refresh events
+		glfwWaitEventsTimeout(0.02); // VR headset issues
+		//glfwPollEvents();
+	}
+
+	// Process events once more
+	glfwPollEvents();
+
+	// Stop everything before deleting feed etc
+	LOG(INFO) << "Stopping...";
+	ftl::timer::stop(true);
+	ftl::pool.stop(true);
+	LOG(INFO) << "All threads stopped.";
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+int main(int argc, char **argv) {
+	#ifdef HAVE_PYLON
+	Pylon::PylonAutoInitTerm autoInitTerm;
+	#endif
+
+	// Note: This causes 100% CPU use but prevents the random frame drops.
+	ftl::timer::setHighPrecision(true);
+
+	{
+		nanogui::init();
+
+		FTLGui gui(argc, argv);
+
+		try {
+			gui.mainloop();
+		}
+		catch (const ftl::exception &e) {
+			#ifdef WIN32
+				std::string error_msg = std::string("Caught a fatal error: ") + std::string(e.what()) + std::string("\r\n") + std::string(e.trace());
+				MessageBoxA(nullptr, error_msg.c_str(), NULL, MB_ICONERROR | MB_OK);
+			#else
+				LOG(ERROR) << "Fatal error: " << e.what();
+				LOG(ERROR) << e.trace();
+			#endif
+		}
+		catch (const std::runtime_error &e) {
+			std::string error_msg = std::string("Caught a fatal error: ") + std::string(e.what());
+			#ifdef WIN32
+				MessageBoxA(nullptr, error_msg.c_str(), NULL, MB_ICONERROR | MB_OK);
+				LOG(ERROR) << error_msg;
+			#else
+				LOG(ERROR) << error_msg;
+			#endif
+			return -1;
+		}
+	}
+
+	// Must be after ~FTLGui since it destroys GL context.
+	nanogui::shutdown();
+
+	// Save config changes and delete final objects
+	ftl::config::cleanup();
+
+	return 0;
+}
diff --git a/applications/gui2/src/module.hpp b/applications/gui2/src/module.hpp
new file mode 100644
index 000000000..ca048f124
--- /dev/null
+++ b/applications/gui2/src/module.hpp
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "view.hpp"
+#include "inputoutput.hpp"
+
+#include <ftl/configurable.hpp>
+#include <nanogui/entypo.h>
+#include <nanogui/button.h>
+
+namespace ftl {
+namespace gui2 {
+
+class Screen;
+
+class Module : public ftl::Configurable {
+public:
+	Module(nlohmann::json &config, Screen *screen, InputOutput *io) :
+		Configurable(config), screen(screen), io(io) {}
+
+	/** called by constructor */
+	virtual void init() {};
+	/** called before draw */
+	virtual void update(double) {};
+	virtual ~Module() {};
+
+	ftl::gui2::Screen* const screen;
+
+protected:
+	ftl::gui2::InputOutput* const io;
+};
+
+}
+}
diff --git a/applications/gui2/src/modules.hpp b/applications/gui2/src/modules.hpp
new file mode 100644
index 000000000..83cac08c1
--- /dev/null
+++ b/applications/gui2/src/modules.hpp
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "modules/thumbnails.hpp"
+#include "modules/camera.hpp"
+#include "modules/config.hpp"
+#include "modules/themes.hpp"
+#include "modules/statistics.hpp"
+#ifdef HAVE_CERES
+#include "modules/calibration/calibration.hpp"
+#endif
+#include "modules/addsource.hpp"
diff --git a/applications/gui2/src/modules/addsource.cpp b/applications/gui2/src/modules/addsource.cpp
new file mode 100644
index 000000000..7ac74b55b
--- /dev/null
+++ b/applications/gui2/src/modules/addsource.cpp
@@ -0,0 +1,78 @@
+#include "addsource.hpp"
+
+using ftl::gui2::AddCtrl;
+
+void AddCtrl::init() {
+	button = screen->addButton(ENTYPO_ICON_PLUS);
+	button->setTooltip("Add New Source");
+	button->setCallback([this](){
+		button->setPushed(false);
+		show();
+	});
+	button->setVisible(true);
+}
+
+void AddCtrl::show() {
+	// Note: By chance, the pointer can in fact pass this test as another
+	// widget gets allocated to the exact same address
+	if (!window || screen->childIndex(window) == -1) {
+		window = new ftl::gui2::AddSourceWindow(screen, this);
+	}
+	window->setVisible(true);
+	window->requestFocus();
+	screen->performLayout();
+}
+
+void AddCtrl::disposeWindow() {
+	window->dispose();
+	window = nullptr;
+}
+
+ftl::Configurable *AddCtrl::add(const std::string &uri) {
+	try {
+		if (io->feed()->sourceActive(uri)) {
+			io->feed()->remove(uri);
+		} else {
+			io->feed()->add(uri);
+		}
+	} catch (const ftl::exception &e) {
+		screen->showError("Exception", e.what());
+	}
+	return nullptr;
+}
+
+std::vector<std::string> AddCtrl::getHosts() {
+	return std::move(io->feed()->knownHosts());
+}
+
+std::vector<std::string> AddCtrl::getGroups() {
+	return std::move(io->feed()->availableGroups());
+}
+
+std::set<ftl::stream::SourceInfo> AddCtrl::getRecent() {
+	return std::move(io->feed()->recentSources());
+}
+
+std::vector<std::string> AddCtrl::getNetSources() {
+	return std::move(io->feed()->availableNetworkSources());
+}
+
+std::vector<std::string> AddCtrl::getFileSources() {
+	return std::move(io->feed()->availableFileSources());
+}
+
+std::vector<std::string> AddCtrl::getDeviceSources() {
+	return std::move(io->feed()->availableDeviceSources());
+}
+
+std::string AddCtrl::getSourceName(const std::string &uri) {
+	return io->feed()->getName(uri);
+}
+
+bool AddCtrl::isSourceActive(const std::string &uri) {
+	return io->feed()->sourceActive(uri);
+}
+
+AddCtrl::~AddCtrl() {
+	// remove window?
+}
diff --git a/applications/gui2/src/modules/addsource.hpp b/applications/gui2/src/modules/addsource.hpp
new file mode 100644
index 000000000..eab8fef9d
--- /dev/null
+++ b/applications/gui2/src/modules/addsource.hpp
@@ -0,0 +1,43 @@
+#pragma once
+
+#include "../module.hpp"
+#include "../screen.hpp"
+
+#include "../views/addsource.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Controller for adding sources etc.
+ */
+class AddCtrl : public Module {
+public:
+	using Module::Module;
+	virtual ~AddCtrl();
+
+	virtual void init() override;
+	virtual void show();
+	void disposeWindow();
+
+	ftl::Configurable *add(const std::string &uri);
+
+	std::vector<std::string> getHosts();
+	std::set<ftl::stream::SourceInfo> getRecent();
+	std::vector<std::string> getNetSources();
+	std::vector<std::string> getFileSources();
+	std::vector<std::string> getDeviceSources();
+	std::vector<std::string> getGroups();
+	std::string getSourceName(const std::string &uri);
+	bool isSourceActive(const std::string &uri);
+
+	inline ftl::stream::Feed *feed() { return io->feed(); }
+
+
+private:
+	nanogui::ToolButton *button;
+	ftl::gui2::AddSourceWindow *window = nullptr;
+};
+
+}
+}
diff --git a/applications/gui2/src/modules/calibration/calibration.cpp b/applications/gui2/src/modules/calibration/calibration.cpp
new file mode 100644
index 000000000..f892ba61c
--- /dev/null
+++ b/applications/gui2/src/modules/calibration/calibration.cpp
@@ -0,0 +1,381 @@
+
+#include <loguru.hpp>
+
+#include "calibration.hpp"
+#include "../../screen.hpp"
+#include "../../widgets/popupbutton.hpp"
+#include "../../views/calibration/intrinsicview.hpp"
+#include "../../views/calibration/extrinsicview.hpp"
+#include "../../views/calibration/stereoview.hpp"
+
+#include <opencv2/aruco.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include <ftl/calibration/optimize.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <ftl/threads.hpp>
+
+#include <nanogui/entypo.h>
+#include <nanogui/layout.h>
+
+using ftl::gui2::Calibration;
+
+using ftl::calibration::CalibrationData;
+using ftl::codecs::Channel;
+using ftl::data::FrameID;
+using ftl::data::FrameSetPtr;
+
+
+// ==== OpenCVCalibrateFlags ===================================================
+
+using ftl::gui2::OpenCVCalibrateFlags;
+using ftl::gui2::OpenCVCalibrateFlagsStereo;
+
+int OpenCVCalibrateFlags::defaultFlags() const { return (
+			cv::CALIB_RATIONAL_MODEL |
+			cv::CALIB_THIN_PRISM_MODEL |
+			cv::CALIB_FIX_ASPECT_RATIO
+);}
+
+std::vector<int> OpenCVCalibrateFlags::list() const {
+	return {
+		cv::CALIB_USE_INTRINSIC_GUESS,
+		cv::CALIB_FIX_FOCAL_LENGTH,
+		cv::CALIB_FIX_PRINCIPAL_POINT,
+		cv::CALIB_FIX_ASPECT_RATIO,
+		cv::CALIB_ZERO_TANGENT_DIST,
+		cv::CALIB_FIX_K1,
+		cv::CALIB_FIX_K2,
+		cv::CALIB_FIX_K3,
+		cv::CALIB_FIX_K4,
+		cv::CALIB_FIX_K5,
+		cv::CALIB_FIX_K6,
+		cv::CALIB_RATIONAL_MODEL,
+		cv::CALIB_THIN_PRISM_MODEL,
+		cv::CALIB_FIX_S1_S2_S3_S4,
+		cv::CALIB_TILTED_MODEL,
+		cv::CALIB_FIX_TAUX_TAUY
+	};
+}
+
+std::string OpenCVCalibrateFlags::name(int i) const {
+	using namespace cv;
+	switch(i) {
+		case CALIB_FIX_INTRINSIC:
+			return "CALIB_FIX_INTRINSIC";
+
+		case CALIB_FIX_FOCAL_LENGTH:
+			return "CALIB_FIX_FOCAL_LENGTH";
+
+		case CALIB_USE_INTRINSIC_GUESS:
+			return "CALIB_USE_INTRINSIC_GUESS";
+
+		case CALIB_USE_EXTRINSIC_GUESS:
+			return "CALIB_USE_EXTRINSIC_GUESS";
+
+		case CALIB_FIX_PRINCIPAL_POINT:
+			return "CALIB_FIX_PRINCIPAL_POINT";
+
+		case CALIB_FIX_ASPECT_RATIO:
+			return "CALIB_FIX_ASPECT_RATIO";
+
+		case CALIB_SAME_FOCAL_LENGTH:
+			return "CALIB_SAME_FOCAL_LENGTH";
+
+		case CALIB_ZERO_TANGENT_DIST:
+			return "CALIB_ZERO_TANGENT_DIST";
+
+		case CALIB_FIX_K1:
+			return "CALIB_FIX_K1";
+
+		case CALIB_FIX_K2:
+			return "CALIB_FIX_K2";
+
+		case CALIB_FIX_K3:
+			return "CALIB_FIX_K3";
+
+		case CALIB_FIX_K4:
+			return "CALIB_FIX_K4";
+
+		case CALIB_FIX_K5:
+			return "CALIB_FIX_K5";
+
+		case CALIB_FIX_K6:
+			return "CALIB_FIX_K6";
+
+		case CALIB_RATIONAL_MODEL:
+			return "CALIB_RATIONAL_MODEL";
+
+		case CALIB_THIN_PRISM_MODEL:
+			return "CALIB_THIN_PRISM_MODEL";
+
+		case CALIB_FIX_S1_S2_S3_S4:
+			return "CALIB_FIX_S1_S2_S3_S4";
+
+		case CALIB_TILTED_MODEL:
+			return "CALIB_TILTED_MODEL";
+
+		case CALIB_FIX_TAUX_TAUY:
+			return "CALIB_FIX_TAUX_TAUY";
+	};
+	return "";
+}
+
+
+std::string OpenCVCalibrateFlags::explain(int i) const {
+	using namespace cv;
+	switch(i) {
+		case CALIB_FIX_INTRINSIC:
+			return "Fix all intrinsic parameters.";
+
+		case CALIB_FIX_FOCAL_LENGTH:
+			return "Fix focal length (fx and fy).";
+
+		case CALIB_USE_INTRINSIC_GUESS:
+			return "Use valid initial values of fx, fy, cx, cy that are "
+					"optimized further. Otherwise, (cx, cy) is initially set "
+					"to the image center and focal distances are computed in "
+					"a least-squares fashion.";
+
+		case CALIB_USE_EXTRINSIC_GUESS:
+			return "";
+
+		case CALIB_FIX_PRINCIPAL_POINT:
+			return "The principal point is not changed during the global "
+					"optimization. It stays at the center or at a location "
+					"specified in initial parameters.";
+
+		case CALIB_FIX_ASPECT_RATIO:
+			return "Consider only fy as a free parameter. The ratio fx/fy "
+					"stays the same. When CALIB_USE_INTRINSIC_GUESS is not "
+					"set, the actual input values of fx and fy are ignored, "
+					"only their ratio is computed and used further.";
+
+		case CALIB_ZERO_TANGENT_DIST:
+			return "Tangential distortion coefficients (p1,p2) are set to "
+					"zeros and stay zero.";
+
+		case CALIB_FIX_K1:
+		case CALIB_FIX_K2:
+		case CALIB_FIX_K3:
+		case CALIB_FIX_K4:
+		case CALIB_FIX_K5:
+		case CALIB_FIX_K6:
+			return "The radial distortion coefficient is not changed during "
+					"the optimization. If CALIB_USE_INTRINSIC_GUESS is set, "
+					"the coefficient from initial values is used. Otherwise, "
+					"it is set to 0.";
+
+		case CALIB_RATIONAL_MODEL:
+			return "Coefficients k4, k5, and k6 are enabled.";
+
+		case CALIB_THIN_PRISM_MODEL:
+			return "Coefficients s1, s2, s3 and s4 are enabled.";
+
+		case CALIB_FIX_S1_S2_S3_S4:
+			return "The thin prism distortion coefficients are not changed "
+					"during the optimization. If CALIB_USE_INTRINSIC_GUESS is "
+					"set, the supplied coefficients are used. Otherwise, they "
+					"are set to 0.";
+
+		case CALIB_TILTED_MODEL:
+			return "Coefficients tauX and tauY are enabled.";
+
+		case CALIB_FIX_TAUX_TAUY:
+			return "The coefficients of the tilted sensor model are not "
+					"changed during the optimization. If "
+					"CALIB_USE_INTRINSIC_GUESS is set, the supplied "
+					"coefficients are used. Otherwise, they are set to 0.";
+	};
+	return "";
+}
+
+std::vector<int> OpenCVCalibrateFlagsStereo::list() const {
+	auto ls = OpenCVCalibrateFlags::list();
+	ls.insert(ls.begin(), cv::CALIB_FIX_INTRINSIC);
+	ls.insert(ls.begin() + 1, cv::CALIB_SAME_FOCAL_LENGTH);
+	ls.insert(ls.begin() + 1, cv::CALIB_USE_EXTRINSIC_GUESS);
+	return ls;
+}
+
+
+std::string OpenCVCalibrateFlagsStereo::explain(int i) const {
+	using namespace cv;
+	switch(i) {
+		case CALIB_FIX_INTRINSIC:
+			return "Fix intrinsic camera parameters (focal length, aspect "
+					"ratio, principal point and distortion coefficients)";
+
+		case CALIB_USE_INTRINSIC_GUESS:
+			return "Optimize some or all of the intrinsic parameters according "
+					"to the specified flags";
+
+		case CALIB_USE_EXTRINSIC_GUESS:
+			return "Rotation and translation have valid initial values that "
+					"are optimized further. Otherwise rotation and translation "
+					"are initialized to the median value of the pattern views.";
+
+		case CALIB_SAME_FOCAL_LENGTH:
+			return "Enforce fx_l == fx_r && fy_l == fy_r";
+
+		default:
+			return OpenCVCalibrateFlags::explain(i);
+	};
+}
+
+int OpenCVCalibrateFlagsStereo::defaultFlags() const {
+	return cv::CALIB_FIX_INTRINSIC;
+}
+
+// ==== Calibration module =====================================================
+// Loads sub-modules and adds buttons to main screen.
+
+void Calibration::init() {
+
+	screen->addModule<IntrinsicCalibration>("calib_intrinsic", this, screen, io);
+	screen->addModule<ExtrinsicCalibration>("calib_extrinsic", this, screen, io);
+	screen->addModule<StereoCalibration>("calib_stereo", this, screen, io);
+
+	// NOTE: If more GUI code is added, consider moving the GUI code to a new
+	//       file in ../views/
+
+	// Should implement a PopupMenu widget which would abstract the building
+	// steps and provide a common look and feel. (TODO)
+
+	auto button = screen->addButton<ftl::gui2::PopupButton>("", ENTYPO_ICON_CAMERA);
+	button->setChevronIcon(0);
+	button->setTooltip("Calibrate Cameras");
+
+	auto* popup = button->popup();
+	popup->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 10, 6));
+
+	auto* button_intrinsic = new nanogui::Button(popup, "Intrinsic Calibration");
+	button_intrinsic->setCallback([this, button, button_intrinsic, popup](){
+		button->setPushed(false);
+		button_intrinsic->setPushed(false);
+		button_intrinsic->setFocused(false);
+		auto* calib = screen->getModule<IntrinsicCalibration>();
+		auto* view = new ftl::gui2::IntrinsicCalibrationStart(screen, calib);
+		screen->setView(view);
+	});
+
+	auto* button_extrinsic = new nanogui::Button(popup, "Extrinsic Calibration");
+	button_extrinsic->setCallback([this, button, button_extrinsic, popup](){
+		button->setPushed(false);
+		button_extrinsic->setPushed(false);
+		button_extrinsic->setFocused(false);
+		auto* calib = screen->getModule<ExtrinsicCalibration>();
+		auto* view = new ftl::gui2::ExtrinsicCalibrationStart(screen, calib);
+		screen->setView(view);
+	});
+
+	auto* button_stereo = new nanogui::Button(popup, "Stereo Calibration");
+	button_stereo->setCallback([this, button, button_stereo, popup](){
+		button->setPushed(false);
+		button_stereo->setPushed(false);
+		button_stereo->setFocused(false);
+		auto* calib = screen->getModule<StereoCalibration>();
+		auto* view = new ftl::gui2::StereoCalibrationStart(screen, calib);
+		screen->setView(view);
+	});
+
+	button->setVisible(true);
+}
+
+Calibration::~Calibration() {
+	// remove button
+}
+
+// ==== CalibrationModule ======================================================
+
+using ftl::gui2::CalibrationModule;
+
+
+bool CalibrationModule::checkFrame(ftl::data::Frame& frame) {
+
+	if (wait_update_) {
+		return false;
+	}
+
+	if (frame.hasChannel(Channel::CalibrationData)) {
+
+		if (calibration_enabled_ != calibrationEnabled(frame)) {
+
+			LOG(INFO) << std::string(calibration_enabled_ ? "Enabling" : "Disabling") +
+						 " calibration (changed outside)";
+
+			setCalibrationMode(frame, calibration_enabled_);
+			return false;
+		}
+	}
+	else {
+		static bool logged_once__ = false;
+		if (!logged_once__) {
+			LOG(WARNING) << "No CalibrationData channel, is this a valid camera?";
+			logged_once__ = true;
+		}
+		return false;
+	}
+
+	return true;
+}
+
+bool CalibrationModule::calibrationEnabled(ftl::data::Frame& frame) {
+	auto& calib_data = frame.get<CalibrationData>(Channel::CalibrationData);
+	return calib_data.enabled;
+}
+
+void CalibrationModule::setCalibration(ftl::data::Frame& frame, CalibrationData data) {
+	// previous callbacks are cancelled!
+	wait_update_ = true;
+
+	// updates enabled_ status with given calibration data
+
+	auto response = frame.response();
+	response.create<CalibrationData>(Channel::CalibrationData) = data;
+	update_handle_ = frame.onChange(Channel::CalibrationData,
+			[&wait_update = wait_update_,
+			 &enabled = calibration_enabled_,
+			 value = data.enabled]
+			(ftl::data::Frame& frame, ftl::codecs::Channel){
+
+		enabled = value;
+		wait_update = false;
+		return true;
+	});
+}
+
+void CalibrationModule::setCalibrationMode(ftl::data::Frame& frame, bool value) {
+
+	if (!frame.hasChannel(Channel::CalibrationData)) {
+		LOG(ERROR) << 	"Trying to change calibration status of frame which does "
+						"not contain CalibrationData";
+		return;
+	}
+
+	auto data = CalibrationData(frame.get<CalibrationData>(Channel::CalibrationData));
+	data.enabled = value;
+	setCalibration(frame, data);
+}
+
+void CalibrationModule::setCalibrationMode(bool value) {
+	calibration_enabled_ = value;
+}
+
+cv::Mat CalibrationModule::getMat(ftl::rgbd::Frame& frame, Channel c) {
+	auto& vframe = frame.get<ftl::rgbd::VideoFrame>(c);
+	cv::Mat host;
+	if (vframe.isGPU())	{ vframe.getGPU().download(host); }
+	else					{ host =  vframe.getCPU(); }
+	return host;
+}
+
+cv::cuda::GpuMat CalibrationModule::getGpuMat(ftl::rgbd::Frame& frame, Channel c) {
+	auto& vframe = frame.get<ftl::rgbd::VideoFrame>(c);
+	cv::cuda::GpuMat gpu;
+	if (!vframe.isGPU())	{ gpu.upload(vframe.getCPU()); }
+	else					{ gpu = vframe.getGPU(); }
+	return gpu;
+}
diff --git a/applications/gui2/src/modules/calibration/calibration.hpp b/applications/gui2/src/modules/calibration/calibration.hpp
new file mode 100644
index 000000000..af27c0f82
--- /dev/null
+++ b/applications/gui2/src/modules/calibration/calibration.hpp
@@ -0,0 +1,484 @@
+#pragma once
+
+#include "../../module.hpp"
+
+#include <ftl/calibration/object.hpp>
+#include <ftl/calibration/extrinsic.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <opencv2/core/types.hpp>
+
+namespace ftl
+{
+namespace gui2
+{
+
+/** OpenCV calibration flags */
+class OpenCVCalibrateFlags {
+public:
+	bool has(unsigned int flag) const { return (flags_ & flag) != 0; }
+	void set(unsigned int flag) { flags_ |= flag; }
+	void unset(unsigned int  flag) { flags_ &= ~flag; }
+	void reset() { flags_ = 0; }
+	std::string name(int) const;
+	operator int() { return flags_; }
+
+	virtual int defaultFlags() const;
+	virtual std::vector<int> list() const;
+	virtual std::string explain(int) const;
+
+private:
+	int flags_ = 0;
+};
+
+class  OpenCVCalibrateFlagsStereo : public OpenCVCalibrateFlags {
+public:
+	int defaultFlags() const override;
+	std::vector<int> list() const override;
+	std::string explain(int) const override;
+};
+
+/**
+ * Calibration. Loads Intrinsic and Extrinsic calibration modules and
+ * adds buttons to main screen.
+ */
+class Calibration : public Module {
+public:
+	using Module::Module;
+	virtual ~Calibration();
+
+	virtual void init() override;
+};
+
+/**
+ * Calibration base module. Implements methods to loading/saving calibration.
+ * Also manages enabling/disabling calibration.
+ */
+
+class CalibrationModule : public Module {
+public:
+	using Module::Module;
+	virtual void init() = 0;
+
+protected:
+	/** Set new calibration. */
+	void setCalibration(ftl::data::Frame& frame, ftl::calibration::CalibrationData data);
+
+	/** Activate/deactivate calibration (rectification, distortion corrections,
+	 *  ...). See also StereoVideo */
+	/** set mode, update performed by checkFrame() when next called */
+	void setCalibrationMode(bool value);
+	/** set mode directly to frame */
+	void setCalibrationMode(ftl::data::Frame& frame, bool value);
+
+	/** Check that everything is in the expected state. If it returns true,
+	 * processing can continue. Also sets the calibration mode if it does not
+	 * match the stored state. Should always be called in the FrameSet callback
+	 * (see the usage sketch after this class).
+	 */
+	bool checkFrame(ftl::data::Frame& frame);
+
+	cv::cuda::GpuMat getGpuMat(ftl::rgbd::Frame&, ftl::codecs::Channel);
+	cv::Mat getMat(ftl::rgbd::Frame&, ftl::codecs::Channel);
+
+private:
+	bool calibrationEnabled(ftl::data::Frame& frame);
+
+	std::atomic_bool wait_update_ = false;
+	std::atomic_bool calibration_enabled_ = false;
+	ftl::Handle update_handle_;
+};
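+
+/* Usage sketch (illustrative only, not part of the API): a derived module
+ * would typically call checkFrame() from a frameset callback registered via
+ * InputOutput::addCallback() (declared in inputoutput.hpp); handle_ below is a
+ * hypothetical ftl::Handle member of the derived module.
+ *
+ *   handle_ = io->addCallback([this](const ftl::data::FrameSetPtr &fs) {
+ *       auto &frame = fs->frames[0];
+ *       if (!checkFrame(frame)) { return true; }  // mode not synced yet, skip
+ *       // ... detect the calibration pattern, update module state, etc. ...
+ *       return true;
+ *   });
+ */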
+
+/**
+ * GUI for camera intrinsic calibration. Only sources which have CalibrationData
+ * channel can be calibrated (StereoVideo receives updates and saves them).
+ *
+ * TODO: Catch exceptions in future and report back to GUI. At the moment
+ *		 errors are printed with logging.
+ * TODO: View: add button to get back to chessboard/capture parameters.
+ * TODO: Saving calibration should give more feedback, saved just tells it was
+ * 		 sent but it does not verify it was received (or know if it was
+ * 		 successfully saved; if saving fails do not write file/changes; how
+ * 		 to inform GUI/client about the error?)
+ *
+ * TODO: FEATURE: Add timer to calibration window showing remaining time until
+ * 		 next picture is captured.
+ * TODO: FEATURE: Add calibration image window for browsing calibration images
+ * 		 and discarding bad images manually. Also should support visualization
+ * 		 of calibration results; draw detected points and re-projected points
+ * 		 using OpenGL (reproject points implemented in calibration:: using
+ * 		 using OpenGL (point reprojection is implemented in calibration::
+ * 		 with OpenCV).
+ * 		 distortion model.
+ */
+class IntrinsicCalibration : public CalibrationModule {
+public:
+	using CalibrationModule::CalibrationModule;
+
+	virtual void init() override;
+	virtual ~IntrinsicCalibration();
+
+	/** start calibration process, replaces active view */
+	void start(ftl::data::FrameID id);
+
+	bool hasChannel(ftl::codecs::Channel c);
+	/** select channel */
+	void setChannel(ftl::codecs::Channel c);
+	ftl::codecs::Channel channel() { return state_->channel; }
+
+	int count() { return state_->count; }
+	int calibrated() { return state_->calibrated; }
+
+	OpenCVCalibrateFlags& flags() { return state_->flags; };
+	int defaultFlags();
+
+	/** Reset calibration instance; discards all state. */
+	void reset();
+
+	void setChessboard(cv::Size, double);
+	cv::Size chessboardSize();
+	double squareSize();
+
+	/** Returns if capture/calibration is still processing in background.
+	 * calib() instance must not be modified while isBusy() is true.
+	 */
+	bool isBusy();
+
+	/** Start/stop capture. After stopping, use isBusy() to check when last
+	 * frame is finished.
+	 */
+	void setCapture(bool v) { state_->capture = v; }
+	bool capturing() { return state_->capture; }
+
+	/** get/set capture frequency: interval between processed frames in
+	 * chessboard detection
+	*/
+	void setFrequency(float v) { state_->frequency = v; }
+	float frequency() { return state_->frequency; }
+
+	int maxIter() { return state_->max_iter; }
+	void setMaxIter(int v) { state_->max_iter = v; }
+
+	/** Run calibration in another thread. Check status with isBusy(). */
+	void run();
+
+	/** Save calibration */
+	void save();
+
+	ftl::calibration::CalibrationData::Intrinsic calibration();
+
+	float reprojectionError() { return state_->reprojection_error; }
+
+	/** Get sensor size from config/previous calibration (in mm) */
+	cv::Size2d sensorSize();
+	void setSensorSize(cv::Size2d size);
+
+	/** Set/get focal length in mm */
+	double focalLength();
+	void setFocalLength(double value, cv::Size2d sensor_size);
+
+	/** Set principal point at image center */
+	void resetPrincipalPoint();
+
+	void resetDistortion();
+
+	/** get current frame */
+	cv::cuda::GpuMat getFrame();
+	bool hasFrame();
+
+	cv::cuda::GpuMat getFrameUndistort();
+
+	/** get previous points (visualization) */
+	std::vector<cv::Point2f> previousPoints();
+	// must not be running_
+	//std::vector<cv::Point2f> getPoints(int n);
+	//std::vector<cv::Point2f> getProjectedPoints(int n);
+
+	/** List sources which can be calibrated.
+	 */
+	std::vector<std::pair<std::string, ftl::data::FrameID>> listSources(bool all=false);
+
+private:
+	bool onFrame_(const ftl::data::FrameSetPtr& fs);
+	/** Set actual channel (channel_alt_) to high res if found in fs */
+	void setChannel_(ftl::data::FrameSetPtr fs);
+
+	std::future<void> future_;
+	std::mutex mtx_;
+	ftl::data::FrameSetPtr fs_current_;
+	ftl::data::FrameSetPtr fs_update_;
+
+	struct State {
+		cv::Mat gray;
+
+		ftl::codecs::Channel channel;
+		ftl::codecs::Channel channel_alt;
+		ftl::data::FrameID id;
+
+		std::atomic_bool capture = false;
+		std::atomic_bool running = false;
+		float last = 0.0f;
+		float frequency = 0.5f;
+		bool calibrated = false;
+		int count = 0;
+		int max_iter = 50;
+		float reprojection_error = NAN;
+		std::vector<std::vector<cv::Point2f>> points;
+		std::vector<std::vector<cv::Point3f>> points_object;
+
+		std::unique_ptr<ftl::calibration::ChessboardObject> object;
+		OpenCVCalibrateFlags flags;
+		ftl::calibration::CalibrationData::Intrinsic calib;
+	};
+
+	std::unique_ptr<State> state_;
+};
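+
+// Example (illustrative sketch): the expected intrinsic calibration flow, as
+// driven from a view. Chessboard and other values are placeholders.
+//
+//   auto* calib = screen->getModule<IntrinsicCalibration>();
+//   calib->start(id);                      // replaces the active view
+//   calib->setChessboard({9, 6}, 0.025);   // 9x6 inner corners, 25 mm squares
+//   calib->setCapture(true);               // collect chessboard detections
+//   // ... later ...
+//   calib->setCapture(false);
+//   calib->run();                          // calibrate in a background thread
+//   while (calib->isBusy()) { /* wait */ }
+//   if (calib->calibrated()) { calib->save(); }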
+
+////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * GUI for camera extrinsic calibration. Sources must be in the same FrameSet
+ * (for synchronization) and have a CalibrationData channel. The resulting
+ * extrinsic parameters can be used to calculate parameters for stereo
+ * rectification.
+ */
+class ExtrinsicCalibration : public CalibrationModule {
+public:
+	using CalibrationModule::CalibrationModule;
+
+	virtual void init() override;
+	virtual ~ExtrinsicCalibration();
+
+	/** List framesets and calibrateable sources */
+	std::vector<std::pair<std::string, unsigned int>> listFrameSets();
+	std::vector<std::pair<std::string, ftl::data::FrameID>> listSources(unsigned int fsid, bool all);
+
+	/** start calibration process for given frames. Assumes stereo,
+	 * calibration: left and right channels are used. */
+	void start(unsigned int fsid, std::vector<ftl::data::FrameID> sources);
+
+	/** discard current state and load defaults */
+	void reset();
+
+	int cameraCount();
+
+	ftl::calibration::ExtrinsicCalibration& calib();
+
+	/** hasFrame(int) must be true before calling getFrame() **/
+	bool hasFrame(int camera);
+	const cv::cuda::GpuMat getFrame(int camera);
+	const cv::cuda::GpuMat getFrameRectified(int camera);
+
+	/** Next FrameSet, returns true if new FrameSet is available */
+	bool next();
+
+	bool capturing();
+	void setCapture(bool value);
+
+	/** Set callback for point detection. Callback returns number of points
+	 * found, takes input frame, channel and output points as arguments.
+	 */
+	//void setCallback(const std::function<int(cv::InputArray, const cv::Mat&, const cv::Mat&, std::vector<cv::Point2f>&)>& cb) { cb_detect_ = cb; }
+
+	struct CameraID : ftl::data::FrameID {
+		CameraID(unsigned int fs, unsigned int s, ftl::codecs::Channel channel) :
+			ftl::data::FrameID::FrameID(fs, s), channel(channel) {}
+		const ftl::codecs::Channel channel;
+	};
+
+	/** list selected (active) cameras */
+	std::vector<CameraID> cameras();
+
+	/** Run calibration in another thread. Check status with isBusy(). */
+	void run();
+
+	/** Returns if capture/calibration is still processing in background.
+	 * calib() instance must not be modified while isBusy() is true.
+	 */
+	bool isBusy();
+
+	/** status message */
+	std::string status() { return state_.calib.status(); }
+
+	/** Get previous points (for visualization) */
+	const std::vector<cv::Point2d>& previousPoints(int camera);
+
+	/** Get number of frames captured by a camera */
+	int getFrameCount(int c);
+
+	void updateCalibration(int c);
+	void updateCalibration();
+
+	void saveInput(const std::string& filename);
+	void loadInput(const std::string& filename);
+
+	ftl::calibration::CalibrationData::Calibration calibration(int camera);
+
+	double reprojectionError(int camera=-1);
+
+	enum Flags {
+		ZERO_DISTORTION = 1,
+		FIX_INTRINSIC = 2,
+		FIX_FOCAL = 4,
+		FIX_PRINCIPAL_POINT = 8,
+		FIX_DISTORTION = 16,
+		LOSS_CAUCHY = 32,
+	};
+
+	void setFlags(int flags);
+	int flags() const;
+
+protected:
+	ftl::calibration::CalibrationData::Calibration getCalibration(CameraID id);
+
+	/** Calculate stereo rectification maps for two cameras; state_.maps1 and
+	 * state_.maps2 must already be initialized to the correct size. */
+	void stereoRectify(int cl, int cr,
+		const ftl::calibration::CalibrationData::Calibration& l,
+		const ftl::calibration::CalibrationData::Calibration& r);
+
+private:
+	// map frameid+channel to int. used by ExtrinsicCalibration
+	struct Camera {
+		const CameraID id;
+		ftl::calibration::CalibrationData::Calibration calib;
+	};
+
+	bool onFrameSet_(const ftl::data::FrameSetPtr& fs);
+
+	std::future<void> future_;
+	std::atomic_bool running_;
+	ftl::data::FrameSetPtr fs_current_;
+	ftl::data::FrameSetPtr fs_update_;
+
+	struct State {
+		bool capture = false;
+		int min_cameras = 2;
+		int flags = 0;
+		std::vector<Camera> cameras;
+
+		std::unique_ptr<ftl::calibration::CalibrationObject> calib_object;
+		ftl::calibration::ExtrinsicCalibration calib;
+		std::vector<std::vector<cv::Point2d>> points_prev;
+		std::vector<cv::cuda::GpuMat> maps1;
+		std::vector<cv::cuda::GpuMat> maps2;
+	};
+	State state_;
+};
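+
+// Example (illustrative sketch): the expected extrinsic calibration flow for a
+// frameset; "sources" would normally come from listSources().
+//
+//   auto* calib = screen->getModule<ExtrinsicCalibration>();
+//   calib->start(fsid, sources);           // adds a Left/Right pair per source
+//   calib->setCapture(true);               // detect the calibration target
+//   // ... later ...
+//   calib->setCapture(false);
+//   calib->setFlags(ExtrinsicCalibration::FIX_FOCAL | ExtrinsicCalibration::LOSS_CAUCHY);
+//   calib->run();                          // bundle adjustment in a background thread
+//   while (calib->isBusy()) { /* wait */ }
+//   calib->updateCalibration();            // write optimized results back to sources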
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** Stereo calibration using OpenCV's stereoCalibrate(). */
+class StereoCalibration : public CalibrationModule {
+public:
+	using CalibrationModule::CalibrationModule;
+	virtual void init() override;
+	virtual ~StereoCalibration();
+
+	/** start calibration process, replaces active view */
+	void start(ftl::data::FrameID id);
+
+	bool hasChannel(ftl::codecs::Channel c);
+
+	void setChessboard(cv::Size, double);
+	cv::Size chessboardSize();
+	double squareSize();
+
+	/** Reset calibration instance; discards all state. */
+	void reset();
+
+	OpenCVCalibrateFlagsStereo& flags() { return state_->flags; };
+	void resetFlags();
+
+	/** Returns if capture/calibration is still processing in background.
+	 * calib() instance must not be modified while isBusy() is true.
+	 */
+	bool isBusy();
+
+	/** Start/stop capture. After stopping, use isBusy() to check when last
+	 * frame is finished.
+	 */
+	void setCapture(bool v);
+	bool capturing();
+
+	/** get/set capture frequency: interval between processed frames in
+	 * chessboard detection
+	*/
+	void setFrequency(float v);
+	float frequency();
+
+	/** Run calibration in another thread. Check status with isBusy(). */
+	void run();
+
+	/** Save calibration */
+	void save();
+
+	/** Check whether calibration is valid: baseline > 0. */
+	bool calibrated();
+
+	/** get current frame */
+	cv::cuda::GpuMat getLeft();
+	cv::cuda::GpuMat getRight();
+	cv::cuda::GpuMat getLeftRectify();
+	cv::cuda::GpuMat getRightRectify();
+	bool hasFrame();
+
+	ftl::calibration::CalibrationData::Calibration calibrationLeft();
+	ftl::calibration::CalibrationData::Calibration calibrationRight();
+	double baseline();
+
+	/** get previous points (visualization) */
+	std::vector<std::vector<cv::Point2f>> previousPoints();
+	cv::cuda::GpuMat getLeftPrevious();
+	cv::cuda::GpuMat getRightPrevious();
+	int count() const { return state_->count; }
+	/** List sources which can be calibrated.
+	 */
+	std::vector<std::pair<std::string, ftl::data::FrameID>> listSources(bool all=false);
+
+private:
+	bool onFrame_(const ftl::data::FrameSetPtr& fs);
+	void calculateRectification();
+	ftl::rgbd::Frame& frame_();
+
+	ftl::codecs::Channel channelLeft_();
+	ftl::codecs::Channel channelRight_();
+
+	std::future<void> future_;
+	std::mutex mtx_;
+	ftl::data::FrameSetPtr fs_current_;
+	ftl::data::FrameSetPtr fs_update_;
+
+	struct State {
+		cv::Mat gray_left;
+		cv::Mat gray_right;
+
+		ftl::calibration::CalibrationData calib;
+		std::unique_ptr<ftl::calibration::ChessboardObject> object;
+		ftl::data::FrameID id;
+		bool highres = false;
+		cv::Size imsize;
+		std::atomic_bool capture = false;
+		std::atomic_bool running = false;
+		float last = 0.0f;
+		float frequency = 0.5f;
+		int count = 0;
+		float reprojection_error = NAN;
+		OpenCVCalibrateFlagsStereo flags;
+
+		// maps for rectification (cv)
+		std::pair<cv::Mat, cv::Mat> map_l;
+		std::pair<cv::Mat, cv::Mat> map_r;
+		cv::Rect validROI_l;
+		cv::Rect validROI_r;
+
+		ftl::data::FrameSetPtr fs_previous_points;
+		std::vector<std::vector<cv::Point2f>> points_l;
+		std::vector<std::vector<cv::Point2f>> points_r;
+		std::vector<std::vector<cv::Point3f>> points_object;
+	};
+	std::unique_ptr<State> state_;
+};
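+
+// Example (illustrative sketch): the expected stereo calibration flow for a
+// single source; values are placeholders.
+//
+//   auto* calib = screen->getModule<StereoCalibration>();
+//   calib->start(id);
+//   calib->setChessboard({9, 6}, 0.025);
+//   calib->setCapture(true);
+//   // ... later ...
+//   calib->setCapture(false);
+//   calib->run();                          // cv::stereoCalibrate() in a background thread
+//   while (calib->isBusy()) { /* wait */ }
+//   if (calib->calibrated()) { calib->save(); }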
+
+}
+}
diff --git a/applications/gui2/src/modules/calibration/extrinsic.cpp b/applications/gui2/src/modules/calibration/extrinsic.cpp
new file mode 100644
index 000000000..83debcedf
--- /dev/null
+++ b/applications/gui2/src/modules/calibration/extrinsic.cpp
@@ -0,0 +1,420 @@
+
+#include "calibration.hpp"
+#include "../../screen.hpp"
+#include "../../widgets/popupbutton.hpp"
+#include "../../views/calibration/extrinsicview.hpp"
+
+#include <opencv2/calib3d.hpp>
+#include <opencv2/aruco.hpp>
+#include <opencv2/cudawarping.hpp>
+
+#include <ftl/calibration/optimize.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <ftl/threads.hpp>
+
+#include <nanogui/entypo.h>
+
+using ftl::gui2::Calibration;
+
+using ftl::calibration::CalibrationData;
+using ftl::codecs::Channel;
+using ftl::data::FrameID;
+using ftl::data::FrameSetPtr;
+
+using ftl::gui2::ExtrinsicCalibration;
+using ftl::calibration::CalibrationObject;
+using ftl::calibration::ArUCoObject;
+
+using ftl::calibration::transform::inverse;
+using ftl::calibration::transform::getRotationAndTranslation;
+
+void ExtrinsicCalibration::init() {
+	reset();
+}
+
+void ExtrinsicCalibration::reset() {
+	if(future_.valid()) { future_.wait(); }
+	state_ = ExtrinsicCalibration::State();
+	running_ = false;
+	fs_current_.reset();
+	fs_update_.reset();
+
+	state_.calib_object = std::unique_ptr<CalibrationObject>(new ArUCoObject(cv::aruco::DICT_6X6_100));
+	state_.calib.points().setObject(state_.calib_object->object());
+	state_.min_cameras = 2;
+}
+
+ExtrinsicCalibration::~ExtrinsicCalibration() {
+	if(future_.valid()) {
+		future_.wait();
+	}
+}
+
+void ExtrinsicCalibration::start(unsigned int fsid, std::vector<FrameID> sources) {
+
+	setCalibrationMode(false);
+	reset();
+
+	state_.cameras.reserve(sources.size()*2);
+
+	auto* filter = io->feed()->filter
+		(std::unordered_set<uint32_t>{fsid}, {Channel::Left, Channel::Right});
+
+	filter->on([this](const FrameSetPtr& fs){ return onFrameSet_(fs);});
+
+	while(fs_current_ == nullptr) {
+		auto fss = filter->getLatestFrameSets();
+		if (fss.size() == 1) { fs_current_ = fss.front(); }
+	}
+
+	for (auto id : sources) {
+		// stereo calibration
+		auto cl = CameraID(id.frameset(), id.source(), Channel::Left);
+		auto cr = CameraID(id.frameset(), id.source(), Channel::Right);
+		const auto& frame = (*fs_current_)[id.source()].cast<ftl::rgbd::Frame>();
+		auto sz = cv::Size((int) frame.getLeftCamera().width, (int) frame.getLeftCamera().height);
+		state_.cameras.push_back({cl, {}});
+		state_.cameras.push_back({cr, {}});
+
+		state_.calib.addStereoCamera(
+			CalibrationData::Intrinsic(getCalibration(cl).intrinsic, sz),
+			CalibrationData::Intrinsic(getCalibration(cr).intrinsic, sz));
+	}
+
+	// initialize last points structure; can't be resized while running (without
+	// mutex)
+	unsigned int npoints = state_.calib_object->object().size();
+	state_.points_prev.resize(state_.cameras.size());
+	for (unsigned int i = 0; i < state_.cameras.size(); i++) {
+		state_.points_prev[i] = std::vector<cv::Point2d>(npoints);
+	}
+
+	auto* view = new ftl::gui2::ExtrinsicCalibrationView(screen, this);
+	view->onClose([this, filter](){
+		running_ = false;
+		filter->remove();
+		if (fs_current_ == nullptr) { return; }
+		for (const auto camera : state_.cameras) {
+			setCalibrationMode((*fs_current_)[camera.id.source()], true);
+		}
+	});
+	state_.capture = true;
+	screen->setView(view);
+}
+
+CalibrationData::Calibration ExtrinsicCalibration::calibration(int c) {
+	return state_.calib.calibration(c);
+}
+
+bool ExtrinsicCalibration::onFrameSet_(const FrameSetPtr& fs) {
+
+	std::atomic_store(&fs_update_, fs);
+	screen->redraw();
+
+	bool all_good = true;
+	for (const auto& [id, channel] : state_.cameras) {
+		std::ignore = channel;
+		all_good &= checkFrame((*fs)[id.source()]);
+	}
+	//if (!all_good) { return true; }
+
+	if (!state_.capture) { return true; }
+	if (running_.exchange(true)) { return true; }
+
+	future_ = ftl::pool.push([this, fs = fs](int thread_id) {
+
+		cv::Mat K;
+		cv::Mat distCoeffs;
+		std::vector<cv::Point2d> points;
+		int count = 0;
+
+		for (unsigned int i = 0; i < state_.cameras.size(); i++) {
+			const auto& [id, calib] = state_.cameras[i];
+
+			if (!(*fs)[id.source()].hasChannel(id.channel)) { continue; }
+
+			points.clear();
+			const cv::cuda::GpuMat& im = (*fs)[id.source()].get<cv::cuda::GpuMat>(id.channel);
+			K = calib.intrinsic.matrix();
+			distCoeffs = calib.intrinsic.distCoeffs.Mat();
+
+			try {
+				int n = state_.calib_object->detect(im, points, K, distCoeffs);
+				if (n > 0) {
+					state_.calib.points().addPoints(i, points);
+					state_.points_prev[i] = points;
+					count++;
+				}
+			}
+			catch (std::exception& ex) {
+				LOG(ERROR) << ex.what();
+			}
+		}
+
+		if (count < state_.min_cameras) {
+			state_.calib.points().clear();
+		}
+		else {
+			state_.calib.points().next();
+		}
+		running_ = false;
+	});
+
+	return true;
+}
+
+bool ExtrinsicCalibration::hasFrame(int camera) {
+	const auto id = state_.cameras[camera].id;
+	return	(std::atomic_load(&fs_current_).get() != nullptr) &&
+			((*fs_current_)[id.source()].hasChannel(id.channel));
+}
+
+const cv::cuda::GpuMat ExtrinsicCalibration::getFrame(int camera) {
+	const auto id = state_.cameras[camera].id;
+	return (*fs_current_)[id.source()].cast<ftl::rgbd::Frame>().get<cv::cuda::GpuMat>(id.channel);
+}
+
+const cv::cuda::GpuMat ExtrinsicCalibration::getFrameRectified(int c) {
+	if (running_ || state_.maps1.size() <= (unsigned int)(c)) {
+		return getFrame(c);
+	}
+	cv::cuda::GpuMat remapped;
+	cv::cuda::remap(getFrame(c), remapped, state_.maps1[c], state_.maps2[c], cv::INTER_LINEAR);
+	return remapped;
+}
+
+int ExtrinsicCalibration::cameraCount() {
+	return state_.cameras.size();
+}
+
+bool ExtrinsicCalibration::next() {
+	if (std::atomic_load(&fs_update_).get()) {
+		std::atomic_store(&fs_current_, fs_update_);
+		std::atomic_store(&fs_update_, {});
+		return true;
+	}
+	return false;
+}
+
+bool ExtrinsicCalibration::capturing() {
+	return state_.capture;
+}
+
+void ExtrinsicCalibration::setCapture(bool v) {
+	state_.capture = v;
+}
+
+std::vector<std::pair<std::string, unsigned int>> ExtrinsicCalibration::listFrameSets() {
+	auto framesets = io->feed()->listFrameSets();
+	std::vector<std::pair<std::string, unsigned int>> result;
+	result.reserve(framesets.size());
+	for (auto fsid : framesets) {
+		auto uri = io->feed()->getURI(fsid);
+		result.push_back({uri, fsid});
+	}
+	return result;
+}
+
+std::vector<std::pair<std::string, ftl::data::FrameID>> ExtrinsicCalibration::listSources(unsigned int fsid, bool all) {
+	std::vector<std::pair<std::string, FrameID>> cameras;
+	for (auto id : io->feed()->listFrames()) {
+		if (id.frameset() != fsid) { continue; }
+		if (all || io->feed()->availableChannels(id).count(Channel::CalibrationData)) {
+			auto name = io->feed()->getURI(id.frameset()) + "#" + std::to_string(id.source());
+			cameras.push_back({name, id});
+		}
+	}
+	return cameras;
+}
+
+std::vector<ExtrinsicCalibration::CameraID> ExtrinsicCalibration::cameras() {
+	std::vector<ExtrinsicCalibration::CameraID> res;
+	res.reserve(cameraCount());
+	for (const auto& camera : state_.cameras) {
+		res.push_back(camera.id);
+	}
+	return res;
+}
+
+bool ExtrinsicCalibration::isBusy() {
+	return running_;
+}
+
+void ExtrinsicCalibration::updateCalibration() {
+	auto fs = std::atomic_load(&fs_current_);
+	std::map<ftl::data::FrameID, ftl::calibration::CalibrationData> update;
+
+	for (unsigned int i = 0; i < state_.cameras.size(); i++) {
+		auto& c = state_.cameras[i];
+		auto frame_id = ftl::data::FrameID(c.id);
+
+		if (update.count(frame_id) == 0) {
+			auto& frame = fs->frames[c.id];
+			update[frame_id] = frame.get<CalibrationData>(Channel::CalibrationData);
+		}
+		update[frame_id].get(c.id.channel) = state_.calib.calibrationOptimized(i);
+	}
+
+	for (auto& [fid, calib] : update) {
+		auto& frame = fs->frames[fid];
+		setCalibration(frame, calib);
+	}
+}
+
+void ExtrinsicCalibration::updateCalibration(int c) {
+	throw ftl::exception("Not implemented");
+}
+
+void ExtrinsicCalibration::stereoRectify(int cl, int cr,
+	const CalibrationData::Calibration& l, const CalibrationData::Calibration& r) {
+
+	CHECK(l.extrinsic.tvec != r.extrinsic.tvec);
+	CHECK(l.intrinsic.resolution == r.intrinsic.resolution);
+
+	auto size = l.intrinsic.resolution;
+	cv::Mat T = l.extrinsic.matrix() * inverse(r.extrinsic.matrix());
+	cv::Mat R, t, R1, R2, P1, P2, Q, map1, map2;
+
+	getRotationAndTranslation(T, R, t);
+
+	cv::stereoRectify(
+		l.intrinsic.matrix(), l.intrinsic.distCoeffs.Mat(),
+		r.intrinsic.matrix(), r.intrinsic.distCoeffs.Mat(), size,
+		R, t, R1, R2, P1, P2, Q, 0, 1.0);
+
+	cv::initUndistortRectifyMap(l.intrinsic.matrix(), l.intrinsic.distCoeffs.Mat(),
+		R1, P1, size, CV_32FC1, map1, map2);
+	state_.maps1[cl].upload(map1);
+	state_.maps2[cl].upload(map2);
+
+	cv::initUndistortRectifyMap(r.intrinsic.matrix(), r.intrinsic.distCoeffs.Mat(),
+		R2, P2, size, CV_32FC1, map1, map2);
+	state_.maps1[cr].upload(map1);
+	state_.maps2[cr].upload(map2);
+}
+
+void ExtrinsicCalibration::run() {
+	if (running_.exchange(true)) { return; }
+
+	future_ = ftl::pool.push([this](int id) {
+		try {
+			for (int c = 0; c < cameraCount(); c += 2) {
+				auto t1 = state_.calib.calibration(c).extrinsic.tvec;
+				auto t2 = state_.calib.calibration(c + 1).extrinsic.tvec;
+
+				LOG(INFO) << "baseline (" << c << ", " << c + 1 << "): "
+						  << cv::norm(t1, t2);
+			}
+
+			auto opt = state_.calib.options();
+			opt.optimize_intrinsic = !(state_.flags & Flags::FIX_INTRINSIC);
+			opt.fix_focal = state_.flags & Flags::FIX_FOCAL;
+			opt.fix_distortion = state_.flags & Flags::FIX_DISTORTION;
+			opt.fix_principal_point = state_.flags & Flags::FIX_PRINCIPAL_POINT;
+			opt.loss = (state_.flags & Flags::LOSS_CAUCHY) ?
+				ftl::calibration::BundleAdjustment::Options::Loss::CAUCHY :
+				ftl::calibration::BundleAdjustment::Options::Loss::SQUARED;
+			state_.calib.setOptions(opt);
+			state_.calib.run();
+
+			for (int c = 0; c < cameraCount(); c += 2) {
+				auto t1 = state_.calib.calibrationOptimized(c).extrinsic.tvec;
+				auto t2 = state_.calib.calibrationOptimized(c + 1).extrinsic.tvec;
+
+				LOG(INFO) << "optimized baseline (" << c << ", " << c + 1 << "): "
+						  << cv::norm(t1, t2);
+			}
+
+			// Rectification maps for visualization; stereo cameras assumed
+			// if non-stereo cameras added visualization/grouping (by index)
+			// has to be different.
+
+			state_.maps1.resize(cameraCount());
+			state_.maps2.resize(cameraCount());
+
+			for (int c = 0; c < cameraCount(); c += 2) {
+				auto l = state_.calib.calibrationOptimized(c);
+				auto r = state_.calib.calibrationOptimized(c + 1);
+				stereoRectify(c, c + 1, l, r);
+			}
+		}
+		catch (ftl::exception &ex) {
+			LOG(ERROR) << ex.what() << "\n" << ex.trace();
+		}
+		catch (std::exception &ex) {
+			LOG(ERROR) << ex.what();
+		}
+
+		running_ = false;
+	});
+}
+
+double ExtrinsicCalibration::reprojectionError(int camera) {
+	if (camera >= cameraCount()) {
+		return NAN;
+	}
+	if (camera < 0) {
+		return state_.calib.reprojectionError();
+	}
+	else {
+		return state_.calib.reprojectionError(camera);
+	}
+}
+
+ftl::calibration::CalibrationData::Calibration ExtrinsicCalibration::getCalibration(CameraID id) {
+	if (fs_current_ == nullptr) {
+		throw ftl::exception("No frame");
+	}
+
+	auto calib = (*fs_current_)[id.source()].get<CalibrationData>(Channel::CalibrationData);
+	if (!calib.hasCalibration(id.channel)) {
+		throw ftl::exception("Calibration missing for requierd channel");
+	}
+
+	return calib.get(id.channel);
+}
+
+const std::vector<cv::Point2d>& ExtrinsicCalibration::previousPoints(int camera) {
+	// not really thread safe (but points_prev_ should not resize)
+	return state_.points_prev[camera];
+}
+
+int ExtrinsicCalibration::getFrameCount(int camera) {
+	return state_.calib.points().getCount(unsigned(camera));
+}
+
+void ExtrinsicCalibration::setFlags(int flags) {
+	state_.flags = flags;
+}
+
+int ExtrinsicCalibration::flags() const {
+	return state_.flags;
+}
+
+// debug method: save state to file (msgpack)
+void ExtrinsicCalibration::saveInput(const std::string& filename) {
+	ftl::pool.push([this, filename](int){
+		do {
+			// calib must not be modified; would be better to have mutex here
+			state_.capture = false;
+		}
+		while(running_);
+
+		running_ = true;
+		try { state_.calib.toFile(filename);}
+		catch (std::exception& ex) { LOG(ERROR) << "Calib save failed " << ex.what(); }
+		running_ = false;
+	});
+}
+
+// debug method: load state from file (msgpack)
+void ExtrinsicCalibration::loadInput(const std::string& filename) {
+	ftl::pool.push([this, filename](int){
+		do {
+			// calib must not be modified; would be better to have mutex here
+			state_.capture = false;
+		}
+		while(running_);
+
+		running_ = true;
+		try { state_.calib.fromFile(filename); }
+		catch (std::exception& ex) { LOG(ERROR) << "Calib load failed: " << ex.what(); }
+		running_ = false;
+	});
+}
diff --git a/applications/gui2/src/modules/calibration/intrinsic.cpp b/applications/gui2/src/modules/calibration/intrinsic.cpp
new file mode 100644
index 000000000..43c08485d
--- /dev/null
+++ b/applications/gui2/src/modules/calibration/intrinsic.cpp
@@ -0,0 +1,391 @@
+#include <loguru.hpp>
+
+#include "calibration.hpp"
+#include "../../screen.hpp"
+#include "../../widgets/popupbutton.hpp"
+#include "../../views/calibration/intrinsicview.hpp"
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include <ftl/calibration/structures.hpp>
+#include <ftl/threads.hpp>
+
+#include <nanogui/entypo.h>
+
+using ftl::gui2::Calibration;
+using ftl::gui2::IntrinsicCalibration;
+
+using ftl::calibration::ChessboardObject;
+using ftl::calibration::CalibrationData;
+using ftl::codecs::Channel;
+using ftl::data::FrameID;
+using ftl::data::FrameSetPtr;
+
+void IntrinsicCalibration::init() {
+	reset();
+}
+
+IntrinsicCalibration::~IntrinsicCalibration() {
+	if(future_.valid()) {
+		future_.wait();
+	}
+}
+
+cv::Size IntrinsicCalibration::chessboardSize() {
+	return state_->object->chessboardSize();
+}
+
+double IntrinsicCalibration::squareSize() {
+	return state_->object->squareSize();
+}
+
+void IntrinsicCalibration::setChessboard(cv::Size size, double square) {
+	state_->object = std::make_unique<ChessboardObject>(size.height, size.width, square);
+}
+
+void IntrinsicCalibration::reset() {
+	state_ = std::make_unique<State>();
+	state_->object = std::make_unique<ChessboardObject>();
+	state_->channel = Channel::Left;
+	state_->channel_alt = Channel::Left;
+	state_->flags.set(defaultFlags());
+}
+
+void IntrinsicCalibration::start(ftl::data::FrameID id) {
+	reset();
+	setCalibrationMode(false);
+
+	state_->id = id;
+
+	auto* filter = io->feed()->filter
+		(std::unordered_set<uint32_t>{id.frameset()},
+		 {Channel::Left, Channel::Right});
+
+	filter->on([this](const FrameSetPtr& fs){ return onFrame_(fs); });
+
+	while(fs_current_ == nullptr) {
+		auto fss = filter->getLatestFrameSets();
+		if (fss.size() == 1) { fs_current_ = fss.front(); }
+	}
+	auto fs = std::atomic_load(&fs_current_);
+	setChannel_(fs);
+
+	auto* view = new ftl::gui2::IntrinsicCalibrationView(screen, this);
+	view->onClose([filter, this](){
+		// if calib_ caches images, also reset() here!
+		filter->remove();
+		if (fs_current_) {
+			setCalibrationMode(fs_current_->frames[state_->id.source()], true);
+		}
+		reset();
+		fs_current_.reset();
+		fs_update_.reset();
+	});
+
+	screen->setView(view);
+}
+
+void IntrinsicCalibration::setChannel(Channel channel) {
+	state_->channel = channel;
+	auto fs = std::atomic_load(&fs_current_);
+	setChannel_(fs);
+}
+
+void IntrinsicCalibration::setChannel_(FrameSetPtr fs) {
+	// reset points, find if high res available and find correct resolution
+	// TODO/FIXME: channel might be missing from previous frameset; temporary
+	// fix uses left channel to set resolution (assumes left resolution is
+	// always the same as the right resolution).
+
+	state_->calib = CalibrationData::Intrinsic();
+	state_->points.clear();
+	state_->points_object.clear();
+	state_->count = 0;
+
+	state_->channel_alt = state_->channel;
+	if (fs == nullptr) {
+		LOG(ERROR) << "No frame, calibration not loaded";
+		return;
+	}
+
+	auto& frame = (*fs)[state_->id.source()].cast<ftl::rgbd::Frame>();
+	cv::Size size;
+
+	if (state_->channel == Channel::Left) {
+		if (frame.has(Channel::LeftHighRes)) {
+			state_->channel_alt = Channel::LeftHighRes;
+			size = frame.get<cv::Mat>(state_->channel_alt).size();
+		}
+		else {
+			size = frame.get<cv::Mat>(state_->channel_alt).size();
+		}
+	}
+	else if (state_->channel == Channel::Right) {
+		if (frame.has(Channel::RightHighRes)) {
+			state_->channel_alt = Channel::RightHighRes;
+			size = frame.get<cv::Mat>(Channel::LeftHighRes).size();
+		}
+		else {
+			size = frame.get<cv::Mat>(Channel::Left).size();
+		}
+	}
+
+	try {
+		auto calib = frame.get<CalibrationData>(Channel::CalibrationData);
+		if (calib.hasCalibration(state_->channel)) {
+			auto intrinsic = calib.get(state_->channel).intrinsic;
+			state_->calib = CalibrationData::Intrinsic(intrinsic, size);
+			state_->calibrated = true;
+		}
+		else {
+			state_->calib.resolution = size;
+		}
+	}
+	catch (std::exception& ex) {
+		LOG(ERROR)	<< "Could not read calibration: " << ex.what()
+					<< "; is this a valid source?";
+	}
+}
+
+bool IntrinsicCalibration::onFrame_(const ftl::data::FrameSetPtr& fs) {
+
+	std::atomic_store(&fs_update_, fs);
+	screen->redraw();
+
+	auto& frame = fs->frames[state_->id.source()];
+
+	if (!checkFrame(frame)) { return true; }
+	if (!state_->capture) { return true; }
+	if ((float(glfwGetTime()) - state_->last) < state_->frequency) { return true; }
+	if (state_->running.exchange(true)) { return true; }
+
+	future_ = ftl::pool.push(	[fs, this]
+								(int thread_id) {
+
+		try {
+			auto& frame = (*fs)[state_->id.source()].cast<ftl::rgbd::Frame>();
+
+			auto im = getMat(frame, state_->channel_alt);
+			cv::cvtColor(im, state_->gray, cv::COLOR_BGRA2GRAY);
+
+			std::vector<cv::Point2d> points;
+			int npoints = state_->object->detect(state_->gray, points);
+
+			if (npoints > 0) {
+				std::unique_lock<std::mutex> lk(mtx_);
+
+				auto& new_points = state_->points.emplace_back();
+				for (auto p : points) {
+					new_points.push_back(p);
+				}
+
+				auto& new_points_obj = state_->points_object.emplace_back();
+				for (auto p : state_->object->object()) {
+					new_points_obj.push_back(p);
+				}
+
+				state_->count++;
+			}
+			else {
+				LOG(INFO) << "Calibration pattern was not detected";
+			}
+		}
+		catch (std::exception &e) {
+			LOG(ERROR) << "exception in chesboard detection: " << e.what();
+			state_->running = false;
+			throw;
+		}
+
+		state_->running = false;
+		state_->last = float(glfwGetTime());
+	});
+
+	return true;
+}
+
+
+void IntrinsicCalibration::save() {
+	auto& frame = fs_current_->frames[state_->id.source()];
+	CalibrationData calib_data = CalibrationData(frame.get<CalibrationData>(Channel::CalibrationData));
+	auto& calibration = calib_data.get(state_->channel);
+	calibration.intrinsic = state_->calib;
+	setCalibration(frame, calib_data);
+}
+
+int IntrinsicCalibration::defaultFlags() {
+	int flags = state_->flags.defaultFlags();
+
+	// load config flags
+	for (int i : state_->flags.list()) {
+		auto flag = get<bool>(state_->flags.name(i));
+		if (flag) {
+			if (*flag)	flags |= i;
+			else		flags &= (~i);
+		}
+	}
+
+	return flags;
+}
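+
+// Note (assumption): the configurable keys read above are whatever
+// OpenCVCalibrateFlags::name() returns for each flag, so individual flags can
+// be forced on/off from the module configuration, e.g. (key names illustrative):
+//
+//   "calibration": { "CALIB_FIX_ASPECT_RATIO": true, "CALIB_RATIONAL_MODEL": false }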
+
+bool IntrinsicCalibration::isBusy() {
+	return state_->capture || state_->running;
+}
+
+void IntrinsicCalibration::run() {
+	state_->running = true;
+	future_ = ftl::pool.push([this](int id) {
+		try {
+			for (auto f : state_->flags.list()) {
+				if (state_->flags.has(f)) {
+					LOG(INFO) << state_->flags.name(f);
+				}
+			}
+			cv::Size2d ssize = sensorSize();
+			cv::Mat K;
+			cv::Mat distCoeffs;
+			cv::Size size = state_->calib.resolution;
+			if (state_->flags.has(cv::CALIB_USE_INTRINSIC_GUESS)) {
+				// OpenCV seems to use these anyways?
+				K = state_->calib.matrix();
+				state_->calib.distCoeffs.Mat(12).copyTo(distCoeffs);
+			}
+			std::vector<cv::Mat> rvecs, tvecs;
+			auto term = cv::TermCriteria
+				(cv::TermCriteria::COUNT|cv::TermCriteria::EPS, state_->max_iter, 1.0e-6);
+
+			state_->reprojection_error = cv::calibrateCamera(
+				state_->points_object, state_->points,
+				size, K, distCoeffs, rvecs, tvecs,
+				state_->flags, term);
+
+			state_->calib = CalibrationData::Intrinsic(K, distCoeffs, size);
+			state_->calib.sensorSize = ssize;
+			state_->calibrated = true;
+		}
+		catch (std::exception &e) {
+			LOG(ERROR) << "exception in calibration: " << e.what();
+			state_->running = false;
+			throw;
+		}
+
+		state_->running = false;
+	});
+}
+
+bool IntrinsicCalibration::hasFrame() {
+	return (std::atomic_load(&fs_update_).get() != nullptr)
+		&& fs_update_->frames[state_->id.source()].hasChannel(state_->channel_alt);
+};
+
+cv::cuda::GpuMat IntrinsicCalibration::getFrame() {
+	if (std::atomic_load(&fs_update_)) {
+		fs_current_ = fs_update_;
+		std::atomic_store(&fs_update_, {});
+	}
+
+	if (!fs_current_) {
+		return cv::cuda::GpuMat();
+	}
+
+	return getGpuMat((*fs_current_)[state_->id.source()].cast<ftl::rgbd::Frame>(),
+					 state_->channel_alt);
+}
+
+cv::cuda::GpuMat IntrinsicCalibration::getFrameUndistort() {
+	if (!calibrated()) {
+		return getFrame();
+	}
+
+	if (std::atomic_load(&fs_update_)) {
+		fs_current_ = fs_update_;
+		std::atomic_store(&fs_update_, {});
+	}
+
+	if (!fs_current_) {
+		return cv::cuda::GpuMat();
+	}
+
+	auto im = getMat((*fs_current_)[state_->id.source()].cast<ftl::rgbd::Frame>(),
+					 state_->channel_alt);
+
+	// NOTE: would be faster to use remap() and computing the maps just once if
+	// performance is relevant here
+
+	cv::Mat im_undistort;
+	cv::cuda::GpuMat gpu;
+	cv::undistort(im, im_undistort, state_->calib.matrix(), state_->calib.distCoeffs.Mat(12));
+	gpu.upload(im_undistort);
+	return gpu;
+}
+
+cv::Size2d IntrinsicCalibration::sensorSize() {
+	if (state_->calib.sensorSize == cv::Size2d{0.0, 0.0}) {
+		double w = value("sensor_width", 0.0);
+		double h = value("sensor_height", 0.0);
+		return {w, h};
+	}
+	else {
+		return state_->calib.sensorSize;
+	}
+};
+
+void IntrinsicCalibration::setSensorSize(cv::Size2d sz) {
+	state_->calib.sensorSize = sz;
+}
+
+double IntrinsicCalibration::focalLength() {
+	return (state_->calib.fx)*(sensorSize().width/state_->calib.resolution.width);
+}
+
+void IntrinsicCalibration::setFocalLength(double value, cv::Size2d sensor_size) {
+	setSensorSize(sensor_size);
+	double f = value*(state_->calib.resolution.width/sensor_size.width);
+
+	state_->calib.fx = f;
+	state_->calib.fy = f;
+}
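+
+// Example: with a 1280 px wide image and a 6.4 mm wide sensor, a 4.0 mm lens
+// gives fx = 4.0 * (1280 / 6.4) = 800 px; focalLength() applies the inverse
+// scaling to recover 4.0 mm from fx and the sensor width.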
+
+void IntrinsicCalibration::resetPrincipalPoint() {
+	auto sz = state_->calib.resolution;
+	state_->calib.cx = double(sz.width)/2.0;
+	state_->calib.cy = double(sz.height)/2.0;
+}
+
+void IntrinsicCalibration::resetDistortion() {
+	state_->calib.distCoeffs = CalibrationData::Intrinsic::DistortionCoefficients();
+}
+
+
+bool IntrinsicCalibration::hasChannel(Channel c) {
+	if (fs_current_) {
+		return (*fs_current_)[state_->id.source()].hasChannel(c);
+	}
+	return false;
+}
+
+std::vector<std::pair<std::string, FrameID>> IntrinsicCalibration::listSources(bool all) {
+	std::vector<std::pair<std::string, FrameID>> cameras;
+	for (auto id : io->feed()->listFrames()) {
+		auto channels = io->feed()->availableChannels(id);
+		// TODO: doesn't work
+		if (all || (channels.count(Channel::CalibrationData) == 1)) {
+			auto name = io->feed()->getURI(id.frameset()) + "#" + std::to_string(id.source());
+			cameras.push_back({name, id});
+		}
+	}
+	return cameras;
+}
+
+std::vector<cv::Point2f> IntrinsicCalibration::previousPoints() {
+	std::unique_lock<std::mutex> lk(mtx_, std::defer_lock);
+	if (lk.try_lock()) {
+		if (state_->points.size() == 0) { return {}; }
+		return std::vector<cv::Point2f>(state_->points.back());
+	}
+	return {};
+}
+
+ftl::calibration::CalibrationData::Intrinsic IntrinsicCalibration::calibration() {
+	return state_->calib;
+}
+
diff --git a/applications/gui2/src/modules/calibration/stereo.cpp b/applications/gui2/src/modules/calibration/stereo.cpp
new file mode 100644
index 000000000..1ac7c9fc1
--- /dev/null
+++ b/applications/gui2/src/modules/calibration/stereo.cpp
@@ -0,0 +1,366 @@
+#include <loguru.hpp>
+
+#include "calibration.hpp"
+#include "../../screen.hpp"
+#include "../../widgets/popupbutton.hpp"
+#include "../../views/calibration/stereoview.hpp"
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include <ftl/calibration/parameters.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <ftl/threads.hpp>
+
+#include <nanogui/entypo.h>
+
+using ftl::gui2::Calibration;
+using ftl::gui2::StereoCalibration;
+
+using ftl::calibration::ChessboardObject;
+using ftl::calibration::CalibrationData;
+using ftl::codecs::Channel;
+using ftl::data::FrameID;
+using ftl::data::FrameSetPtr;
+
+////////////////////////////////////////////////////////////////////////////////
+
+void StereoCalibration::setCapture(bool v) {
+	state_->capture = v;
+}
+
+bool StereoCalibration::capturing() {
+	return state_->capture;
+}
+
+void StereoCalibration::setFrequency(float v) {
+	state_->frequency = v;
+}
+
+float StereoCalibration::frequency() {
+	return state_->frequency;
+}
+
+void StereoCalibration::init() {
+	state_ = std::make_unique<State>();
+	state_->object = std::unique_ptr<ChessboardObject>(new ChessboardObject());
+}
+
+StereoCalibration::~StereoCalibration() {
+	if (state_) {
+		state_->running = false;
+	}
+	if(future_.valid()) {
+		future_.wait();
+	}
+	fs_current_.reset();
+	fs_update_.reset();
+}
+
+void StereoCalibration::reset() {
+	while(state_->running) { state_->capture = false; }
+	state_ = std::make_unique<State>();
+	state_->object = std::unique_ptr<ChessboardObject>(new ChessboardObject());
+	resetFlags();
+}
+
+cv::Size StereoCalibration::chessboardSize() {
+	return state_->object->chessboardSize();
+}
+
+double StereoCalibration::squareSize() {
+	return state_->object->squareSize();
+}
+
+void StereoCalibration::setChessboard(cv::Size size, double square) {
+	state_->object = std::make_unique<ChessboardObject>(size.height, size.width, square);
+}
+
+void StereoCalibration::start(ftl::data::FrameID id) {
+	reset();
+	setCalibrationMode(false);
+	state_->id = id;
+
+	auto* view = new ftl::gui2::StereoCalibrationView(screen, this);
+	auto* filter = io->feed()->filter
+		(std::unordered_set<uint32_t>{id.frameset()},
+		 {Channel::Left, Channel::Right});
+
+	filter->on([this](const FrameSetPtr& fs){ return onFrame_(fs); });
+
+	view->onClose([filter, this](){
+		// if state_->calib caches images, also reset() here!
+		filter->remove();
+		if (fs_current_) {
+			setCalibrationMode(fs_current_->frames[state_->id.source()], true);
+		}
+		reset();
+		fs_current_.reset();
+		fs_update_.reset();
+	});
+
+	screen->setView(view);
+
+	for (auto fs : filter->getLatestFrameSets()) {
+		if (!(fs->frameset() == state_->id.frameset()) ||
+			!(fs->hasFrame(state_->id.source()))) { continue; }
+
+		// read calibration channel and set channel_alt_ to high res if available
+
+		try {
+			auto& frame = (*fs)[state_->id.source()];
+			state_->calib = frame.get<CalibrationData>(Channel::CalibrationData);
+			state_->highres = frame.hasAll({Channel::LeftHighRes, Channel::RightHighRes});
+			auto sizel = frame.get<cv::cuda::GpuMat>(channelLeft_()).size();
+			auto sizer = frame.get<cv::cuda::GpuMat>(channelRight_()).size();
+			if (sizel != sizer) {
+				LOG(ERROR) << "Frames have different resolutions";
+				// TODO: do not proceed
+			}
+			state_->imsize = sizel;
+		}
+		catch (std::exception& ex) {
+			LOG(ERROR)	<< "Could not read calibration: " << ex.what()
+						<< "; is this a valid source?";
+		}
+		break;
+	}
+}
+
+bool StereoCalibration::onFrame_(const ftl::data::FrameSetPtr& fs) {
+
+	std::atomic_store(&fs_update_, fs);
+	screen->redraw();
+
+	auto& frame = fs->frames[state_->id.source()];
+
+	if (!checkFrame(frame)) { return true; }
+	if (!frame.hasAll({channelLeft_(), channelRight_()})) { return true; }
+	if (!state_->capture) { return true; }
+	if ((float(glfwGetTime()) - state_->last) < state_->frequency) { return true; }
+	if (state_->running.exchange(true)) { return true; }
+
+	future_ = ftl::pool.push([this, fs] (int thread_id) {
+
+		try {
+			auto& frame = (*fs)[state_->id.source()].cast<ftl::rgbd::Frame>();
+			auto l = getMat(frame, channelLeft_());
+			auto r = getMat(frame, channelRight_());
+			cv::cvtColor(l, state_->gray_left, cv::COLOR_BGRA2GRAY);
+			cv::cvtColor(r, state_->gray_right, cv::COLOR_BGRA2GRAY);
+
+			std::vector<cv::Point2d> pointsl;
+			std::vector<cv::Point2d> pointsr;
+			if ((state_->object->detect(state_->gray_left, pointsl) == 1) &&
+				(state_->object->detect(state_->gray_right, pointsr) == 1)) {
+
+				std::unique_lock<std::mutex> lk(mtx_);
+				auto& new_points_l = state_->points_l.emplace_back();
+				new_points_l.reserve(pointsl.size());
+				auto& new_points_r = state_->points_r.emplace_back();
+				new_points_r.reserve(pointsl.size());
+				auto& new_points_obj = state_->points_object.emplace_back();
+				new_points_obj.reserve(pointsl.size());
+
+				for (auto p : pointsl) { new_points_l.push_back(p); }
+				for (auto p : pointsr) { new_points_r.push_back(p); }
+				for (auto p : state_->object->object()) { new_points_obj.push_back(p); }
+				state_->count++;
+			}
+		}
+		catch (std::exception &e) {
+			LOG(ERROR) << "exception in chesboard detection: " << e.what();
+			running = false;
+			throw;
+		}
+
+		state_->running = false;
+		state_->last = float(glfwGetTime());
+	});
+
+	return true;
+}
+
+
+void StereoCalibration::save() {
+	auto fs = std::atomic_load(&(fs_current_));
+	setCalibration((*fs)[state_->id.source()], state_->calib);
+}
+
+void StereoCalibration::resetFlags() {
+	// reset flags and get class defaults
+	state_->flags.reset();
+	state_->flags.set(state_->flags.defaultFlags());
+
+	// load config flags
+	for (int i : state_->flags.list()) {
+		auto flag = get<bool>(state_->flags.name(i));
+		if (flag) {
+			if (*flag)	state_->flags.set(i);
+			else		state_->flags.unset(i);
+		}
+	}
+}
+
+bool StereoCalibration::isBusy() {
+	return state_->capture || state_->running;
+}
+
+void StereoCalibration::run() {
+	if (state_->running) { return; }
+
+	state_->running = true;
+	future_ = ftl::pool.push([this](int) {
+		try {
+			auto& calib_l = state_->calib.get(Channel::Left);
+			auto& calib_r = state_->calib.get(Channel::Right);
+			auto K1 = calib_l.intrinsic.matrix();
+			auto distCoeffs1 = calib_l.intrinsic.distCoeffs.Mat();
+			auto K2 = calib_r.intrinsic.matrix();
+			auto distCoeffs2 = calib_r.intrinsic.distCoeffs.Mat();
+			cv::Mat R, T, E, F;
+			state_->reprojection_error = cv::stereoCalibrate(
+				state_->points_object, state_->points_l,
+				state_->points_r, K1, distCoeffs1, K2, distCoeffs2,
+				state_->imsize, R, T, E, F, state_->flags);
+
+			state_->calib.get(Channel::Left).intrinsic =
+				CalibrationData::Intrinsic(K1, distCoeffs1, state_->imsize);
+			state_->calib.get(Channel::Right).intrinsic =
+				CalibrationData::Intrinsic(K2, distCoeffs2, state_->imsize);
+
+			state_->calib.get(Channel::Left).extrinsic = CalibrationData::Extrinsic();
+			state_->calib.get(Channel::Right).extrinsic = CalibrationData::Extrinsic(R, T);
+		}
+		catch (std::exception &e) {
+			LOG(ERROR) << "exception in calibration: " << e.what();
+			state_->running = false;
+			throw;
+		}
+
+		state_->running = false;
+	});
+}
+
+ftl::rgbd::Frame& StereoCalibration::frame_() {
+	if (std::atomic_load(&fs_update_)) {
+		fs_current_ = fs_update_;
+		std::atomic_store(&fs_update_, {});
+	}
+	return (*fs_current_)[state_->id.source()].cast<ftl::rgbd::Frame>();
+}
+
+bool StereoCalibration::hasFrame() {
+	auto cleft = state_->highres ? Channel::LeftHighRes : Channel::Left;
+	auto cright = state_->highres ? Channel::RightHighRes : Channel::Right;
+	return (std::atomic_load(&fs_update_).get() != nullptr)
+		&& fs_update_->frames[state_->id.source()].hasAll({cleft, cright});
+};
+
+Channel StereoCalibration::channelLeft_() {
+	return (state_->highres ? Channel::LeftHighRes : Channel::Left);
+}
+
+Channel StereoCalibration::channelRight_() {
+	return (state_->highres ? Channel::RightHighRes : Channel::Right);
+}
+
+cv::cuda::GpuMat StereoCalibration::getLeft() {
+	return getGpuMat(frame_(), channelLeft_());
+}
+
+cv::cuda::GpuMat StereoCalibration::getRight() {
+	return getGpuMat(frame_(), channelRight_());
+}
+
+bool StereoCalibration::hasChannel(Channel c) {
+	if (fs_current_) {
+		return (*fs_current_)[state_->id.source()].hasChannel(c);
+	}
+	return false;
+}
+
+std::vector<std::pair<std::string, FrameID>> StereoCalibration::listSources(bool all) {
+	std::vector<std::pair<std::string, FrameID>> cameras;
+	for (auto id : io->feed()->listFrames()) {
+		auto channels = io->feed()->availableChannels(id);
+		// TODO: doesn't work
+		if (all || (channels.count(Channel::CalibrationData) == 1)) {
+			auto name = io->feed()->getURI(id.frameset()) + "#" + std::to_string(id.source());
+			cameras.push_back({name, id});
+		}
+	}
+	return cameras;
+}
+
+std::vector<std::vector<cv::Point2f>> StereoCalibration::previousPoints() {
+	std::unique_lock<std::mutex> lk(mtx_, std::defer_lock);
+	if (lk.try_lock()) {
+		if (state_->points_l.size() > 0) {
+			return {	state_->points_l.back(),
+						state_->points_r.back()
+			};
+		}
+	}
+	return {};
+}
+
+ftl::calibration::CalibrationData::Calibration StereoCalibration::calibrationLeft() {
+	return state_->calib.get(Channel::Left);
+}
+
+ftl::calibration::CalibrationData::Calibration StereoCalibration::calibrationRight() {
+	return state_->calib.get(Channel::Right);
+}
+
+bool StereoCalibration::calibrated() {
+	return (cv::norm(calibrationLeft().extrinsic.tvec,
+					 calibrationRight().extrinsic.tvec) > 0);
+}
+
+void StereoCalibration::calculateRectification() {
+
+	using ftl::calibration::transform::inverse;
+
+	auto left = calibrationLeft();
+	auto right = calibrationRight();
+	auto size = left.intrinsic.resolution;
+
+	cv::Mat T = inverse(left.extrinsic.matrix()) * right.extrinsic.matrix();
+	cv::Mat Rl, Rr, Pl, Pr, Q;
+
+	cv::stereoRectify(left.intrinsic.matrix(), left.intrinsic.distCoeffs.Mat(),
+					  right.intrinsic.matrix(), right.intrinsic.distCoeffs.Mat(),
+					  size, T(cv::Rect(0, 0, 3, 3)), T(cv::Rect(3, 0, 1, 3)),
+					  Rl, Rr, Pl, Pr, Q, 0, 1.0, {0, 0},
+					  &(state_->validROI_l), &(state_->validROI_r));
+
+	cv::initUndistortRectifyMap(left.intrinsic.matrix(), left.intrinsic.distCoeffs.Mat(),
+								Rl, Pl, size, CV_16SC1,
+								state_->map_l.first, state_->map_l.second);
+
+	cv::initUndistortRectifyMap(right.intrinsic.matrix(), right.intrinsic.distCoeffs.Mat(),
+								Rr, Pr, size, CV_16SC1,
+								state_->map_r.first, state_->map_r.second);
+}
+
+cv::cuda::GpuMat StereoCalibration::getLeftRectify() {
+	if (state_->map_l.first.empty()) { calculateRectification(); }
+	cv::Mat tmp;
+	cv::cuda::GpuMat res;
+	cv::remap(getMat(frame_(), channelLeft_()), tmp,
+			  state_->map_l.first,  state_->map_l.second,
+			  cv::INTER_LINEAR);
+	res.upload(tmp);
+	return res;
+}
+
+cv::cuda::GpuMat StereoCalibration::getRightRectify() {
+	if (state_->map_r.first.empty()) { calculateRectification(); }
+	cv::Mat tmp;
+	cv::cuda::GpuMat res;
+	cv::remap(getMat(frame_(), channelRight_()), tmp,
+			  state_->map_r.first,  state_->map_r.second,
+			  cv::INTER_LINEAR);
+	res.upload(tmp);
+	return res;
+}
diff --git a/applications/gui2/src/modules/camera.cpp b/applications/gui2/src/modules/camera.cpp
new file mode 100644
index 000000000..a47d35f1a
--- /dev/null
+++ b/applications/gui2/src/modules/camera.cpp
@@ -0,0 +1,454 @@
+#include "camera.hpp"
+#include "statistics.hpp"
+
+#include "../views/camera3d.hpp"
+#include <ftl/rgbd/capabilities.hpp>
+#include <chrono>
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/cudaarithm.hpp>
+
+#include <loguru.hpp>
+
+using ftl::gui2::Camera;
+using ftl::codecs::Channel;
+using ftl::rgbd::Capability;
+using namespace std::literals::chrono_literals;
+using ftl::data::Message;
+
+void Camera::init() {
+
+	colouriser_ = std::unique_ptr<ftl::render::Colouriser>(
+		ftl::create<ftl::render::Colouriser>(this, "colouriser"));
+
+	overlay_ = std::unique_ptr<ftl::overlay::Overlay>
+		(ftl::create<ftl::overlay::Overlay>(this, "overlay"));
+}
+
+void Camera::update(double delta) {
+	if (nframes_ < 0) { return; }
+	if (nframes_ > update_fps_freq_) {
+		float n = nframes_;
+		float l = latency_ / n;
+		nframes_ = 0;
+		latency_ = 0;
+		auto t = glfwGetTime();
+		float diff = t - last_;
+		last_ = t;
+
+		auto *mod = screen->getModule<ftl::gui2::Statistics>();
+		mod->getJSON(StatisticsPanel::PERFORMANCE_INFO)["FPS"] = n/diff;
+		mod->getJSON(StatisticsPanel::PERFORMANCE_INFO)["Latency"] = std::to_string(int(l))+std::string("ms");
+		if (live_) mod->getJSON(StatisticsPanel::MEDIA_STATUS)["LIVE"] = nlohmann::json{{"icon", ENTYPO_ICON_VIDEO_CAMERA},{"value", true},{"colour","#0000FF"},{"size",28}};
+
+		auto ptr = std::atomic_load(&latest_);
+		if (ptr) {
+			const auto &frame = ptr->frames[frame_idx];
+			if (frame.has(Channel::MetaData)) {
+				const auto &meta = frame.metadata();
+				if (meta.size() > 0) {
+					auto &jmeta = mod->getJSON(StatisticsPanel::MEDIA_META);
+
+					//if (meta.count("name")) {
+					//	jmeta["name"] = nlohmann::json{{"nokey", true},{"value",meta.find("name")->second},{"size",20}};
+					//}
+					if (meta.count("device")) {
+						jmeta["Device"] = nlohmann::json{{"nokey", true},{"value",meta.find("device")->second}};
+					}
+					if (meta.count("serial")) {
+						jmeta["Serial"] = nlohmann::json{{"value",meta.find("serial")->second}};
+					}
+
+					/*for (const auto &m : meta) {
+						jmeta[m.first] = m.second;
+					}*/
+				}
+			}
+
+			const auto &rgbdf = frame.cast<ftl::rgbd::Frame>();
+
+			if (frame.has(Channel::Calibration)) {
+				const auto &cam = rgbdf.getLeft();
+				auto &jcam = mod->getJSON(StatisticsPanel::CAMERA_DETAILS);
+				jcam["Resolution"] = std::to_string(cam.width) + std::string("x") + std::to_string(cam.height);
+				jcam["Focal"] = cam.fx;
+				jcam["Principle"] = std::to_string(int(cam.cx)) + std::string(",") + std::to_string(int(cam.cy));
+			}
+
+			if (frame.has(Channel::Capabilities)) {
+				const auto &caps = rgbdf.capabilities();
+				auto &jmeta = mod->getJSON(StatisticsPanel::MEDIA_META);
+
+				if (caps.count(Capability::TOUCH)) jmeta["Touch"] = nlohmann::json{{"icon", ENTYPO_ICON_MOUSE_POINTER},{"value", true}};
+				else jmeta.erase("Touch");
+
+				if (caps.count(Capability::MOVABLE)) jmeta["Movable"] = nlohmann::json{{"icon", ENTYPO_ICON_DIRECTION},{"value", true}};
+				else jmeta.erase("Movable");
+
+				if (caps.count(Capability::VR)) jmeta["VR"] = nlohmann::json{{"value", true}};
+				else jmeta.erase("VR");
+
+				if (caps.count(Capability::EQUI_RECT)) jmeta["360"] = nlohmann::json{{"icon", ENTYPO_ICON_COMPASS},{"value", true}};
+				else jmeta.erase("360");
+			}
+
+			std::map<ftl::data::Message,std::string> messages;
+			{
+				UNIQUE_LOCK(mtx_, lk);
+				std::swap(messages, messages_);
+			}
+
+			auto &jmsgs = mod->getJSON(StatisticsPanel::LOGGING);
+			jmsgs.clear();
+			if (messages.size() > 0) {
+				for (const auto &m : messages) {
+					auto &data = jmsgs.emplace_back();
+					data["value"] = m.second;
+					data["nokey"] = true;
+					if (int(m.first) < 1024) {
+						data["icon"] = ENTYPO_ICON_WARNING;
+						data["colour"] = "#0000ff";
+					} else if (int(m.first) < 2046) {
+						data["icon"] = ENTYPO_ICON_WARNING;
+						data["colour"] = "#00a6f0";
+					} else {
+
+					}
+				}
+			}
+		}
+	}
+}
+
+void Camera::_updateCapabilities(ftl::data::Frame &frame) {
+	if (frame.has(Channel::Capabilities)) {
+		live_ = false;
+		touch_ = false;
+		movable_ = false;
+		vr_ = false;
+
+		const auto &cap = frame.get<std::unordered_set<Capability>>(Channel::Capabilities);
+
+		for (auto c : cap) {
+			switch (c) {
+			case Capability::LIVE		: live_ = true; break;
+			case Capability::TOUCH		: touch_ = true; break;
+			case Capability::MOVABLE	: movable_ = true; break;
+			case Capability::VR			: vr_ = true; break;
+			default: break;
+			}
+		}
+	}
+}
+
+void Camera::initiate_(ftl::data::Frame &frame) {
+	if (frame.has(Channel::Capabilities)) {
+		const auto &rgbdf = frame.cast<ftl::rgbd::Frame>();
+		const auto &cap = rgbdf.capabilities();
+		for (auto c : cap) {
+			LOG(INFO) << " -- " << ftl::rgbd::capabilityName(c);
+
+			switch (c) {
+			case Capability::LIVE		: live_ = true; break;
+			case Capability::TOUCH		: touch_ = true; break;
+			case Capability::MOVABLE	: movable_ = true; break;
+			case Capability::VR			: vr_ = true; break;
+			default: break;
+			}
+		}
+
+		if (cap.count(Capability::VIRTUAL)) {
+			view = new ftl::gui2::CameraView3D(screen, this);
+		} else {
+			view = new ftl::gui2::CameraView(screen, this);
+		}
+	} else {
+		view = new ftl::gui2::CameraView(screen, this);
+	}
+
+	if (frame.has(Channel::MetaData)) {
+		const auto &meta = frame.metadata();
+		LOG(INFO) << "Camera Frame Meta Data:";
+		for (auto m : meta) {
+			LOG(INFO) << " -- " << m.first << " = " << m.second;
+		}
+	}
+
+	if (!view) return;
+
+	view->onClose([this](){
+		filter->remove();
+		filter = nullptr;
+		nframes_ = -1;
+
+		auto *mod = this->screen->getModule<ftl::gui2::Statistics>();
+
+		mod->getJSON(StatisticsPanel::PERFORMANCE_INFO).clear();
+		mod->getJSON(StatisticsPanel::MEDIA_STATUS).clear();
+		mod->getJSON(StatisticsPanel::MEDIA_META).clear();
+		mod->getJSON(StatisticsPanel::CAMERA_DETAILS).clear();
+	});
+
+	setChannel(channel);
+
+	screen->setView(view);
+	view->refresh();
+}
+
+float Camera::volume() {
+	return io->speaker()->volume();
+}
+
+void Camera::setVolume(float v) {
+	return io->speaker()->setVolume(v);
+}
+
+std::unordered_set<Channel> Camera::availableChannels() {
+	if (std::atomic_load(&latest_)) {
+		return latest_->frames[frame_idx].available();
+	}
+	return {};
+}
+
+std::unordered_set<Channel> Camera::allAvailableChannels() {
+	if (std::atomic_load(&latest_)) {
+		auto set = latest_->frames[frame_idx].available();
+		for (auto i : latest_->frames[frame_idx].allChannels()) {
+			set.emplace(i);
+		}
+		return set;
+	}
+	return {};
+}
+
+void Camera::activate(ftl::data::FrameID id) {
+	frame_idx = id.source();
+	frame_id_ = id;
+	last_ = glfwGetTime();
+	nframes_ = 0;
+	// Clear the members to defaults
+	has_seen_frame_ = false;
+	point_.id = -1;
+	live_ = false;
+	touch_ = false;
+	movable_ = false;
+	vr_ = false;
+
+	//std::mutex m;
+	//std::condition_variable cv;
+
+	filter = io->feed()->filter(std::unordered_set<unsigned int>{id.frameset()}, {Channel::Left});
+	filter->on(
+		[this, speaker = io->speaker()](ftl::data::FrameSetPtr fs){
+			if (paused) return true;
+
+			std::atomic_store(&current_fs_, fs);
+			std::atomic_store(&latest_, fs);
+
+			// Deal with audio
+			if (fs->frames[frame_idx].hasOwn(Channel::AudioStereo)) {
+				speaker->queue(fs->timestamp(), fs->frames[frame_idx]);
+			}
+
+			// Need to notify GUI thread when first data comes
+			if (!has_seen_frame_) {
+				//std::unique_lock<std::mutex> lk(m);
+				has_seen_frame_ = true;
+				//cv.notify_one();
+			}
+
+			// Extract and record any frame messages
+			auto &frame = fs->frames[frame_idx];
+			if (frame.hasMessages()) {
+				const auto &msgs = frame.messages();
+				//auto &jmsgs = mod->getJSON(StatisticsPanel::LOGGING);
+
+				UNIQUE_LOCK(mtx_, lk);
+				messages_.insert(msgs.begin(), msgs.end());
+			}
+
+			// Some capabilities can change over time
+			if (frame.changed(Channel::Capabilities)) {
+				_updateCapabilities(frame);
+			}
+
+			if (!view) return true;
+
+			if (live_ && touch_) {
+				if (point_.id >= 0) {
+					auto response = fs->frames[frame_idx].response();
+					auto &data = response.create<std::vector<ftl::codecs::Touch>>(Channel::Touch);
+					data.resize(1);
+					UNIQUE_LOCK(mtx_, lk);
+					data[0] = point_;
+					//point_.strength = 0;
+				}
+			}
+
+			screen->redraw();
+			nframes_++;
+			latency_ += ftl::timer::get_time() - fs->localTimestamp;
+			return true;
+		}
+	);
+
+	auto sets = filter->getLatestFrameSets();
+	if (sets.size() > 0) {
+		std::atomic_store(&current_fs_, sets.front());
+		std::atomic_store(&latest_, sets.front());
+		initiate_(sets.front()->frames[frame_idx]);
+	} else {
+		throw FTL_Error("Cannot activate camera, no data");
+	}
+
+	// For first data, extract useful things and create view
+	// Must be done in GUI thread, hence use of cv.
+	//std::unique_lock<std::mutex> lk(m);
+	//cv.wait_for(lk, 1s, [this](){ return has_seen_frame_; });
+	//initiate_(std::atomic_load(&current_fs_)->frames[frame_idx]);
+}
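+
+// Example (illustrative sketch): another module (such as a thumbnail selector)
+// is expected to open a camera view by activating this module with a FrameID:
+//
+//   screen->getModule<ftl::gui2::Camera>()->activate(ftl::data::FrameID(fsid, source));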
+
+void Camera::setChannel(Channel c) {
+	channel = c;
+	filter->select({c});
+}
+
+std::string Camera::getActiveSourceURI() {
+	auto ptr = std::atomic_load(&latest_);
+	if (ptr) {
+		auto &frame = ptr->frames[frame_idx];
+		if (frame.has(ftl::codecs::Channel::MetaData)) {
+			const auto &meta = frame.metadata();
+			auto i = meta.find("id");
+			if (i != meta.end()) {
+				return i->second;
+			}
+		}
+	}
+
+	return "";
+}
+
+bool Camera::isRecording() {
+	return io->feed()->isRecording();
+}
+
+void Camera::stopRecording() {
+	io->feed()->stopRecording();
+	filter->select({channel});
+}
+
+void Camera::startRecording(const std::string &filename, const std::unordered_set<ftl::codecs::Channel> &channels) {
+	filter->select(channels);
+	io->feed()->startRecording(filter, filename);
+}
+
+void Camera::startStreaming(const std::unordered_set<ftl::codecs::Channel> &channels) {
+	filter->select(channels);
+	io->feed()->startStreaming(filter);
+}
+
+void Camera::snapshot(const std::string &filename) {
+	auto ptr = std::atomic_load(&latest_);
+	if (ptr) {
+		auto &frame = ptr->frames[frame_idx];
+		if (frame.hasChannel(channel)) {
+			const auto &snap = frame.get<cv::Mat>(channel);
+			cv::Mat output;
+			cv::cvtColor(snap, output, cv::COLOR_BGRA2BGR);
+			cv::imwrite(filename, output);
+		}
+	}
+}
+
+ftl::cuda::TextureObject<uchar4>& Camera::getFrame() {
+	if (std::atomic_load(&current_fs_)) {
+		auto& frame = current_fs_->frames[frame_idx].cast<ftl::rgbd::Frame>();
+		if (frame.hasChannel(channel)) {
+			current_frame_ = colouriser_->colourise(frame, channel, 0);
+		} else {
+			throw FTL_Error("Channel missing for frame " << frame.timestamp() << ": '" << ftl::codecs::name(channel) << "'");
+		}
+		std::atomic_store(&current_fs_, {});
+	}
+	return current_frame_;
+}
+
+bool Camera::getFrame(ftl::cuda::TextureObject<uchar4>& frame) {
+	if (std::atomic_load(&current_fs_).get() != nullptr) {
+		frame = getFrame();
+		return true;
+	}
+	return false;
+}
+
+bool Camera::hasFrame() {
+	auto ptr = std::atomic_load(&current_fs_);
+	if (ptr && ptr->frames.size() > (unsigned int)(frame_idx)) {
+		return ptr->frames[frame_idx].hasChannel(channel);
+	}
+	return false;
+}
+
+void Camera::drawOverlay(NVGcontext *ctx) {
+	auto ptr = std::atomic_load(&latest_);
+	// TODO: Need all the source framesets here or all data dumped in by renderer
+	overlay_->draw(ctx, *ptr, ptr->frames[frame_idx].cast<ftl::rgbd::Frame>(), view->size().cast<float>());
+}
+
+void Camera::sendPose(const Eigen::Matrix4d &pose) {
+	if (live_ && movable_ && !vr_) {
+		if (auto ptr = std::atomic_load(&latest_)) {
+			auto response = ptr->frames[frame_idx].response();
+			auto &rgbdresponse = response.cast<ftl::rgbd::Frame>();
+			rgbdresponse.setPose() = pose;
+		}
+	}
+}
+
+void Camera::touch(int id, ftl::codecs::TouchType t, int x, int y, float d, int strength) {
+	if (value("enable_touch", false)) {
+		UNIQUE_LOCK(mtx_, lk);
+		point_.id = id;
+		point_.type = t;
+		point_.x = x;
+		point_.y = y;
+		point_.d = d;
+		point_.strength = strength; //std::max((unsigned char)strength, point_.strength);
+	}
+
+	// TODO: Check for touch capability first
+	/*if (auto ptr = std::atomic_load(&latest_)) {
+		auto response = ptr->frames[frame_idx].response();
+		auto &data = response.create<std::vector<ftl::codecs::Touch>>(Channel::Touch);
+		data.resize(0);
+		auto &pt = data.emplace_back();
+		pt.id = id;
+		pt.type = t;
+		pt.x = x;
+		pt.y = y;
+		pt.d = d;
+		pt.strength = strength;
+
+
+	}*/
+}
+
+float Camera::depthAt(int x, int y) {
+	if (value("show_depth", true)) {
+		auto ptr = std::atomic_load(&latest_);
+
+		if (ptr) {
+			const auto &frame = ptr->frames[frame_idx].cast<ftl::rgbd::Frame>();
+
+			if (frame.hasChannel(Channel::Depth)) {
+				const auto &depth = frame.get<cv::Mat>(Channel::Depth);
+				if (x >= 0 && y >= 0 && x < depth.cols && y < depth.rows) {
+					return depth.at<float>(y, x);
+				}
+			}
+		}
+	}
+	return 0.0f;
+}
diff --git a/applications/gui2/src/modules/camera.hpp b/applications/gui2/src/modules/camera.hpp
new file mode 100644
index 000000000..d4da36f3b
--- /dev/null
+++ b/applications/gui2/src/modules/camera.hpp
@@ -0,0 +1,101 @@
+#pragma once
+
+#include "../module.hpp"
+#include "../screen.hpp"
+#include "../views/camera.hpp"
+
+#include <ftl/render/colouriser.hpp>
+#include <ftl/render/overlay.hpp>
+#include <ftl/codecs/touch.hpp>
+
+namespace ftl {
+namespace gui2 {
+
+class Camera : public Module {
+public:
+	using Module::Module;
+
+	virtual void init() override;
+	virtual void update(double delta) override;
+
+	virtual void activate(ftl::data::FrameID id);
+	void setChannel(ftl::codecs::Channel c);
+	void setPaused(bool set) { paused = set; };
+	bool isPaused() { return paused; }
+
+	float volume();
+	void setVolume(float v);
+
+	/** Get the current active frame to display (always 4-channel uchar4). The
+	 * returned reference points to an internal buffer and remains valid only
+	 * until getFrame() is called again. */
+	ftl::cuda::TextureObject<uchar4>& getFrame();
+	bool getFrame(ftl::cuda::TextureObject<uchar4>&);
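+	// Illustrative only: a draw loop might poll for new data and fetch the texture, e.g.
+	//   if (camera->hasFrame()) { auto &tex = camera->getFrame(); /* upload/draw tex */ }
+	// The reference must not be cached across calls to getFrame().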
+
+	std::unordered_set<ftl::codecs::Channel> availableChannels();
+
+	/** This includes data channels etc */
+	std::unordered_set<ftl::codecs::Channel> allAvailableChannels();
+
+	void touch(int id, ftl::codecs::TouchType t, int x, int y, float d, int strength);
+
+	/** Check if new frame is available */
+	bool hasFrame();
+	void sendPose(const Eigen::Matrix4d &pose);
+
+	inline bool isLive() const { return live_; }
+	inline bool isTouchable() const { return touch_; }
+	inline bool isMovable() const { return movable_; }
+
+	ftl::render::Colouriser* colouriser() { return colouriser_.get(); };
+	ftl::overlay::Overlay* overlay() { return overlay_.get(); }
+
+	void drawOverlay(NVGcontext *ctx);
+
+	std::string getActiveSourceURI();
+
+	float depthAt(int x, int y);
+
+	bool isRecording();
+	void stopRecording();
+	void startRecording(const std::string &filename, const std::unordered_set<ftl::codecs::Channel> &channels);
+	void startStreaming(const std::unordered_set<ftl::codecs::Channel> &channels);
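+	// Illustrative only (the filename is a placeholder):
+	//   camera->startRecording("capture.ftl", {Channel::Left, Channel::Colour});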
+
+	void snapshot(const std::string &filename);
+
+private:
+	int frame_idx = -1;
+	ftl::data::FrameID frame_id_;
+	ftl::codecs::Channel channel = ftl::codecs::Channel::Colour;
+	ftl::stream::Feed::Filter *filter = nullptr;
+	std::atomic_bool paused = false; // TODO: implement in InputOutput
+	bool has_seen_frame_ = false;
+	ftl::codecs::Touch point_;
+	bool live_=false;
+	bool touch_=false;
+	bool movable_=false;
+	bool vr_=false;
+	float last_=0.0f;
+	std::atomic_int16_t nframes_=0;
+	std::atomic_int64_t latency_=0;
+	int update_fps_freq_=30; // fps counter update frequency (frames)
+
+	ftl::data::FrameSetPtr current_fs_;
+	ftl::data::FrameSetPtr latest_;
+	ftl::cuda::TextureObject<uchar4> current_frame_;
+
+	std::unique_ptr<ftl::render::Colouriser> colouriser_;
+	std::unique_ptr<ftl::overlay::Overlay> overlay_;
+
+	std::map<ftl::data::Message,std::string> messages_;
+
+	CameraView* view = nullptr;
+
+	MUTEX mtx_;
+
+	void initiate_(ftl::data::Frame &frame);
+	void _updateCapabilities(ftl::data::Frame &frame);
+};
+
+}
+}
diff --git a/applications/gui2/src/modules/config.cpp b/applications/gui2/src/modules/config.cpp
new file mode 100644
index 000000000..bfac6664e
--- /dev/null
+++ b/applications/gui2/src/modules/config.cpp
@@ -0,0 +1,30 @@
+#include "config.hpp"
+
+using ftl::gui2::ConfigCtrl;
+
+void ConfigCtrl::init() {
+	button = screen->addButton(ENTYPO_ICON_COG);
+	button->setTooltip("Settings");
+	button->setCallback([this](){
+		button->setPushed(false);
+		show();
+	});
+	button->setVisible(true);
+}
+
+void ConfigCtrl::show() {
+	if (screen->childIndex(window) == -1) {
+		window = new ftl::gui2::ConfigWindow(screen, io->master());
+	}
+	window->requestFocus();
+	window->setVisible(true);
+	screen->performLayout();
+}
+
+void ConfigCtrl::show(const std::string &uri) {
+	ftl::gui2::ConfigWindow::buildForm(screen, uri);
+}
+
+ConfigCtrl::~ConfigCtrl() {
+	
+}
diff --git a/applications/gui2/src/modules/config.hpp b/applications/gui2/src/modules/config.hpp
new file mode 100644
index 000000000..3a89a17f6
--- /dev/null
+++ b/applications/gui2/src/modules/config.hpp
@@ -0,0 +1,32 @@
+#pragma once
+
+#include "../module.hpp"
+#include "../screen.hpp"
+
+#include "../views/config.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Controller for the configuration (settings) window.
+ */
+class ConfigCtrl : public Module {
+public:
+	using Module::Module;
+	virtual ~ConfigCtrl();
+
+	virtual void init() override;
+	virtual void show();
+
+	void show(const std::string &uri);
+
+private:
+	nanogui::ToolButton *button;
+	ftl::gui2::ConfigWindow *window = nullptr;
+
+	std::list<nanogui::FormHelper *> forms_;
+};
+
+}
+}
diff --git a/applications/gui2/src/modules/statistics.cpp b/applications/gui2/src/modules/statistics.cpp
new file mode 100644
index 000000000..60fe0fedd
--- /dev/null
+++ b/applications/gui2/src/modules/statistics.cpp
@@ -0,0 +1,98 @@
+#include "statistics.hpp"
+
+#include "../screen.hpp"
+#include "../views/statistics.hpp"
+
+#include <ftl/streams/builder.hpp>
+#include <ftl/streams/netstream.hpp>
+
+#include <nanogui/entypo.h>
+
+#include <loguru.hpp>
+
+using ftl::gui2::Statistics;
+using ftl::gui2::StatisticsPanel;
+
+template <typename T>
+std::string to_string_with_precision(const T a_value, const int n = 6) {
+	std::ostringstream out;
+	out.precision(n);
+	out << std::fixed << a_value;
+	return out.str();
+}
+
+void Statistics::update(double delta) {
+	time_count_ += delta;
+	if (time_count_ > 1.0) {
+		float bitrate = ftl::stream::Net::getRequiredBitrate();
+		if (bitrate > 0.0f) {
+			getJSON(StatisticsPanel::PERFORMANCE_INFO)["Bitrate"] = to_string_with_precision(bitrate, 1) + std::string(" Mbit/s");
+		}
+		time_count_ = 0.0;
+	}
+}
+
+void Statistics::init() {
+	/**
+	 * TODO: store all values in hash table and allow other modules to
+	 * add/remove items/groups.
+	 */
+
+	widget = new ftl::gui2::StatisticsWidget(screen, this);
+	widget->setVisible(value("visible", false));
+	auto button = screen->addButton(ENTYPO_ICON_INFO);
+	button->setTooltip("Show Information");
+	button->setCallback([this, button](){
+		button->setPushed(false);
+		widget->setVisible(!widget->visible());
+	});
+
+	button->setVisible(true);
+}
+
+void Statistics::setCursor(nanogui::Cursor c) {
+	widget->setCursor(c);
+}
+
+/*void Statistics::set(const std::string &key, const std::string& value) {
+	text_[key] = value;
+}
+
+void Statistics::set(const std::string &key, float value, const std::string& unit) {
+	text_[key] = to_string_with_precision(value, 3) + unit;
+}
+
+std::vector<std::string> Statistics::get() {
+	std::vector<std::string> res;
+	res.reserve(text_.size());
+	for (auto& [k, v] : text_) {
+		res.push_back(k + ": " +v );
+	}
+	return res;
+}*/
+
+nlohmann::json &Statistics::getJSON(StatisticsPanel p) {
+	return groups_[p].json;
+}
+
+void Statistics::show(StatisticsPanel p, bool visible) {
+	groups_[p].visible = visible;
+}
+
+void Statistics::hide(StatisticsPanel p) {
+	groups_[p].visible = false;
+}
+
+bool Statistics::isVisible(StatisticsPanel p) {
+	return groups_[p].visible;
+}
+
+std::vector<std::pair<StatisticsPanel, const nlohmann::json &>> Statistics::get() const {
+	std::vector<std::pair<StatisticsPanel, const nlohmann::json &>> results;
+
+	for (const auto &i : groups_) {
+		results.emplace_back(i.first, i.second.json);
+	}
+
+	return results;
+}
diff --git a/applications/gui2/src/modules/statistics.hpp b/applications/gui2/src/modules/statistics.hpp
new file mode 100644
index 000000000..913815fb7
--- /dev/null
+++ b/applications/gui2/src/modules/statistics.hpp
@@ -0,0 +1,56 @@
+#pragma once
+
+#include "../module.hpp"
+#include <nlohmann/json.hpp>
+
+namespace ftl
+{
+namespace gui2
+{
+
+enum class StatisticsPanel {
+	MEDIA_STATUS=0,			// Live or not?
+	PERFORMANCE_INFO,		// Bitrate, fps etc
+	STREAM_DATA,			// Channel info
+	MEDIA_META,				// Name, device, capabilities
+	CAMERA_DETAILS,			// Calibration info
+	LOGGING					// Stream error and log messages
+	// Chat, media name, ...
+};
+
+class Statistics : public Module {
+public:
+	using Module::Module;
+	virtual void init() override;
+	virtual void update(double delta) override;
+
+	// not thread safe! (use only from gui thread or add lock)
+	/*void set(const std::string &key, const std::string& value);
+	void set(const std::string &key, int value);
+	void set(const std::string &key, float value, const std::string& unit = "");*/
+
+	nlohmann::json &getJSON(StatisticsPanel);
+
+	void show(StatisticsPanel, bool visible=true);
+	void hide(StatisticsPanel);
+	bool isVisible(StatisticsPanel);
+
+	void setCursor(nanogui::Cursor);
+
+	//void remove(const std::string &key) { text_.erase(key); }
+
+	std::vector<std::pair<StatisticsPanel, const nlohmann::json &>> get() const;
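+	// Illustrative use from another module (names are examples; compare update() above):
+	//   auto &json = screen->getModule<Statistics>()->getJSON(StatisticsPanel::PERFORMANCE_INFO);
+	//   json["Bitrate"] = "2.5 Mbit/s";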
+
+private:
+	struct StatsGroup {
+		// TODO: Other properties...
+		nlohmann::json json; // = nlohmann::json::object_t();
+		bool visible=true;
+	};
+
+	nanogui::Widget* widget;
+	std::map<StatisticsPanel, StatsGroup> groups_;
+	double time_count_=0.0;
+};
+}
+}
diff --git a/applications/gui2/src/modules/themes.cpp b/applications/gui2/src/modules/themes.cpp
new file mode 100644
index 000000000..67f7f7760
--- /dev/null
+++ b/applications/gui2/src/modules/themes.cpp
@@ -0,0 +1,103 @@
+#include "themes.hpp"
+#include "nanogui/theme.h"
+#include "../screen.hpp"
+
+using ftl::gui2::Themes;
+using nanogui::Theme;
+
+void Themes::init() {
+	auto* toolbuttheme = screen->getTheme("toolbutton");
+	toolbuttheme->mBorderDark = nanogui::Color(0,0);
+	toolbuttheme->mBorderLight = nanogui::Color(0,0);
+	toolbuttheme->mButtonGradientBotFocused = nanogui::Color(60,255);
+	toolbuttheme->mButtonGradientBotUnfocused = nanogui::Color(0,0);
+	toolbuttheme->mButtonGradientTopFocused = nanogui::Color(60,255);
+	toolbuttheme->mButtonGradientTopUnfocused = nanogui::Color(0,0);
+	toolbuttheme->mButtonGradientTopPushed = nanogui::Color(60,180);
+	toolbuttheme->mButtonGradientBotPushed = nanogui::Color(60,180);
+	toolbuttheme->mTextColor = nanogui::Color(0.9f,0.9f,0.9f,0.9f);
+	toolbuttheme->mWindowDropShadowSize = 0;
+	toolbuttheme->mDropShadow = nanogui::Color(0,0);
+
+	auto* windowtheme = screen->getTheme("window_light");
+	windowtheme->mWindowFillFocused = nanogui::Color(220, 200);
+	windowtheme->mWindowFillUnfocused = nanogui::Color(220, 200);
+	windowtheme->mWindowHeaderGradientBot = nanogui::Color(60,230);
+	windowtheme->mWindowHeaderGradientTop = nanogui::Color(60,230);
+	windowtheme->mWindowHeaderSepBot = nanogui::Color(60, 230);
+	windowtheme->mTextColor = nanogui::Color(20,255);
+	windowtheme->mDisabledTextColor = nanogui::Color(140, 255);
+	windowtheme->mWindowCornerRadius = 2;
+	windowtheme->mButtonGradientBotFocused = nanogui::Color(210,255);
+	windowtheme->mButtonGradientBotUnfocused = nanogui::Color(190,255);
+	windowtheme->mButtonGradientTopFocused = nanogui::Color(230,255);
+	windowtheme->mButtonGradientTopUnfocused = nanogui::Color(230,255);
+	windowtheme->mButtonGradientTopPushed = nanogui::Color(170,255);
+	windowtheme->mButtonGradientBotPushed = nanogui::Color(210,255);
+	windowtheme->mBorderDark = nanogui::Color(150,255);
+	windowtheme->mBorderMedium = nanogui::Color(165,255);
+	windowtheme->mBorderLight = nanogui::Color(230,255);
+	windowtheme->mButtonFontSize = 16;
+	windowtheme->mTextColorShadow = nanogui::Color(0,0);
+	windowtheme->mWindowTitleUnfocused = windowtheme->mWindowTitleFocused;
+	windowtheme->mWindowTitleFocused = nanogui::Color(240,255);
+	windowtheme->mIconScale = 0.85f;
+
+	auto* viewtheme = screen->getTheme("view");
+	viewtheme->mWindowFillFocused = nanogui::Color(0, 0);
+	viewtheme->mWindowFillUnfocused = nanogui::Color(0, 0);
+	viewtheme->mWindowCornerRadius = 0;
+	viewtheme->mBorderDark = nanogui::Color(0 ,0);
+	viewtheme->mBorderMedium = nanogui::Color(0 ,0);
+	viewtheme->mBorderLight = nanogui::Color(0 ,0);
+	viewtheme->mWindowHeaderGradientBot = nanogui::Color(0, 0);
+	viewtheme->mWindowHeaderGradientTop = nanogui::Color(0, 0);
+	viewtheme->mWindowHeaderSepBot = nanogui::Color(0, 0);
+	viewtheme->mTextColorShadow = nanogui::Color(0, 0);
+	viewtheme->mWindowDropShadowSize = 0;
+
+	auto* windowtheme_dark = screen->getTheme("window_dark");
+	windowtheme_dark->mWindowCornerRadius = 5;
+	/*windowtheme_dark->mButtonGradientBotFocused = nanogui::Color(90,255);
+	windowtheme_dark->mButtonGradientBotUnfocused = nanogui::Color(70,255);
+	windowtheme_dark->mButtonGradientTopFocused = nanogui::Color(110,255);
+	windowtheme_dark->mButtonGradientTopUnfocused = nanogui::Color(110,255);
+	windowtheme_dark->mButtonGradientTopPushed = nanogui::Color(50,255);
+	windowtheme_dark->mButtonGradientBotPushed = nanogui::Color(90,255);*/
+	windowtheme_dark->mButtonGradientBotFocused = nanogui::Color(60,255);
+	windowtheme_dark->mButtonGradientBotUnfocused = nanogui::Color(35,35,40,180);
+	windowtheme_dark->mButtonGradientTopFocused = nanogui::Color(60,255);
+	windowtheme_dark->mButtonGradientTopUnfocused = nanogui::Color(35,35,40,180);
+	windowtheme_dark->mButtonGradientTopPushed = nanogui::Color(90,180);
+	windowtheme_dark->mButtonGradientBotPushed = nanogui::Color(90,180);
+	windowtheme_dark->mButtonFontSize = 16;
+	windowtheme_dark->mIconScale = 0.85f;
+	windowtheme_dark->mBorderDark = nanogui::Color(20,0);
+	windowtheme_dark->mBorderMedium = nanogui::Color(20,0);
+	windowtheme_dark->mBorderLight = nanogui::Color(20,0);
+
+	auto* mediatheme = screen->getTheme("media");
+	mediatheme->mIconScale = 1.2f;
+	mediatheme->mWindowDropShadowSize = 0;
+	mediatheme->mWindowFillFocused = nanogui::Color(45, 150);
+	mediatheme->mWindowFillUnfocused = nanogui::Color(45, 80);
+	mediatheme->mButtonGradientTopUnfocused = nanogui::Color(0,0);
+	mediatheme->mButtonGradientBotUnfocused = nanogui::Color(0,0);
+	mediatheme->mButtonGradientTopFocused = nanogui::Color(80,230);
+	mediatheme->mButtonGradientBotFocused = nanogui::Color(80,230);
+	mediatheme->mIconColor = nanogui::Color(255,255);
+	mediatheme->mTextColor = nanogui::Color(1.0f,1.0f,1.0f,1.0f);
+	mediatheme->mBorderDark = nanogui::Color(0,0);
+	mediatheme->mBorderMedium = nanogui::Color(0,0);
+	mediatheme->mBorderLight = nanogui::Color(0,0);
+	mediatheme->mDropShadow = nanogui::Color(0,0);
+	mediatheme->mButtonFontSize = 30;
+	mediatheme->mStandardFontSize = 20;
+
+	// https://flatuicolors.com/palette/defo
+	screen->setColor("highlight1", nanogui::Color(231, 76, 60, 255)); // red
+	screen->setColor("highlight2", nanogui::Color(52, 152, 219, 255)); // blue
+
+	screen->setColor("highlight1_disabled", nanogui::Color(166, 166, 166, 255));
+	screen->setColor("highlight2_disabled", nanogui::Color(166, 166, 166, 255));
+}
diff --git a/applications/gui2/src/modules/themes.hpp b/applications/gui2/src/modules/themes.hpp
new file mode 100644
index 000000000..af5d7337b
--- /dev/null
+++ b/applications/gui2/src/modules/themes.hpp
@@ -0,0 +1,16 @@
+#pragma once
+
+#include "../module.hpp"
+
+namespace ftl
+{
+namespace gui2
+{
+
+class Themes : public Module {
+public:
+	using Module::Module;
+	virtual void init() override;
+};
+}
+}
diff --git a/applications/gui2/src/modules/thumbnails.cpp b/applications/gui2/src/modules/thumbnails.cpp
new file mode 100644
index 000000000..f23dd97ee
--- /dev/null
+++ b/applications/gui2/src/modules/thumbnails.cpp
@@ -0,0 +1,76 @@
+#include "thumbnails.hpp"
+#include "../views/thumbnails.hpp"
+
+#include "camera.hpp"
+
+#include <ftl/codecs/channels.hpp>
+
+#include <nanogui/entypo.h>
+
+using ftl::codecs::Channel;
+using ftl::gui2::ThumbnailsController;
+
+void ThumbnailsController::init() {
+	auto button = screen->addButton(ENTYPO_ICON_HOME);
+	button->setTooltip("Home");
+	button->setCallback([this, button](){
+		button->setPushed(false);
+		activate();
+	});
+	button->setVisible(true);
+}
+
+void ThumbnailsController::activate() {
+	show_thumbnails();
+}
+
+ThumbnailsController::~ThumbnailsController() {
+
+}
+
+void ThumbnailsController::removeFrameset(uint32_t id) {
+	{
+		std::unique_lock<std::mutex> lk(mtx_);
+		framesets_.erase(id);
+	}
+	io->feed()->remove(id);
+}
+
+void ThumbnailsController::show_thumbnails() {
+	auto thumb_view = new ftl::gui2::Thumbnails(screen, this);
+
+	auto* filter = io->feed()->filter({Channel::Colour});
+	filter->on(
+		[this, thumb_view](const ftl::data::FrameSetPtr& fs){
+			{
+				std::unique_lock<std::mutex> lk(mtx_);
+				framesets_[fs->frameset()] = fs;
+			}
+			screen->redraw();
+			return true;
+	});
+
+	thumb_view->onClose([filter](){
+		filter->remove();
+	});
+
+	screen->setView(thumb_view);
+}
+
+std::vector<ftl::data::FrameSetPtr> ThumbnailsController::getFrameSets() {
+	std::unique_lock<std::mutex> lk(mtx_);
+	std::vector<ftl::data::FrameSetPtr> framesets;
+	framesets.reserve(framesets_.size());
+
+	for (auto& [k, v] : framesets_) {
+		std::ignore = k;
+		framesets.push_back(v);
+	}
+
+	return framesets;
+}
+
+void ThumbnailsController::show_camera(ftl::data::FrameID id) {
+	auto* camera = screen->getModule<ftl::gui2::Camera>();
+	camera->activate(id);
+}
diff --git a/applications/gui2/src/modules/thumbnails.hpp b/applications/gui2/src/modules/thumbnails.hpp
new file mode 100644
index 000000000..c24f4641d
--- /dev/null
+++ b/applications/gui2/src/modules/thumbnails.hpp
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "../module.hpp"
+#include "../screen.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Controller for thumbnail view.
+ */
+class ThumbnailsController : public Module {
+public:
+	using Module::Module;
+	virtual ~ThumbnailsController();
+
+	virtual void init() override;
+	virtual void activate();
+
+	void show_thumbnails();
+	void show_camera(ftl::data::FrameID id);
+
+	std::vector<ftl::data::FrameSetPtr> getFrameSets();
+
+	void removeFrameset(uint32_t id);
+
+private:
+	std::mutex mtx_;
+	std::map<unsigned int, ftl::data::FrameSetPtr> framesets_;
+};
+
+}
+}
diff --git a/applications/gui2/src/screen.cpp b/applications/gui2/src/screen.cpp
new file mode 100644
index 000000000..6fbd703b8
--- /dev/null
+++ b/applications/gui2/src/screen.cpp
@@ -0,0 +1,216 @@
+#include <nanogui/opengl.h>
+#include <nanogui/glutil.h>
+#include <nanogui/screen.h>
+#include <nanogui/window.h>
+#include <nanogui/layout.h>
+#include <nanogui/imageview.h>
+#include <nanogui/label.h>
+#include <nanogui/toolbutton.h>
+#include <nanogui/popupbutton.h>
+
+#include <Eigen/Eigen>
+
+#include "screen.hpp"
+#include "widgets/window.hpp"
+
+#include <nanogui/messagedialog.h>
+
+#include <loguru.hpp>
+
+using std::min;
+using std::max;
+
+using Eigen::Vector2i;
+
+using ftl::gui2::Screen;
+
+static const int toolbar_w = 50;
+static const Vector2i wsize(1280+toolbar_w,720);
+
+Screen::Screen() :
+		nanogui::Screen(wsize, "FT-Lab Remote Presence"),
+		toolbar_(nullptr),
+		active_view_(nullptr), msgerror_(nullptr) {
+
+	using namespace nanogui;
+
+	setSize(wsize);
+
+	toolbar_ = new FixedWindow(this);
+	toolbar_->setPosition({0, 0});
+	toolbar_->setWidth(toolbar_w);
+	toolbar_->setHeight(height());
+
+	setResizeCallback([this](const Vector2i &s) {
+		toolbar_->setFixedSize({toolbar_->width(), s[1]});
+		toolbar_->setPosition({0, 0});
+		if (active_view_) {
+			active_view_->setSize(viewSize(s));
+		}
+		performLayout();
+	});
+
+	tools_ = new Widget(toolbar_);
+	tools_->setLayout(new BoxLayout( Orientation::Vertical,
+									Alignment::Middle, 0, 10));
+	tools_->setPosition(Vector2i(5,10));
+
+	setVisible(true);
+	performLayout();
+}
+
+Screen::~Screen() {
+	// Remove the view first; its onClose() callback can depend on a module.
+	if (active_view_) {
+		this->removeChild(active_view_);
+		active_view_ = nullptr;
+	}
+
+	for (auto [name, ptr] : modules_) {
+		std::ignore = name;
+		delete ptr;
+	}
+}
+
+
+nanogui::Theme* Screen::getTheme(const std::string &name) {
+	if (themes_.count(name) == 0) {
+		themes_[name] = new nanogui::Theme(*theme());
+	}
+	return themes_[name];
+}
+
+nanogui::Color Screen::getColor(const std::string &name) {
+	if (colors_.count(name) == 0) {
+		return nanogui::Color(0, 0, 0, 0);
+	}
+	return colors_[name];
+}
+
+void Screen::setColor(const std::string &name, const nanogui::Color &c) {
+	colors_[name] = c;
+}
+
+void Screen::redraw() {
+	// glfwPostEmptyEvent() is safe to call from any thread
+	// https://www.glfw.org/docs/3.3/intro_guide.html#thread_safety
+	glfwPostEmptyEvent();
+}
+
+nanogui::Vector2i Screen::viewSize(const nanogui::Vector2i &ws) {
+	return {ws.x() - toolbar_->width(), ws.y()};
+}
+
+nanogui::Vector2i Screen::viewSize() {
+	return viewSize(size());
+}
+
+
+void Screen::showError(const std::string &title, const std::string &msg) {
+	// FIXME: This isn't thread safe?
+	if (msgerror_) { return; }
+	msgerror_ = new nanogui::MessageDialog
+		(screen(), nanogui::MessageDialog::Type::Warning, title, msg);
+	msgerror_->setModal(false);
+	msgerror_->setCallback([this](int){
+		msgerror_ = nullptr;
+	});
+}
+
+void Screen::setView(ftl::gui2::View *view) {
+
+	view->setPosition({toolbar_->width(), 0});
+
+	view->setTheme(getTheme("view"));
+	view->setVisible(true);
+
+	if (childIndex(view) == -1) {
+		addChild(view);
+	}
+
+	if (active_view_) {
+		active_view_->setVisible(false);
+
+		// View requires the same cleanup as Window (see screen.cpp) before being removed.
+		if (std::find(mFocusPath.begin(), mFocusPath.end(), active_view_) != mFocusPath.end()) {
+			mFocusPath.clear();
+		}
+		if (mDragWidget == active_view_) {
+			mDragWidget = nullptr;
+		}
+
+		removeChild(active_view_);
+	}
+
+	// All windows should stay in front of the new view
+	mChildren.erase(std::remove(mChildren.begin(), mChildren.end(), view), mChildren.end());
+	mChildren.insert(mChildren.begin(), view);
+
+	active_view_ = view;
+	LOG(INFO) << "number of children (Screen): " << mChildren.size();
+
+	// hide all popups (TODO: only works on toolbar at the moment)
+	for (nanogui::Widget* widget : tools_->children()) {
+		if (auto button = dynamic_cast<nanogui::PopupButton*>(widget)) {
+			button->setPushed(false);
+		}
+	}
+
+	performLayout();
+}
+
+void Screen::render() {
+	if (active_view_) {
+		active_view_->render();
+	}
+}
+
+ftl::gui2::Module* Screen::addModule_(const std::string &name, ftl::gui2::Module* ptr) {
+	ptr->init();
+	if (modules_.find(name) != modules_.end()) {
+		LOG(WARNING) << "Module " << name << " already loaded. Removing old module";
+		delete modules_[name];
+	}
+
+	modules_[name] = ptr;
+	return ptr;
+}
+
+
+bool Screen::keyboardEvent(int key, int scancode, int action, int modifiers) {
+
+	if (nanogui::Screen::keyboardEvent(key, scancode, action, modifiers)) {
+		return true;
+	}
+
+	if (active_view_) {
+		// event not processed in any focused widget
+		return active_view_->keyboardEvent(key, scancode, action, modifiers);
+	}
+
+	return false;
+}
+
+bool Screen::keyboardCharacterEvent(unsigned int codepoint) {
+
+	if (nanogui::Screen::keyboardCharacterEvent(codepoint)) {
+		return true;
+	}
+
+	if (active_view_) {
+		// event not processed in any focused widget
+		return active_view_->keyboardCharacterEvent(codepoint);
+	}
+
+	return false;
+}
+
+void Screen::drawAll() {
+	double now = glfwGetTime();
+	double delta = now - last_draw_time_;
+	for (const auto& [name, mod] : modules_) {
+		mod->update(delta);
+	}
+	last_draw_time_ = now;
+	nanogui::Screen::drawAll();
+}
diff --git a/applications/gui2/src/screen.hpp b/applications/gui2/src/screen.hpp
new file mode 100644
index 000000000..a5e47d6a3
--- /dev/null
+++ b/applications/gui2/src/screen.hpp
@@ -0,0 +1,165 @@
+#pragma once
+
+#include <nanogui/screen.h>
+#include <nanogui/glutil.h>
+
+#include <nanogui/toolbutton.h>
+
+#include <map>
+#include <memory>
+#include <typeinfo>
+
+#include "view.hpp"
+#include "module.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * FTL GUI main screen. Methods may only be called from the main (GUI) thread
+ * unless otherwise documented.
+ */
+class Screen : public nanogui::Screen {
+public:
+	explicit Screen();
+	virtual ~Screen();
+
+	virtual void drawAll() override;
+
+	virtual bool keyboardEvent(int key, int scancode, int action, int modifiers) override;
+	virtual bool keyboardCharacterEvent(unsigned int codepoint) override;
+
+	void render(); // necessary?
+	/** Redraw the screen (triggers an empty event). Thread safe. */
+	void redraw();
+
+	void activate(Module *ptr);
+
+	/** Set the active view (use an existing object). */
+	void setView(ftl::gui2::View* view);
+	/** Set the active view (create a new object). */
+	template<typename T, typename ... Args>
+	void setView(Args ... args);
+
+	bool isActiveView(View* ptr) { return active_view_ == ptr; }
+
+	/** Add a module.*/
+	template<typename T, typename ... Args>
+	T* addModule(const std::string &name, ftl::Configurable *config, Args ... args);
+
+	/** Get a pointer to a module, identified by name. Throws an exception if not found. */
+	template<typename T>
+	T* getModule(const std::string &name);
+
+	/** Get a pointer to a module, identified by the dynamic type given as the template parameter.
+	 * Throws an exception if not found. If more than one match is possible (the same module
+	 * loaded multiple times), any of them may be returned.
+	 */
+	template<typename T>
+	T* getModule();
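+	// Illustrative usage only (names are placeholders; extra constructor arguments are forwarded):
+	//   screen->addModule<ThumbnailsController>("thumbnails", config /*, ... */);
+	//   screen->getModule<ThumbnailsController>();             // look up by type
+	//   screen->getModule<ThumbnailsController>("thumbnails"); // look up by name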
+
+	// prefer the templates above (explicit about who manages delete)
+	// template<typename T>
+	// T* addModule(T* ptr) { return addModule_(ptr); }
+
+	// TODO removeModule() as well?
+
+	/** Add a button to the toolbar. */
+	template<typename T=nanogui::ToolButton, typename ... Args>
+	T* addButton(Args ... args);
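+	// e.g. auto *b = screen->addButton(ENTYPO_ICON_HOME); b->setCallback(...);
+	// (see ThumbnailsController::init() for a full example)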
+
+	/** themes/colors */
+	nanogui::Theme* getTheme(const std::string &name);
+	nanogui::Color getColor(const std::string &name);
+	void setColor(const std::string &name, const nanogui::Color &c);
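+	// e.g. window->setTheme(screen->getTheme("window_dark"));
+	//      auto c = screen->getColor("highlight1");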
+
+	// Implement in View or Screen? Add ID (address of creating instance)
+	// to each error to prevent spam?
+	/** Show error message popup */
+	void showError(const std::string& title, const std::string &msg);
+
+	nanogui::Vector2i viewSize(const nanogui::Vector2i &ws);
+	nanogui::Vector2i viewSize();
+
+private:
+	Module* addModule_(const std::string &name, Module* ptr);
+
+	//std::mutex mtx_; // not used: do not modify gui outside gui (main) thread
+	std::map<std::string, ftl::gui2::Module*> modules_;
+	std::map<std::string, nanogui::ref<nanogui::Theme>> themes_;
+	std::map<std::string, nanogui::Color> colors_;
+
+	nanogui::Widget *toolbar_;
+	nanogui::Widget *tools_;
+
+	ftl::gui2::View *active_view_;
+
+	nanogui::MessageDialog* msgerror_;
+	double last_draw_time_=0.0;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+template<typename T, typename ... Args>
+void Screen::setView(Args ... args) {
+	setView(new T(this, args ...));
+}
+
+template<typename T, typename ... Args>
+T* Screen::addModule(const std::string &name, ftl::Configurable *config, Args ... args) {
+	static_assert(std::is_base_of<Module, T>::value);
+
+	return dynamic_cast<T*>(
+		addModule_(
+			name,
+			ftl::config::create<T>(config, name, args ...)
+		)
+	);
+}
+
+template<typename T>
+T* Screen::getModule(const std::string &name) {
+	static_assert(std::is_base_of<Module, T>::value);
+
+	if (modules_.find(name) == modules_.end()) {
+		throw ftl::exception("module: " + name + " not found");
+	}
+
+	auto* ptr = dynamic_cast<T*>(modules_[name]);
+
+	if (ptr == nullptr) {
+		throw ftl::exception("bad cast, module requested with wrong type");
+	}
+
+	return ptr;
+}
+
+template<typename T>
+T* Screen::getModule() {
+	static_assert(std::is_base_of<Module, T>::value);
+
+	for (auto& [name, ptr] : modules_) {
+		std::ignore = name;
+		if (typeid(*ptr) == typeid(T)) {
+			return dynamic_cast<T*>(ptr);
+		}
+	}
+
+	throw ftl::exception("module not found");
+}
+
+template<typename T, typename ... Args>
+T* Screen::addButton(Args ... args) {
+	static_assert(std::is_base_of<nanogui::Button, T>::value);
+
+	T* button = new T(tools_, args ...);
+	button->setIconExtraScale(1.5f);
+	button->setTheme(getTheme("toolbutton"));  // creates the theme if it does not exist yet
+	button->setFixedSize(nanogui::Vector2i(40, 40));
+	performLayout();
+	return button;
+}
+
+}
+}
diff --git a/applications/gui2/src/view.cpp b/applications/gui2/src/view.cpp
new file mode 100644
index 000000000..67297b6b4
--- /dev/null
+++ b/applications/gui2/src/view.cpp
@@ -0,0 +1,10 @@
+#include <nanogui/widget.h>
+
+#include "view.hpp"
+#include "screen.hpp"
+
+using ftl::gui2::View;
+
+View::View(Screen* screen) : nanogui::Widget(screen), screen_(screen) {
+	setSize(screen_->viewSize());
+}
diff --git a/applications/gui2/src/view.hpp b/applications/gui2/src/view.hpp
new file mode 100644
index 000000000..90687d851
--- /dev/null
+++ b/applications/gui2/src/view.hpp
@@ -0,0 +1,37 @@
+#pragma once
+
+#include <nanogui/widget.h>
+#include "inputoutput.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+class Screen;
+
+class View : public nanogui::Widget {
+public:
+	View(Screen* parent);
+
+	virtual ~View() {
+		if(cb_close_) {
+			cb_close_();
+		}
+	}
+
+	/** Set the onClose callback, called when the view is closed (destroyed). */
+	void onClose(const std::function<void()> &cb) { cb_close_ = cb; }
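+	// e.g. a module can release stream resources when its view is destroyed:
+	//   view->onClose([filter]() { filter->remove(); });  // see ThumbnailsController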
+
+	virtual void render() {} // TODO: remove if VR works?
+
+	inline Screen *gui() const { return screen_; }
+
+private:
+	std::function<void()> cb_close_;
+	Screen *screen_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/addsource.cpp b/applications/gui2/src/views/addsource.cpp
new file mode 100644
index 000000000..c8d62aac5
--- /dev/null
+++ b/applications/gui2/src/views/addsource.cpp
@@ -0,0 +1,274 @@
+#include "addsource.hpp"
+#include "../modules/addsource.hpp"
+
+#include "../widgets/combobox.hpp"
+
+#include <nanogui/layout.h>
+#include <nanogui/label.h>
+#include <nanogui/button.h>
+#include <nanogui/vscrollpanel.h>
+#include <nanogui/tabwidget.h>
+#include <nanogui/formhelper.h>
+
+#include <loguru.hpp>
+
+
+using ftl::gui2::AddSourceWindow;
+
+AddSourceWindow::AddSourceWindow(nanogui::Widget* parent, AddCtrl *ctrl) :
+		nanogui::Window(parent, ""), ctrl_(ctrl) {
+
+	using namespace nanogui;
+
+	auto t = dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark");
+	setTheme(t);
+
+	//setFixedWidth(500);
+	setFixedSize(Vector2i(500,300));
+	setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 20, 10));
+
+	setPosition(Vector2i(parent->width()/2.0f - fixedWidth()/2.0f, parent->height()/2.0f - fixedHeight()/2.0f));
+
+	auto close = new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS);
+	close->setTheme(dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark"));
+	close->setBackgroundColor(theme()->mWindowHeaderGradientBot);
+	close->setCallback([this](){ this->close();});
+
+	auto *title = new Label(this, "Add Source", "sans-bold");
+	title->setFontSize(28);
+
+	tabs_ = new TabWidget(this);
+
+	auto *recent_tab = tabs_->createTab("Recent");
+	recent_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	VScrollPanel *vscroll = new VScrollPanel(recent_tab);
+	vscroll->setFixedHeight(200);
+	Widget *recentscroll = new Widget(vscroll);
+	recentscroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	auto *group_tab = tabs_->createTab("Groups");
+	group_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	vscroll = new VScrollPanel(group_tab);
+	vscroll->setFixedHeight(200);
+	Widget *groupscroll = new Widget(vscroll);
+	groupscroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	auto *dev_tab = tabs_->createTab("Devices");
+	dev_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	vscroll = new VScrollPanel(dev_tab);
+	vscroll->setFixedHeight(200);
+	Widget *devscroll = new Widget(vscroll);
+	devscroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	auto *host_tab = tabs_->createTab("Hosts");
+	host_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	vscroll = new VScrollPanel(host_tab);
+	vscroll->setFixedHeight(200);
+	Widget *hostscroll = new Widget(vscroll);
+	hostscroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	auto *stream_tab = tabs_->createTab("Streams");
+	stream_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	vscroll = new VScrollPanel(stream_tab);
+	vscroll->setFixedHeight(200);
+	Widget *streamscroll = new Widget(vscroll);
+	streamscroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	auto *file_tab = tabs_->createTab("Files");
+	file_tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 0));
+	vscroll = new VScrollPanel(file_tab);
+	vscroll->setFixedHeight(200);
+	Widget *filescroll = new Widget(vscroll);
+	filescroll->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill, 10, 4));
+
+	tab_items_.resize(6);
+	tab_items_[0] = recentscroll;
+	tab_items_[1] = groupscroll;
+	tab_items_[2] = devscroll;
+	tab_items_[3] = hostscroll;
+	tab_items_[4] = streamscroll;
+	tab_items_[5] = filescroll;
+
+	uptodate_.test_and_set();
+	rebuild();
+	tabs_->setActiveTab(0);
+
+	new_source_handle_ = ctrl_->feed()->onNewSources([this](const std::vector<std::string> &srcs) {
+		UNIQUE_LOCK(mutex_, lk);
+		uptodate_.clear();
+		return true;
+	});
+}
+
+AddSourceWindow::~AddSourceWindow() {
+
+}
+
+nanogui::Button *AddSourceWindow::_addButton(const std::string &s, nanogui::Widget *parent, bool hide) {
+	using namespace nanogui;
+
+	ftl::URI uri(s);
+	int icon = 0;
+	switch (uri.getScheme()) {
+	case ftl::URI::SCHEME_DEVICE		: icon = ENTYPO_ICON_CAMERA; break;
+	case ftl::URI::SCHEME_FILE			: icon = ENTYPO_ICON_FOLDER_VIDEO; break;
+	case ftl::URI::SCHEME_FTL			: icon = ENTYPO_ICON_CLOUD; break;
+	case ftl::URI::SCHEME_WS			:
+	case ftl::URI::SCHEME_TCP			: icon = ENTYPO_ICON_CLASSIC_COMPUTER; break;
+	case ftl::URI::SCHEME_GROUP			: icon = ENTYPO_ICON_MERGE; break;
+	default: break;
+	}
+
+	auto *button = new Button(parent, ctrl_->getSourceName(s), icon);
+	if (ctrl_->isSourceActive(s)) {
+		button->setBackgroundColor(Color(0, 255, 0, 25));
+	}
+
+	button->setIconPosition(Button::IconPosition::Left);
+	button->setIconExtraScale(1.2);
+	button->setFontSize(18);
+	button->setTooltip(s);
+
+	button->setCallback([this, uri = s, hide]() {
+		ctrl_->add(uri);
+		if (hide) close();
+	});
+
+	return button;
+}
+
+void AddSourceWindow::rebuild() {
+	using namespace nanogui;
+
+	for (auto *w : tab_items_) {
+		while (w->childCount() > 0) w->removeChild(w->childCount()-1);
+	}
+	
+	Button *button;
+
+	auto srcs = ctrl_->getRecent();
+	for (auto &s : srcs) {
+		_addButton(s.uri, tab_items_[0]);
+	}
+
+	auto groups = ctrl_->getGroups();
+	for (auto &s : groups) {
+		_addButton(s, tab_items_[1]);
+	}
+
+	auto devsrcs = ctrl_->getDeviceSources();
+	for (auto &s : devsrcs) {
+		_addButton(s, tab_items_[2]);
+	}
+
+	auto *host_menu = new Widget(tab_items_[3]);
+	host_menu->setLayout(new BoxLayout(nanogui::Orientation::Horizontal, nanogui::Alignment::Maximum, 5,4));
+
+	button = new Button(host_menu, "Add", ENTYPO_ICON_PLUS);
+	button->setFontSize(18);
+	button->setTooltip("Connect to a new machine");
+	button->setCallback([this]() {
+		FormHelper *fh = new FormHelper(screen());
+		auto *win = fh->addWindow(Vector2i(10,10), "Add Host");
+		win->center();
+		win->setTheme(dynamic_cast<ftl::gui2::Screen*>(win->screen())->getTheme("window_dark"));
+		//win->setWidth(200);
+		fh->addVariable<std::string>("URI", [this,win](const std::string &v) {
+			try {
+				ctrl_->add(v);
+			} catch (const ftl::exception &e) {
+				LOG(ERROR) << "Add failed: " << e.what();
+			}
+			win->dispose();
+		}, [this]() {
+			return "";
+		})->setFixedWidth(150);
+		win->screen()->performLayout();
+		delete fh;
+	});
+
+	button = new Button(host_menu, "Clear", ENTYPO_ICON_CYCLE);
+	button->setFontSize(18);
+	button->setTooltip("Clear host history");
+	button->setCallback([this]() {
+		ctrl_->feed()->clearHostHistory();
+		uptodate_.clear();
+	});
+
+	auto hostsrcs = ctrl_->getHosts();
+	for (auto &s : hostsrcs) {
+		_addButton(s, tab_items_[3], false);
+	}
+
+	auto streamsrcs = ctrl_->getNetSources();
+	for (auto &s : streamsrcs) {
+		_addButton(s, tab_items_[4]);
+	}
+
+	auto *file_menu = new Widget(tab_items_[5]);
+	file_menu->setLayout(new BoxLayout(nanogui::Orientation::Horizontal, nanogui::Alignment::Maximum, 5,4));
+
+	button = new Button(file_menu, "Open", ENTYPO_ICON_PLUS);
+	button->setFontSize(18);
+	button->setTooltip("Open FTL File");
+	button->setCallback([this]() {
+		try {
+			std::string filename = file_dialog({ {"ftl", "FTL Captures"} }, false);
+			if (filename.empty()) { return; }  // dialog cancelled
+			if (filename[0] == '/') {
+				filename = std::string("file://") + filename;
+			} else {
+				filename = std::string("file:///") + filename;
+			}
+#ifdef WIN32
+			auto p = filename.find_first_of('\\');
+			while (p != std::string::npos) {
+				filename[p] = '/';
+				p = filename.find_first_of('\\');
+			}
+#endif
+			ctrl_->add(filename);
+		} catch (const std::exception &e) {
+			LOG(ERROR) << "File load exception: " << e.what();
+		}
+		close();
+	});
+
+	button = new Button(file_menu, "Clear", ENTYPO_ICON_CYCLE);
+	button->setFontSize(18);
+	button->setTooltip("Clear file history");
+	button->setCallback([this]() {
+		ctrl_->feed()->clearFileHistory();
+		uptodate_.clear();
+	});
+
+	auto filesrcs = ctrl_->getFileSources();
+	for (auto &s : filesrcs) {
+		_addButton(s, tab_items_[5]);
+	}
+}
+
+void AddSourceWindow::close() {
+	setVisible(false);
+	//dispose();
+	ctrl_->disposeWindow();
+}
+
+void AddSourceWindow::draw(NVGcontext *ctx) {
+	{
+		UNIQUE_LOCK(mutex_, lk);
+		if (!uptodate_.test_and_set()) {
+			tabs_->requestFocus();  // Must ensure focus item not deleted
+			rebuild();
+			screen()->performLayout();
+		}
+	}
+
+	nanogui::Window::draw(ctx);
+}
diff --git a/applications/gui2/src/views/addsource.hpp b/applications/gui2/src/views/addsource.hpp
new file mode 100644
index 000000000..7e6ca3193
--- /dev/null
+++ b/applications/gui2/src/views/addsource.hpp
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <nanogui/window.h>
+#include <ftl/handle.hpp>
+#include <ftl/threads.hpp>
+
+
+namespace ftl {
+namespace gui2 {
+
+class AddCtrl;
+
+/**
+ * Add source dialog
+ */
+class AddSourceWindow : public nanogui::Window {
+public:
+	AddSourceWindow(nanogui::Widget *parent, AddCtrl *ctrl);
+	virtual ~AddSourceWindow();
+
+	virtual void draw(NVGcontext *ctx);
+
+private:
+	AddCtrl *ctrl_;
+	void close();
+	void rebuild();
+
+	nanogui::Button *_addButton(const std::string &s, nanogui::Widget *parent, bool hide=true);
+
+	ftl::Handle new_source_handle_;
+	MUTEX mutex_;
+	std::atomic_flag uptodate_;
+	std::vector<nanogui::Widget*> tab_items_;
+	nanogui::TabWidget *tabs_;
+
+public:
+	// EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/calibration/extrinsicview.cpp b/applications/gui2/src/views/calibration/extrinsicview.cpp
new file mode 100644
index 000000000..91b3b946d
--- /dev/null
+++ b/applications/gui2/src/views/calibration/extrinsicview.cpp
@@ -0,0 +1,589 @@
+#include "extrinsicview.hpp"
+#include "visualization.hpp"
+#include "../../screen.hpp"
+#include "../../widgets/window.hpp"
+
+#include <nanogui/common.h>
+#include <nanogui/window.h>
+#include <nanogui/layout.h>
+#include <nanogui/button.h>
+#include <nanogui/checkbox.h>
+#include <nanogui/label.h>
+#include <nanogui/formhelper.h>
+
+using ftl::gui2::ExtrinsicCalibrationStart;
+using ftl::gui2::ExtrinsicCalibrationView;
+
+using ftl::gui2::FixedWindow;
+
+using ftl::data::FrameID;
+using ftl::codecs::Channel;
+
+ExtrinsicCalibrationStart::ExtrinsicCalibrationStart(Screen* widget, ExtrinsicCalibration* ctrl) :
+		ftl::gui2::View(widget), ctrl_(ctrl), fsid_(-1), sources_(0), show_all_(false) {
+
+	show_all_ = false;
+	window_ = new nanogui::Window(screen(), std::string("Extrinsic Calibration"));
+	window_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 6, 12));
+
+	auto* button_refresh = new nanogui::Button(window_->buttonPanel(), "", ENTYPO_ICON_CCW);
+	button_refresh->setCallback([this](){
+		update();
+		updateSources();
+		screen()->performLayout();
+	});
+
+	lsframesets_ = new nanogui::Widget(window_);
+	lsframesets_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 0, 8));
+
+	lselect_ = new nanogui::Label(window_, "Select Cameras");
+	lselect_->setVisible(false);
+
+	lssources_ = new nanogui::Widget(window_);
+	lssources_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 0, 8));
+
+	cball_ = new nanogui::CheckBox(window_, "Show all sources",
+		[this](bool v){
+			show_all_ = v;
+			updateSources();
+			screen()->performLayout();
+	});
+	cball_->setChecked(show_all_);
+	cball_->setVisible(false);
+
+	bcontinue_ = new nanogui::Button(window_, "Continue");
+	bcontinue_->setEnabled(false);
+	bcontinue_->setVisible(false);
+	bcontinue_->setCallback([this](){
+		ctrl_->start(fsid_, getSources());
+	});
+
+	window_->setFixedWidth(400);
+	window_->setVisible(true);
+
+	update();
+}
+
+ExtrinsicCalibrationStart::~ExtrinsicCalibrationStart() {
+	window_->setVisible(false);
+	if (parent()->getRefCount() > 0) {
+		window_->dispose();
+	}
+}
+
+void ExtrinsicCalibrationStart::draw(NVGcontext* ctx) {
+	window_->center();
+	bcontinue_->setEnabled(sources_ != 0);
+	ftl::gui2::View::draw(ctx);
+}
+
+void ExtrinsicCalibrationStart::resetSources() {
+	sources_ = ~uint64_t(0);
+}
+
+bool ExtrinsicCalibrationStart::sourceSelected(unsigned int idx) {
+	return (sources_ & (uint64_t(1) << idx));
+}
+
+
+void ExtrinsicCalibrationStart::addSource(unsigned int idx) {
+	sources_ |= (uint64_t(1) << idx);
+}
+
+void ExtrinsicCalibrationStart::removeSource(unsigned int idx) {
+	sources_ &= ~(uint64_t(1) << idx);
+}
+
+std::vector<FrameID> ExtrinsicCalibrationStart::getSources() {
+	std::vector<FrameID> sources;
+	unsigned int nmax = ctrl_->listSources(fsid_, show_all_).size();
+	CHECK(nmax < 64);
+
+	for (unsigned int i = 0; i < nmax; i++) {
+		if (sourceSelected(i)) {
+			sources.push_back(FrameID(fsid_, i));
+		}
+	}
+	return sources;
+}
+
+void ExtrinsicCalibrationStart::updateSources() {
+	while (lssources_->childCount() > 0) {
+		lssources_->removeChild(lssources_->childCount() - 1);
+	}
+	if (fsid_ == (unsigned int)(-1)) {
+		return;
+	}
+	for (const auto& [name, id] : ctrl_->listSources(fsid_, show_all_)) {
+		auto* button = new nanogui::Button(lssources_, name);
+		button->setFlags(nanogui::Button::Flags::ToggleButton);
+		button->setChangeCallback([this, button, id = id.source()](bool value){
+			if (value)	{ addSource(id); }
+			else		{ removeSource(id); }
+		});
+		if (sourceSelected(id.source())) {
+			button->setPushed(true);
+		}
+	}
+}
+
+void ExtrinsicCalibrationStart::update() {
+	while (lsframesets_->childCount() > 0) {
+		lsframesets_->removeChild(lsframesets_->childCount() - 1);
+	}
+
+	for (const auto& [uri, fsid] : ctrl_->listFrameSets()) {
+		auto* button = new nanogui::Button(lsframesets_, uri, ENTYPO_ICON_IMAGES);
+		button->setFlags(nanogui::Button::Flags::RadioButton);
+		if (fsid == fsid_) { button->setPushed(true); }
+		button->setCallback([button, fsid, this](){
+			fsid_ = fsid;
+			lselect_->setVisible(true);
+			cball_->setVisible(true);
+			bcontinue_->setVisible(true);
+			resetSources();
+			updateSources();
+			screen()->performLayout();
+		});
+	}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class ExtrinsicCalibrationView::ControlWindow : public FixedWindow {
+public:
+	ControlWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	ExtrinsicCalibrationView* view_;
+	ExtrinsicCalibration* ctrl_;
+
+
+	nanogui::Button* bsave_;
+	nanogui::Button* bupload_;
+	nanogui::Button* bapply_;
+	nanogui::Button* bcalibrate_;
+	nanogui::Button* bpause_;
+	nanogui::Button* bresults_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+ExtrinsicCalibrationView::ControlWindow::ControlWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view) :
+	FixedWindow(parent, ""), view_(view), ctrl_(view->ctrl_) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	auto* buttons = new nanogui::Widget(this);
+	buttons->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Horizontal, nanogui::Alignment::Middle, 0, 0));
+
+	bsave_ = new nanogui::Button(buttons, "", ENTYPO_ICON_SAVE);
+	bsave_->setTooltip("Save input to file (Debug)");
+	bsave_->setCallback([this](){
+		std::string fname = nanogui::file_dialog({{"bin", "Binary file"}}, true);
+		ctrl_->saveInput(fname);
+	});
+
+	bsave_ = new nanogui::Button(buttons, "", ENTYPO_ICON_FOLDER);
+	bsave_->setTooltip("Load input from file (Debug)");
+	bsave_->setCallback([this](){
+		std::string fname = nanogui::file_dialog({{"bin", "Binary file"}}, false);  // open (load) dialog
+		ctrl_->loadInput(fname);
+	});
+
+	bupload_ = new nanogui::Button(buttons, "", ENTYPO_ICON_UPLOAD);
+	bupload_->setTooltip("Save input to sources");
+	bupload_->setCallback([this](){
+		ctrl_->updateCalibration();
+		bupload_->setTextColor(nanogui::Color(32, 192, 32, 255));
+	});
+
+	bapply_ = new nanogui::Button(buttons, "");
+	bapply_->setFixedWidth(40);
+	bapply_->setTooltip("Rectify stereo images");
+
+	bapply_->setFlags(nanogui::Button::Flags::ToggleButton);
+	bapply_->setPushed(view_->rectify());
+	bapply_->setChangeCallback([button = bapply_, view = view_](bool v){
+		view->setRectify(v);
+	});
+
+	bresults_ = new nanogui::Button(buttons, "Show Calibration");
+	//bresults_->setEnabled(ctrl_->calib().calibrated());
+	bresults_->setCallback([view = view_, button = bresults_]{
+		view->setMode(Mode::RESULTS);
+	});
+
+	bpause_ = new nanogui::Button(buttons, "");
+	bpause_->setFixedWidth(140);
+	bpause_->setCallback([&ctrl = ctrl_, button = bpause_](){
+		ctrl->setCapture(!ctrl->capturing());
+	});
+
+	bcalibrate_ = new nanogui::Button(buttons, "Calibrate");
+	bcalibrate_->setFixedWidth(140);
+	bcalibrate_->setCallback([view = view_, button = bcalibrate_](){
+		view->setMode(Mode::CALIBRATION);
+	});
+}
+
+void ExtrinsicCalibrationView::ControlWindow::draw(NVGcontext* ctx) {
+	if (ctrl_->capturing()) {
+		bpause_->setCaption("Pause");
+		view_->setRectify(false);
+	} else {
+		bpause_->setCaption("Continue");
+	}
+	bapply_->setIcon(view_->rectify() ? ENTYPO_ICON_EYE : ENTYPO_ICON_EYE_WITH_LINE);
+	bapply_->setPushed(view_->rectify());
+	//bcalibrate_->setEnabled(ctrl_->calib().nFrames() > 0);
+	//bresults_->setEnabled(ctrl_->calib().calibrated());
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class ExtrinsicCalibrationView::CalibrationWindow : public FixedWindow {
+public:
+	CalibrationWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	void build();
+
+	ExtrinsicCalibrationView* view_;
+	ExtrinsicCalibration* ctrl_;
+	nanogui::Widget* cameras_;
+
+	nanogui::Label* status_;
+	nanogui::Button* brun_;
+	bool running_; // run button clicked
+	int flags_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+ExtrinsicCalibrationView::CalibrationWindow::CalibrationWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view) :
+	FixedWindow(parent, "Settings"), view_(view), ctrl_(view->ctrl_) {
+
+	running_ = false;
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+	[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 10 , 10));
+
+	build();
+}
+
+void ExtrinsicCalibrationView::CalibrationWindow::build() {
+
+	flags_ = ctrl_->flags();
+
+	auto* wfreeze = new nanogui::Widget(this);
+	wfreeze->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0 , 5));
+
+	auto* floss = new nanogui::CheckBox(wfreeze, "Cauchy loss");
+	floss->setChecked(flags_ & ExtrinsicCalibration::Flags::LOSS_CAUCHY);
+	floss->setCallback([&flags = flags_](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::LOSS_CAUCHY; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::LOSS_CAUCHY; }
+	});
+
+	auto* fall = new nanogui::CheckBox(wfreeze, "Freeze all intrinsic parameters");
+	fall->setChecked(flags_ & ExtrinsicCalibration::Flags::FIX_INTRINSIC);
+	fall->setCallback([&flags = flags_, wfreeze](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::FIX_INTRINSIC; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::FIX_INTRINSIC; }
+		// Enable/disable the individual intrinsic options below when toggled.
+		for (int i = 2; i < wfreeze->childCount(); i++) {
+			wfreeze->childAt(i)->setEnabled(!v);
+		}
+	});
+
+	auto* ff = new nanogui::CheckBox(wfreeze, "Fix focal length");
+	ff->setChecked(flags_ & ExtrinsicCalibration::Flags::FIX_FOCAL);
+	ff->setCallback([&flags = flags_](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::FIX_FOCAL; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::FIX_FOCAL; }
+	});
+
+	auto* fpp = new nanogui::CheckBox(wfreeze, "Fix principal point");
+	fpp->setChecked(flags_ & ExtrinsicCalibration::Flags::FIX_PRINCIPAL_POINT);
+	fpp->setCallback([&flags = flags_](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::FIX_PRINCIPAL_POINT; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::FIX_PRINCIPAL_POINT; }
+	});
+
+	auto* fdist = new nanogui::CheckBox(wfreeze, "Fix distortion coefficients");
+	fdist->setChecked(flags_ & ExtrinsicCalibration::Flags::FIX_DISTORTION);
+	fdist->setCallback([&flags = flags_](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::FIX_DISTORTION; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::FIX_DISTORTION; }
+	});
+
+	auto* zdist = new nanogui::CheckBox(wfreeze, "Assume zero distortion");
+	zdist->setChecked(flags_ & ExtrinsicCalibration::Flags::ZERO_DISTORTION);
+	zdist->setCallback([&flags = flags_](bool v) {
+		if (v)	{ flags |= ExtrinsicCalibration::Flags::ZERO_DISTORTION; }
+		else	{ flags &= ~ExtrinsicCalibration::Flags::ZERO_DISTORTION; }
+	});
+
+	/* Needs thinking: visualize the visibility graph? Use an earlier alignment (if
+	 * some of the cameras are already calibrated), or do this elsewhere?
+	 */
+
+	status_ = new nanogui::Label(this, "Ready");
+	brun_ = new nanogui::Button(this, "Run");
+	brun_->setCallback([this](){
+		ctrl_->setFlags(flags_);
+		ctrl_->run();
+		running_ = true;
+	});
+}
+
+void ExtrinsicCalibrationView::CalibrationWindow::draw(NVGcontext* ctx) {
+	brun_->setEnabled(!ctrl_->isBusy());
+	if (ctrl_->isBusy()) {
+		if (running_) {
+			auto dots = std::string(int(round(glfwGetTime())) % 4, '.');
+			status_->setCaption(ctrl_->status() + dots);
+		}
+		else {
+			status_->setCaption("Busy");
+		}
+	}
+	else {
+		status_->setCaption("Ready");
+	}
+	if (running_ && !ctrl_->isBusy()) {
+		running_ = false;
+		view_->setMode(Mode::RESULTS);
+	}
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class ExtrinsicCalibrationView::ResultsWindow : public FixedWindow {
+public:
+	ResultsWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	ExtrinsicCalibrationView* view_;
+	ExtrinsicCalibration* ctrl_;
+
+	nanogui::Button* bcalibrate_;
+	nanogui::Button* bpause_;
+	nanogui::Button* bresults_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+ExtrinsicCalibrationView::ResultsWindow::ResultsWindow(nanogui::Widget* parent, ExtrinsicCalibrationView* view) :
+	FixedWindow(parent, "Results"), view_(view), ctrl_(view->ctrl_) {
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+	setSize({300, 300});
+}
+
+void ExtrinsicCalibrationView::ResultsWindow::draw(NVGcontext* ctx) {
+	FixedWindow::draw(ctx);
+	std::vector<ftl::calibration::CalibrationData::Extrinsic> calib(ctrl_->cameraCount());
+	for (int i = 0; i < ctrl_->cameraCount(); i++) {
+		calib[i] = ctrl_->calibration(i).extrinsic;
+	}
+	drawFloorPlan(ctx, this, calib);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static void drawText(NVGcontext* ctx, nanogui::Vector2f &pos, const std::string& text,
+		float size=12.0f, int align=NVG_ALIGN_MIDDLE|NVG_ALIGN_CENTER) {
+	nvgFontSize(ctx, size);
+	nvgFontFace(ctx, "sans-bold");
+	nvgTextAlign(ctx, align);
+	nvgFillColor(ctx, nanogui::Color(8, 8, 8, 255)); // shadow
+	nvgText(ctx, pos.x(), pos.y(), text.c_str(), nullptr);
+	nvgFillColor(ctx, nanogui::Color(244, 244, 244, 255));
+	nvgText(ctx, pos.x() + 1, pos.y() + 1, text.c_str(), nullptr);
+}
+
+ExtrinsicCalibrationView::ExtrinsicCalibrationView(Screen* widget, ExtrinsicCalibration* ctrl) :
+		ftl::gui2::View(widget), ctrl_(ctrl), rows_(0) {
+
+	frames_ = new nanogui::Widget(this);
+	draw_number_ = false;
+	rectify_ = false;
+
+	frames_->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Horizontal, nanogui::Alignment::Maximum, 0, 0));
+
+	// Assumes all cameras are stereo cameras, indexed in order.
+	for (int i = 0; i < ctrl_->cameraCount(); i += 2) {
+		new StereoImageView(frames_, nanogui::Orientation::Vertical);
+	}
+
+	wcontrol_ = new ControlWindow(screen(), this);
+	wcalibration_ = new CalibrationWindow(screen(), this);
+	wresults_ = new ResultsWindow(screen(), this);
+	setMode(Mode::CAPTURE_IMAGES);
+}
+
+void ExtrinsicCalibrationView::performLayout(NVGcontext* ctx) {
+
+	auto sz = wcontrol_->size();
+	wcontrol_->setPosition(
+		nanogui::Vector2i(width() / 2 - sz[0]/2, height() - 30 - sz[1]));
+
+	wcalibration_->center();
+	wresults_->center();
+
+	frames_->setSize(size());
+
+	nanogui::Vector2i fsize = { width()/(frames_->childCount()), height() };
+	for (int i = 0; i < frames_->childCount(); i++) {
+		auto* stereo = dynamic_cast<StereoImageView*>(frames_->childAt(i));
+		stereo->setFixedSize(fsize);
+		stereo->fit();
+	}
+
+	View::performLayout(ctx);
+}
+
+void ExtrinsicCalibrationView::draw(NVGcontext* ctx) {
+
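+	// upload new frames (optionally rectified) to the stereo image widgets whenever the controller has a new frame set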
+	if (ctrl_->next()) {
+		for (int i = 0; i < ctrl_->cameraCount(); i += 2) {
+			auto* imview = dynamic_cast<StereoImageView*>(frames_->childAt(i/2));
+
+			int l = i;
+			int r = i + 1;
+			if (ctrl_->hasFrame(l)) {
+				if (!rectify_) { imview->left()->copyFrom(ctrl_->getFrame(l)); }
+				else { imview->left()->copyFrom(ctrl_->getFrameRectified(l)); }
+				imview->left()->setVisible(true);
+			}
+			else { imview->left()->setVisible(false); }
+
+			if (ctrl_->hasFrame(r)) {
+				if (!rectify_) { imview->right()->copyFrom(ctrl_->getFrame(r)); }
+				else { imview->right()->copyFrom(ctrl_->getFrameRectified(r)); }
+				imview->right()->setVisible(true);
+			}
+			else { imview->right()->setVisible(false); }
+		}
+	}
+
+	Widget::draw(ctx);
+
+	// draw corner labels
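+	// for each camera: project the most recently detected pattern points into widget coordinates,
+	// draw their outlines and show the per-camera capture count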
+	for (int i = 0; i < ctrl_->cameraCount(); i++) {
+		FTLImageView* imview;
+		if (i%2 == 0) {
+			imview = dynamic_cast<StereoImageView*>(frames_->childAt(i/2))->left();
+		}
+		else {
+			imview = dynamic_cast<StereoImageView*>(frames_->childAt(i/2))->right();
+		}
+		auto points = ctrl_->previousPoints(i);
+
+		std::vector<Eigen::Vector2f, Eigen::aligned_allocator<Eigen::Vector2f>>
+			paths;
+
+
+		nanogui::Vector2f wpos = imview->absolutePosition().cast<float>();
+		nanogui::Vector2f wsize = imview->sizeF();
+
+		for (unsigned int p = 0; p < points.size(); p++) {
+			auto pos = imview->positionForCoordinate({points[p].x, points[p].y});
+			nanogui::Vector2f apos = pos + wpos;
+			paths.push_back(apos);
+		}
+
+		nvgScissor(ctx, wpos.x(), wpos.y(), wsize.x(), wsize.y());
+		// draw pattern outlines: points are taken four at a time; the first quadrilateral is drawn in red, the second in green
+		for (unsigned int p = 0; p < paths.size(); p += 4) {
+			nvgBeginPath(ctx);
+			nvgMoveTo(ctx, paths[p + 0].x(), paths[p + 0].y());
+			nvgLineTo(ctx, paths[p + 1].x(), paths[p + 1].y());
+			nvgLineTo(ctx, paths[p + 2].x(), paths[p + 2].y());
+			nvgLineTo(ctx, paths[p + 3].x(), paths[p + 3].y());
+			nvgLineTo(ctx, paths[p + 0].x(), paths[p + 0].y());
+			if (p == 0) nvgStrokeColor(ctx, nvgRGBA(255, 32, 32, 255));
+			if (p == 4) nvgStrokeColor(ctx, nvgRGBA(32, 255, 32, 255));
+			nvgStrokeWidth(ctx, 1.5f);
+			nvgStroke(ctx);
+		}
+		// draw number
+		/*if (draw_number_ ) {
+			for (unsigned int p = 0; p < paths.size(); p += 1) {
+				auto str = std::to_string(p);
+				drawText(ctx, paths[p], std::to_string(p), 14.0f);
+			}
+		}*/
+		nanogui::Vector2f cpos = wpos + nanogui::Vector2f{10.0f, 10.0f};
+		drawText(ctx, cpos, std::to_string(ctrl_->getFrameCount(i)), 20.0f, NVG_ALIGN_TOP|NVG_ALIGN_LEFT);
+		nvgResetScissor(ctx);
+	}
+}
+
+ExtrinsicCalibrationView::~ExtrinsicCalibrationView() {
+	wcontrol_->dispose();
+	wcalibration_->dispose();
+	wresults_->dispose();
+}
+
+void ExtrinsicCalibrationView::setMode(Mode mode) {
+	switch(mode) {
+		case Mode::CAPTURE_IMAGES:
+			ctrl_->setCapture(true);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::VIDEO:
+			ctrl_->setCapture(false);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::CALIBRATION:
+			ctrl_->setCapture(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(true);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::RESULTS:
+			ctrl_->setCapture(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(true);
+			//wresults_->update();
+			break;
+	}
+	screen()->performLayout();
+}
diff --git a/applications/gui2/src/views/calibration/extrinsicview.hpp b/applications/gui2/src/views/calibration/extrinsicview.hpp
new file mode 100644
index 000000000..10066d9c7
--- /dev/null
+++ b/applications/gui2/src/views/calibration/extrinsicview.hpp
@@ -0,0 +1,93 @@
+#pragma once
+
+#include <unordered_set>
+
+#include "../../modules/calibration/calibration.hpp"
+#include "../../view.hpp"
+#include <ftl/utility/gltexture.hpp>
+#include "../../widgets/imageview.hpp"
+
+namespace ftl
+{
+namespace gui2
+{
+
+class ExtrinsicCalibrationStart : public View {
+public:
+	ExtrinsicCalibrationStart(Screen* widget, ExtrinsicCalibration* ctrl);
+	virtual ~ExtrinsicCalibrationStart();
+
+	virtual void draw(NVGcontext *ctx) override;
+
+	/** Modify and query the current source selection. */
+	void addSource(unsigned int);
+	void removeSource(unsigned int);
+	void resetSources();
+	bool sourceSelected(unsigned int source);
+	std::vector<ftl::data::FrameID> getSources();
+
+	/** update widgets */
+	void update();
+	void updateSources();
+
+private:
+	ExtrinsicCalibration* ctrl_;
+	nanogui::Window* window_;
+	nanogui::Label* lselect_;
+	nanogui::CheckBox* cball_;
+	nanogui::Widget* lsframesets_;
+	nanogui::Widget* lssources_;
+	nanogui::Button* bcontinue_;
+	unsigned int fsid_;
+	uint64_t sources_;
+	bool show_all_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class ExtrinsicCalibrationView : public View {
+public:
+	class ControlWindow;
+	class CalibrationWindow;
+	class ResultsWindow;
+
+	enum Mode {
+		CAPTURE_IMAGES,	// capture images
+		CALIBRATION,	// calibration options
+		RESULTS,		// calibration results
+		VIDEO			// same as capture images but paused
+	};
+
+	ExtrinsicCalibrationView(Screen* widget, ExtrinsicCalibration* ctrl);
+	virtual ~ExtrinsicCalibrationView();
+
+	virtual void draw(NVGcontext *ctx) override;
+	virtual void performLayout(NVGcontext *ctx) override;
+
+	bool rectify() const { return rectify_; }
+	void setRectify(bool v) { rectify_ = v; }
+	void setMode(Mode m);
+
+protected:
+	int rows(); // calculate optimum number of rows
+	void setRows(int rows);
+
+private:
+	ExtrinsicCalibration* ctrl_;
+	nanogui::Widget* frames_;
+
+	ControlWindow* wcontrol_;
+	CalibrationWindow* wcalibration_;
+	ResultsWindow* wresults_;
+
+	int rows_;
+	bool draw_number_;
+	bool rectify_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+} // namespace gui2
+} // namespace ftl
diff --git a/applications/gui2/src/views/calibration/intrinsicview.cpp b/applications/gui2/src/views/calibration/intrinsicview.cpp
new file mode 100644
index 000000000..6129ce48d
--- /dev/null
+++ b/applications/gui2/src/views/calibration/intrinsicview.cpp
@@ -0,0 +1,629 @@
+#include <algorithm>
+#include <cmath>
+#include <sstream>
+
+#include "visualization.hpp"
+#include "widgets.hpp"
+#include "intrinsicview.hpp"
+
+#include "../../screen.hpp"
+#include "../../widgets/window.hpp"
+
+#include <opencv2/calib3d.hpp>
+
+#include <nanogui/messagedialog.h>
+#include <nanogui/window.h>
+#include <nanogui/layout.h>
+#include <nanogui/button.h>
+#include <nanogui/checkbox.h>
+#include <nanogui/textbox.h>
+#include <nanogui/label.h>
+
+using ftl::codecs::Channel;
+
+using ftl::gui2::Screen;
+using ftl::gui2::View;
+using ftl::gui2::FixedWindow;
+
+using ftl::gui2::IntrinsicCalibrationStart;
+using ftl::gui2::IntrinsicCalibration;
+using ftl::gui2::IntrinsicCalibrationView;
+using Mode = ftl::gui2::IntrinsicCalibrationView::Mode;
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename T>
+std::string to_string(T v, int precision = 2) {
+	std::stringstream stream;
+	stream << std::fixed << std::setprecision(precision) << v;
+	return stream.str();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class IntrinsicCalibrationView::CaptureWindow : public FixedWindow {
+public:
+	CaptureWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	void update();
+	IntrinsicCalibrationView* view_;
+	IntrinsicCalibration* ctrl_;
+
+	nanogui::Widget* channels_;
+
+	int width_;
+	int height_;
+	double square_size_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class IntrinsicCalibrationView::ControlWindow : public FixedWindow {
+public:
+	ControlWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	void updateCount();
+
+	IntrinsicCalibrationView* view_;
+	IntrinsicCalibration* ctrl_;
+
+	nanogui::Label* txtnframes_;
+	nanogui::Button* bcalibrate_;
+	nanogui::Button* bsave_;
+	nanogui::Button* bapply_;
+	nanogui::Button* bresults_;
+	nanogui::Button* bpause_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class IntrinsicCalibrationView::CalibrationWindow : public FixedWindow {
+public:
+	CalibrationWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view);
+	void update();
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	IntrinsicCalibrationView* view_;
+	IntrinsicCalibration* ctrl_;
+
+	nanogui::Label* status_;
+	nanogui::Button* bcalibrate_;
+	nanogui::FloatBox<double>* sensor_width_;
+	nanogui::FloatBox<double>* sensor_height_;
+	nanogui::FloatBox<double>* focal_length_;
+	nanogui::CheckBox* reset_dist_;
+	nanogui::CheckBox* reset_pp_;
+	bool calibrating_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class IntrinsicCalibrationView::ResultWindow : public FixedWindow {
+public:
+	ResultWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+	void update();
+
+private:
+	IntrinsicCalibrationView* view_;
+	IntrinsicCalibration* ctrl_;
+
+	nanogui::Button* bsave_;
+	nanogui::Label* rms_;
+	ftl::gui2::IntrinsicDetails* info_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+
+IntrinsicCalibrationStart::IntrinsicCalibrationStart(ftl::gui2::Screen *parent, IntrinsicCalibration *ctrl) :
+		ftl::gui2::View(parent), ctrl_(ctrl) {
+
+	show_all_ = false;
+	window_ = new FixedWindow(parent, std::string("Intrinsic Calibration"));
+	window_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 6, 12));
+
+	auto* button_refresh = new nanogui::Button(window_->buttonPanel(), "", ENTYPO_ICON_CCW);
+	button_refresh->setCallback([this](){ update(); });
+
+	buttons_ = new nanogui::Widget(window_);
+	buttons_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 0, 8));
+
+	auto bshow_all = new nanogui::CheckBox(window_, "Show all sources",
+		[this](bool v){
+			show_all_ = v;
+			update();
+	});
+	bshow_all->setChecked(show_all_);
+
+	window_->setFixedWidth(400);
+	window_->setVisible(true);
+
+	update();
+}
+
+IntrinsicCalibrationStart::~IntrinsicCalibrationStart() {
+	window_->setVisible(false);
+	if (parent()->getRefCount() > 0) {
+		window_->dispose();
+	}
+}
+
+void IntrinsicCalibrationStart::update() {
+	while (buttons_->childCount() > 0) {
+		buttons_->removeChild(buttons_->childCount() - 1);
+	}
+
+	for (const auto& [name, id] : ctrl_->listSources(show_all_)) {
+		auto* button = new nanogui::Button(buttons_, name, ENTYPO_ICON_CAMERA);
+		button->setCallback([ctrl = this->ctrl_, id](){
+			ctrl->start(id);
+		});
+	}
+
+	screen()->performLayout();
+}
+
+void IntrinsicCalibrationStart::draw(NVGcontext* ctx) {
+	window_->center();
+	View::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Capture Window
+
+
+void IntrinsicCalibrationView::CaptureWindow::update() {
+	ctrl_->setChessboard({width_, height_}, square_size_);
+}
+
+IntrinsicCalibrationView::CaptureWindow::CaptureWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view) :
+	FixedWindow(parent, "Capture Options"), view_(view), ctrl_(view->ctrl_) {
+
+	width_ = ctrl_->chessboardSize().width;
+	height_ = ctrl_->chessboardSize().height;
+	square_size_ = ctrl_->squareSize();
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	// Capture parameters
+	new nanogui::Label(this, "Select Camera");
+	channels_ = new nanogui::Widget(this);
+	channels_->setLayout(new nanogui::GridLayout
+		(nanogui::Orientation::Horizontal, 2, nanogui::Alignment::Fill, 0, 0));
+	auto* button_left = new nanogui::Button(channels_, "Left");
+	button_left->setPushed(ctrl_->channel() == Channel::Left);
+	button_left->setFlags(nanogui::Button::RadioButton);
+	button_left->setCallback([ctrl = ctrl_, view=view_](){
+		if (ctrl->channel() != Channel::Left) {
+			ctrl->setChannel(Channel::Left);
+			view->setUndistort(false);
+		}
+	});
+
+	auto* button_right = new nanogui::Button(channels_, "Right");
+	button_right->setFlags(nanogui::Button::RadioButton);
+	button_right->setPushed(ctrl_->channel() == Channel::Right);
+	button_right->setCallback([ctrl = ctrl_, view=view_](){
+		if (ctrl->channel() != Channel::Right) {
+			ctrl->setChannel(Channel::Right);
+			view->setUndistort(false);
+		}
+	});
+	button_right->setEnabled(ctrl_->hasChannel(Channel::Right));
+
+	new nanogui::Label(this, "Capture interval");
+	auto* interval = new nanogui::FloatBox<float>(this, ctrl_->frequency());
+	interval->setEditable(true);
+	interval->setFormat("[0-9]*\\.?[0-9]+");
+	interval->setUnits("s");
+	interval->setCallback([ctrl = this->ctrl_](float v){
+		ctrl->setFrequency(v);
+	});
+
+	// Chessboard parameters
+	auto* chessboard = new nanogui::Widget(this);
+	chessboard->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 4));
+
+	// width
+	new nanogui::Label(chessboard, "Chessboard width");
+	auto* chessboard_size_x = new nanogui::IntBox<int>(chessboard, width_);
+	chessboard_size_x->setEditable(true);
+	chessboard_size_x->setFormat("[1-9][0-9]*");
+	chessboard_size_x->setCallback([this](int v){
+		width_ = std::max(0, v);
+	});
+
+	// height
+	new nanogui::Label(chessboard, "Chessboard height");
+	auto* chessboard_size_y = new nanogui::IntBox<int>(chessboard, height_);
+	chessboard_size_y->setEditable(true);
+	chessboard_size_y->setFormat("[1-9][0-9]*");
+	chessboard_size_y->setCallback([this](int v){
+		height_ = std::max(0, v);
+	});
+
+	// square size
+	new nanogui::Label(chessboard, "Chessboard square size");
+	auto* square_size = new nanogui::FloatBox<float>(chessboard, square_size_*1000.0);
+
+	square_size->setEditable(true);
+	square_size->setFormat("[0-9]*\\.?[0-9]+");
+	square_size->setUnits("mm");
+	square_size->setCallback([this](float v){
+		square_size_ = v/1000.0;
+	});
+
+	auto* button_start = new nanogui::Button(this, "Start");
+	button_start->setCallback([this]() {
+		update();
+		view_->setMode(Mode::CAPTURE_IMAGES);
+	});
+}
+
+void IntrinsicCalibrationView::CaptureWindow::draw(NVGcontext* ctx) {
+	channels_->childAt(1)->setEnabled(ctrl_->hasChannel(Channel::Right));
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Control Window
+
+IntrinsicCalibrationView::ControlWindow::ControlWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view) :
+	FixedWindow(parent, ""), view_(view), ctrl_(view->ctrl_) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	txtnframes_ = new nanogui::Label(this, "");
+	updateCount();
+
+	auto* buttons = new nanogui::Widget(this);
+	buttons->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Horizontal, nanogui::Alignment::Middle, 0, 0));
+
+	auto* bback_ = new nanogui::Button(buttons, "", ENTYPO_ICON_ARROW_LEFT);
+	bback_->setFixedWidth(40);
+	bback_->setTooltip("Back to capture options");
+	bback_->setCallback([this, button = bback_](){
+		view_->setMode(Mode::CAPTURE_INIT);
+	});
+
+	bsave_ = new nanogui::Button(buttons, "", ENTYPO_ICON_SAVE);
+	bsave_->setFixedWidth(40);
+	bsave_->setTooltip("Save calibration");
+	bsave_->setEnabled(ctrl_->calibrated());
+	bsave_->setCallback([ctrl = ctrl_, view = view_](){
+		ctrl->save();
+		new nanogui::MessageDialog
+			(view->screen(), nanogui::MessageDialog::Type::Information, "Calibration", "Calibration sent");
+	});
+
+	bapply_ = new nanogui::Button(buttons, "");
+	bapply_->setFixedWidth(40);
+	bapply_->setTooltip("Apply distortion correction");
+	bapply_->setEnabled(ctrl_->calibrated());
+	bapply_->setFlags(nanogui::Button::Flags::ToggleButton);
+	bapply_->setPushed(view_->undistort());
+	bapply_->setChangeCallback([button = bapply_, view = view_](bool v){
+		view->setUndistort(v);
+	});
+
+	bresults_ = new nanogui::Button(buttons, "Details");
+	bresults_->setFixedWidth(120);
+
+	bresults_->setEnabled(ctrl_->calibrated());
+	bresults_->setCallback([view = view_, button = bresults_]{
+		view->setMode(Mode::RESULTS);
+	});
+
+	bpause_ = new nanogui::Button(buttons, "");
+	bpause_->setFixedWidth(120);
+	bpause_->setCallback([&ctrl = ctrl_](){
+		// TODO: add buttons to browse captured images and allow deleting
+		//		 images
+		ctrl->setCapture(!ctrl->capturing());
+	});
+
+	bcalibrate_ = new nanogui::Button(buttons, "Calibrate");
+	bcalibrate_->setFixedWidth(120);
+	bcalibrate_->setCallback([view = view_, button = bcalibrate_](){
+		view->setMode(Mode::CALIBRATION);
+	});
+}
+
+void IntrinsicCalibrationView::ControlWindow::draw(NVGcontext* ctx) {
+	if (ctrl_->capturing())	{ bpause_->setCaption("Pause"); }
+	else 					{ bpause_->setCaption("Continue"); }
+	//bcalibrate_->setEnabled(ctrl_->count() > 0);
+	bresults_->setEnabled(ctrl_->calibrated());
+	bsave_->setEnabled(ctrl_->calibrated());
+	bapply_->setEnabled(ctrl_->calibrated());
+	bapply_->setIcon(view_->undistort() ? ENTYPO_ICON_EYE : ENTYPO_ICON_EYE_WITH_LINE);
+	bapply_->setPushed(view_->undistort());
+	updateCount();
+	FixedWindow::draw(ctx);
+}
+
+void IntrinsicCalibrationView::ControlWindow::updateCount() {
+	txtnframes_->setCaption("Detected patterns: " +
+							std::to_string(ctrl_->count()));
+}
+////////////////////////////////////////////////////////////////////////////////
+// Calibration Window
+
+IntrinsicCalibrationView::CalibrationWindow::CalibrationWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view) :
+		FixedWindow(parent, "Calibration"), view_(view), ctrl_(view->ctrl_) {
+
+	calibrating_ = false;
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	// sensor size
+	new nanogui::Label(this, "Initial values");
+
+	nanogui::GridLayout *grid_layout = new nanogui::GridLayout
+		(nanogui::Orientation::Horizontal, 2, nanogui::Alignment::Fill, 0, 5);
+	grid_layout->setColAlignment
+		({nanogui::Alignment::Maximum, nanogui::Alignment::Fill});
+
+	grid_layout->setSpacing(0, 10);
+	auto* initial_values = new nanogui::Widget(this);
+	initial_values->setLayout(grid_layout);
+
+	new nanogui::Label(initial_values, "Sensor width");
+	sensor_width_ = new nanogui::FloatBox<double>(initial_values, ctrl_->sensorSize().width);
+	sensor_width_->setEditable(true);
+	sensor_width_->setFormat("[0-9]*\\.?[0-9]+");
+	sensor_width_->setUnits("mm");
+
+	new nanogui::Label(initial_values, "Sensor height");
+	sensor_height_ = new nanogui::FloatBox<double>(initial_values, ctrl_->sensorSize().height);
+	sensor_height_->setEditable(true);
+	sensor_height_->setFormat("[0-9]*\\.?[0-9]+");
+	sensor_height_->setUnits("mm");
+
+	new nanogui::Label(initial_values, "Focal length");
+	focal_length_ = new nanogui::FloatBox<double>(initial_values, ctrl_->focalLength());
+	focal_length_->setEditable(true);
+	focal_length_->setFormat("[0-9]*\\.?[0-9]+");
+	focal_length_->setUnits("mm");
+
+	new nanogui::Label(initial_values, "Reset principal point");
+	reset_pp_ = new nanogui::CheckBox(initial_values, "");
+	reset_pp_->setChecked(false);
+
+	new nanogui::Label(initial_values, "Reset distortion coefficients");
+	reset_dist_ = new nanogui::CheckBox(initial_values, "");
+	reset_dist_->setChecked(false);
+
+	// flags
+	new nanogui::Label(this, "Flags");
+	new ftl::gui2::OpenCVFlagWidget(this, &(ctrl_->flags()), ctrl_->defaultFlags());
+	status_ = new nanogui::Label(this, " ");
+
+	bcalibrate_ = new nanogui::Button(this, "Run");
+	bcalibrate_->setEnabled(false);
+	bcalibrate_->setCallback([this](){
+		if (!ctrl_->isBusy()) {
+			ctrl_->setSensorSize({sensor_width_->value(), sensor_height_->value()});
+			ctrl_->setFocalLength(focal_length_->value(), ctrl_->sensorSize());
+			if (reset_pp_->checked()) { ctrl_->resetPrincipalPoint(); }
+			if (reset_dist_->checked()) { ctrl_->resetDistortion(); }
+			ctrl_->run();
+			calibrating_ = true;
+		}
+	});
+}
+
+void IntrinsicCalibrationView::CalibrationWindow::update() {
+	focal_length_->setValue(ctrl_->focalLength());
+}
+
+void IntrinsicCalibrationView::CalibrationWindow::draw(NVGcontext* ctx) {
+	bool use_guess = ctrl_->flags().has(cv::CALIB_USE_INTRINSIC_GUESS);
+	focal_length_->setEnabled(use_guess);
+	reset_pp_->setEnabled(use_guess);
+	reset_dist_->setEnabled(use_guess);
+
+	if (ctrl_->isBusy()) {
+		if (calibrating_) {
+			auto dots = std::string(int(round(glfwGetTime())) % 4, '.');
+			status_->setCaption("Calibrating " + dots);
+		}
+		else {
+			status_->setCaption("Busy");
+		}
+	}
+	else {
+		status_->setCaption(" ");
+	}
+	bcalibrate_->setEnabled(!ctrl_->isBusy() && (ctrl_->count() > 0));
+	if (calibrating_ && !ctrl_->isBusy()) {
+		calibrating_ = false;
+		view_->setUndistort(true);
+		view_->setMode(Mode::RESULTS);
+	}
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Result window
+
+IntrinsicCalibrationView::ResultWindow::ResultWindow(nanogui::Widget* parent, IntrinsicCalibrationView* view) :
+	FixedWindow(parent, "Results"), view_(view), ctrl_(view->ctrl_) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 8 , 8));
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	rms_ = new nanogui::Label(this, "");
+
+	info_ = new ftl::gui2::IntrinsicDetails(this);
+
+	bsave_ = new nanogui::Button(this, "Save");
+	bsave_->setCallback([button = bsave_, ctrl = ctrl_](){
+		ctrl->save();
+		button->setCaption("Saved");
+		button->setEnabled(false);
+	});
+}
+
+void IntrinsicCalibrationView::ResultWindow::draw(NVGcontext* ctx) {
+	nanogui::Window::draw(ctx);
+}
+
+void IntrinsicCalibrationView::ResultWindow::update() {
+	if (!std::isnan(ctrl_->reprojectionError())) {
+		rms_->setCaption("Reprojection error (RMS): " + to_string(ctrl_->reprojectionError()));
+		rms_->setVisible(true);
+	}
+	else {
+		rms_->setVisible(false);
+	}
+	info_->update(ctrl_->calibration());
+	bsave_->setEnabled(true);
+	bsave_->setCaption("Save");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+IntrinsicCalibrationView::IntrinsicCalibrationView(Screen* parent,
+		IntrinsicCalibration* ctrl) : View(parent), ctrl_(ctrl) {
+
+	undistort_ = false;
+
+	imview_ = new ftl::gui2::FTLImageView(this);
+
+	int w = 300;
+	wcapture_ = new CaptureWindow(screen(), this);
+	wcapture_->setFixedWidth(w);
+	wcontrol_ = new ControlWindow(screen(), this);
+	wcalibration_ = new CalibrationWindow(screen(), this);
+	wcalibration_->setFixedWidth(w);
+	wresults_ = new ResultWindow(screen(), this);
+	wresults_->update();
+
+	screen()->performLayout();
+	setMode(Mode::CAPTURE_INIT);
+}
+
+IntrinsicCalibrationView::~IntrinsicCalibrationView() {
+	wcapture_->setVisible(false);
+	wcapture_->dispose();
+	wcontrol_->setVisible(false);
+	wcontrol_->dispose();
+	wcalibration_->setVisible(false);
+	wcalibration_->dispose();
+	wresults_->setVisible(false);
+	wresults_->dispose();
+}
+
+void IntrinsicCalibrationView::performLayout(NVGcontext *ctx) {
+	auto sz = wcontrol_->size();
+	wcontrol_->setPosition(
+		nanogui::Vector2i(width() / 2 - sz[0]/2, height() - 30 - sz[1]));
+
+	wcapture_->center();
+	wcalibration_->center();
+	wresults_->center();
+	imview_->setSize(size());
+	View::performLayout(ctx);
+}
+
+void IntrinsicCalibrationView::draw(NVGcontext *ctx) {
+	if (ctrl_->hasFrame()) {
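+		// remember whether a texture existed before copying so the view can be fitted to the first frame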
+		bool was_valid = imview_->texture().isValid();
+		if (undistort_) {
+			auto frame = ctrl_->getFrameUndistort();
+			imview_->copyFrom(frame);
+		}
+		else {
+			auto frame = ctrl_->getFrame();
+			imview_->copyFrom(frame);
+		}
+		if (!was_valid) {
+			imview_->fit();
+		}
+	}
+	View::draw(ctx);
+	if (ctrl_->capturing()) {
+		drawChessboardCorners(ctx, imview_, ctrl_->previousPoints());
+	}
+}
+
+void IntrinsicCalibrationView::setMode(Mode m) {
+	switch(m) {
+		case Mode::CAPTURE_INIT:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(true);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::CAPTURE_IMAGES:
+			ctrl_->setCapture(true);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::VIDEO:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::CALIBRATION:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->update();
+			wcalibration_->setVisible(true);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::RESULTS:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(true);
+			wresults_->update();
+			break;
+	}
+	screen()->performLayout();
+}
diff --git a/applications/gui2/src/views/calibration/intrinsicview.hpp b/applications/gui2/src/views/calibration/intrinsicview.hpp
new file mode 100644
index 000000000..289971b17
--- /dev/null
+++ b/applications/gui2/src/views/calibration/intrinsicview.hpp
@@ -0,0 +1,75 @@
+#pragma once
+
+#include "../../modules/calibration/calibration.hpp"
+#include "../../view.hpp"
+#include "../../widgets/imageview.hpp"
+
+#include <ftl/utility/gltexture.hpp>
+
+namespace ftl
+{
+namespace gui2
+{
+
+class IntrinsicCalibrationStart : public View {
+public:
+	IntrinsicCalibrationStart(Screen* widget, IntrinsicCalibration* ctrl);
+	virtual ~IntrinsicCalibrationStart();
+
+	virtual void draw(NVGcontext *ctx) override;
+
+	void update();
+
+private:
+	nanogui::Window* window_;
+	nanogui::Widget* buttons_;
+	IntrinsicCalibration* ctrl_;
+	bool show_all_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class IntrinsicCalibrationView : public View {
+
+	class CaptureWindow;
+	class ControlWindow;
+	class CalibrationWindow;
+	class ResultWindow;
+
+public:
+	IntrinsicCalibrationView(Screen* screen, IntrinsicCalibration* ctrl);
+	virtual ~IntrinsicCalibrationView();
+
+	enum Mode {
+		CAPTURE_INIT,	// set capture parameters
+		CAPTURE_IMAGES,	// capture images
+		CALIBRATION,	// calibration options
+		RESULTS,		// calibration results
+		VIDEO			// same as capture images but paused
+	};
+
+	void setMode(Mode m);
+
+	virtual void performLayout(NVGcontext* ctx) override;
+	virtual void draw(NVGcontext* ctx) override;
+
+	void setUndistort(bool v) { undistort_ = v; }
+	bool undistort() { return undistort_; }
+
+private:
+	IntrinsicCalibration* ctrl_;
+	FTLImageView* imview_;
+
+	CaptureWindow* wcapture_;
+	ControlWindow* wcontrol_;
+	CalibrationWindow* wcalibration_;
+	ResultWindow* wresults_;
+	bool undistort_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+} // namespace gui2
+} // namespace ftl
diff --git a/applications/gui2/src/views/calibration/stereoview.cpp b/applications/gui2/src/views/calibration/stereoview.cpp
new file mode 100644
index 000000000..3dccafd41
--- /dev/null
+++ b/applications/gui2/src/views/calibration/stereoview.cpp
@@ -0,0 +1,511 @@
+#include <algorithm>
+#include <sstream>
+
+#include "visualization.hpp"
+#include "widgets.hpp"
+#include "stereoview.hpp"
+
+
+#include "../../screen.hpp"
+#include "../../widgets/window.hpp"
+
+
+#include <nanogui/window.h>
+#include <nanogui/layout.h>
+#include <nanogui/button.h>
+#include <nanogui/checkbox.h>
+#include <nanogui/textbox.h>
+#include <nanogui/label.h>
+#include <nanogui/tabwidget.h>
+
+using ftl::codecs::Channel;
+
+using ftl::gui2::Screen;
+using ftl::gui2::View;
+using ftl::gui2::FixedWindow;
+
+using ftl::gui2::StereoCalibrationStart;
+using ftl::gui2::StereoCalibration;
+using ftl::gui2::StereoCalibrationView;
+using Mode = ftl::gui2::StereoCalibrationView::Mode;
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename T>
+std::string to_string(T v, int precision = 2) {
+	std::stringstream stream;
+	stream << std::fixed << std::setprecision(precision) << v;
+	return stream.str();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+class StereoCalibrationView::CaptureWindow : public FixedWindow {
+public:
+	CaptureWindow(nanogui::Widget* parent, StereoCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	void update();
+	StereoCalibrationView* view_;
+	StereoCalibration* ctrl_;
+	int width_;
+	int height_;
+	double square_size_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class StereoCalibrationView::ControlWindow : public FixedWindow {
+public:
+	ControlWindow(nanogui::Widget* parent, StereoCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	void updateCount();
+
+	StereoCalibrationView* view_;
+	StereoCalibration* ctrl_;
+
+	nanogui::Label* txtnframes_;
+	nanogui::Button* bcalibrate_;
+	nanogui::Button* bresults_;
+	nanogui::Button* bpause_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class StereoCalibrationView::CalibrationWindow : public FixedWindow {
+public:
+	CalibrationWindow(nanogui::Widget* parent, StereoCalibrationView* view);
+	virtual void draw(NVGcontext* ctx) override;
+	double sensorWidth() { return sensor_width_->value(); }
+	double sensorHeight() { return sensor_height_->value(); }
+
+private:
+	StereoCalibrationView* view_;
+	StereoCalibration* ctrl_;
+
+	nanogui::Button* bcalibrate_;
+	nanogui::FloatBox<double>* sensor_width_;
+	nanogui::FloatBox<double>* sensor_height_;
+	bool calibrating_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class StereoCalibrationView::ResultWindow : public FixedWindow {
+public:
+	ResultWindow(nanogui::Widget* parent, StereoCalibrationView* view);
+	virtual void performLayout(NVGcontext* ctx) override;
+	virtual void draw(NVGcontext* ctx) override;
+	void update();
+
+private:
+	StereoCalibrationView* view_;
+	StereoCalibration* ctrl_;
+
+	nanogui::TabWidget* tabs_;
+	nanogui::Button* bsave_;
+	ftl::gui2::IntrinsicDetails* infol_;
+	ftl::gui2::IntrinsicDetails* infor_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+
+StereoCalibrationStart::StereoCalibrationStart(ftl::gui2::Screen *parent, StereoCalibration *ctrl) :
+		ftl::gui2::View(parent), ctrl_(ctrl) {
+
+	show_all_ = false;
+	window_ = new FixedWindow(parent, std::string("Stereo Calibration"));
+	window_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 6, 12));
+
+	auto* button_refresh = new nanogui::Button(window_->buttonPanel(), "", ENTYPO_ICON_CCW);
+	button_refresh->setCallback([this](){ update(); });
+
+	buttons_ = new nanogui::Widget(window_);
+	buttons_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical,
+									 nanogui::Alignment::Fill, 0, 8));
+
+	auto bshow_all = new nanogui::CheckBox(window_, "Show all sources",
+		[this](bool v){
+			show_all_ = v;
+			update();
+	});
+	bshow_all->setChecked(show_all_);
+
+	window_->setFixedWidth(400);
+	window_->setVisible(true);
+
+	update();
+}
+
+StereoCalibrationStart::~StereoCalibrationStart() {
+	window_->setVisible(false);
+	if (parent()->getRefCount() > 0) {
+		window_->dispose();
+	}
+}
+
+void StereoCalibrationStart::update() {
+	while (buttons_->childCount() > 0) {
+		buttons_->removeChild(buttons_->childCount() - 1);
+	}
+
+	for (const auto& [name, id] : ctrl_->listSources(show_all_)) {
+		auto* button = new nanogui::Button(buttons_, name, ENTYPO_ICON_CAMERA);
+		button->setCallback([ctrl = this->ctrl_, id](){
+			ctrl->start(id);
+		});
+	}
+
+	screen()->performLayout();
+}
+
+void StereoCalibrationStart::draw(NVGcontext* ctx) {
+	window_->center();
+	View::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Capture Window
+
+void StereoCalibrationView::CaptureWindow::update() {
+	ctrl_->setChessboard({width_, height_}, square_size_);
+}
+
+StereoCalibrationView::CaptureWindow::CaptureWindow(nanogui::Widget* parent, StereoCalibrationView* view) :
+	FixedWindow(parent, "Capture Options"), view_(view), ctrl_(view->ctrl_) {
+
+	width_ = ctrl_->chessboardSize().width;
+	height_ = ctrl_->chessboardSize().height;
+	square_size_ = ctrl_->squareSize();
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[this]() {
+		update();
+		view_->setMode(Mode::VIDEO);
+	});
+
+	new nanogui::Label(this, "Capture interval");
+	auto* interval = new nanogui::FloatBox<float>(this, ctrl_->frequency());
+	interval->setEditable(true);
+	interval->setFormat("[0-9]*\\.?[0-9]+");
+	interval->setUnits("s");
+	interval->setCallback([ctrl = this->ctrl_](float v){
+		ctrl->setFrequency(v);
+	});
+
+	// Chessboard parameters
+	auto* chessboard = new nanogui::Widget(this);
+	chessboard->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 4));
+
+	// width
+	new nanogui::Label(chessboard, "Chessboard width");
+	auto* chessboard_size_x = new nanogui::IntBox<int>(chessboard, width_);
+	chessboard_size_x->setEditable(true);
+	chessboard_size_x->setFormat("[1-9][0-9]*");
+	chessboard_size_x->setCallback([this](int v){
+		width_ = std::max(0, v);
+	});
+
+	// height
+	new nanogui::Label(chessboard, "Chessboard height");
+	auto* chessboard_size_y = new nanogui::IntBox<int>(chessboard, height_);
+	chessboard_size_y->setEditable(true);
+	chessboard_size_y->setFormat("[1-9][0-9]*");
+	chessboard_size_y->setCallback([this](int v){
+		height_ = std::max(0, v);
+	});
+
+	// square size
+	new nanogui::Label(chessboard, "Chessboard square size");
+	auto* square_size = new nanogui::FloatBox<float>(chessboard, square_size_*1000.0);
+
+	square_size->setEditable(true);
+	square_size->setFormat("[0-9]*\\.?[0-9]+");
+	square_size->setUnits("mm");
+	square_size->setCallback([this](float v){
+		square_size_ = v/1000.0;
+	});
+
+	auto* button_start = new nanogui::Button(this, "Start");
+	button_start->setCallback([this]() {
+		update();
+		view_->setMode(Mode::CAPTURE_IMAGES);
+	});
+}
+
+void StereoCalibrationView::CaptureWindow::draw(NVGcontext* ctx) {
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Control Window
+
+StereoCalibrationView::ControlWindow::ControlWindow(nanogui::Widget* parent, StereoCalibrationView* view) :
+	FixedWindow(parent, ""), view_(view), ctrl_(view->ctrl_) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	txtnframes_ = new nanogui::Label(this, "");
+	updateCount();
+
+	auto* buttons = new nanogui::Widget(this);
+	buttons->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Horizontal, nanogui::Alignment::Middle, 0, 0));
+
+	auto* button_back = new nanogui::Button(buttons, "", ENTYPO_ICON_ARROW_LEFT);
+	button_back->setCallback([this, button = button_back](){
+		view_->setMode(Mode::CAPTURE_INIT);
+	});
+
+	bresults_ = new nanogui::Button(buttons, "Details");
+	bresults_->setFixedWidth(120);
+	//bresults_->setEnabled(ctrl_->calib().calibrated());
+	bresults_->setCallback([view = view_, button = bresults_]{
+		view->setMode(Mode::RESULTS);
+	});
+
+	bpause_ = new nanogui::Button(buttons, "");
+	bpause_->setFixedWidth(120);
+	bpause_->setCallback([&ctrl = ctrl_](){
+		ctrl->setCapture(!ctrl->capturing());
+	});
+
+	bcalibrate_ = new nanogui::Button(buttons, "Calibrate");
+	bcalibrate_->setFixedWidth(120);
+	bcalibrate_->setCallback([view = view_, button = bcalibrate_](){
+		view->setMode(Mode::CALIBRATION);
+	});
+}
+
+void StereoCalibrationView::ControlWindow::draw(NVGcontext* ctx) {
+	if (ctrl_->capturing())	{ bpause_->setCaption("Pause"); }
+	else 					{ bpause_->setCaption("Continue"); }
+	//bcalibrate_->setEnabled(ctrl_->calib().count() > 0);
+	//bresults_->setEnabled(ctrl_->calib().calibrated());
+	updateCount();
+	FixedWindow::draw(ctx);
+}
+
+void StereoCalibrationView::ControlWindow::updateCount() {
+	txtnframes_->setCaption("Detected patterns: " +
+							std::to_string(ctrl_->count()));
+}
+////////////////////////////////////////////////////////////////////////////////
+// Calibration Window
+
+StereoCalibrationView::CalibrationWindow::CalibrationWindow(nanogui::Widget* parent, StereoCalibrationView* view) :
+		FixedWindow(parent, "Calibration"), view_(view), ctrl_(view->ctrl_) {
+
+	calibrating_ = false;
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 6, 6));
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	nanogui::GridLayout *grid_layout = new nanogui::GridLayout
+		(nanogui::Orientation::Horizontal, 2, nanogui::Alignment::Fill, 0, 5);
+	grid_layout->setColAlignment
+		({nanogui::Alignment::Maximum, nanogui::Alignment::Fill});
+
+	grid_layout->setSpacing(0, 10);
+	auto* sensor = new nanogui::Widget(this);
+	sensor->setLayout(grid_layout);
+
+	// physical sensor size (optional); these boxes back the sensorWidth()/sensorHeight() accessors
+	new nanogui::Label(sensor, "Sensor width");
+	sensor_width_ = new nanogui::FloatBox<double>(sensor, 0.0);
+	sensor_width_->setEditable(true);
+	sensor_width_->setFormat("[0-9]*\\.?[0-9]+");
+	sensor_width_->setUnits("mm");
+
+	new nanogui::Label(sensor, "Sensor height");
+	sensor_height_ = new nanogui::FloatBox<double>(sensor, 0.0);
+	sensor_height_->setEditable(true);
+	sensor_height_->setFormat("[0-9]*\\.?[0-9]+");
+	sensor_height_->setUnits("mm");
+
+	// flags
+	new nanogui::Label(this, "Flags");
+	new ftl::gui2::OpenCVFlagWidget(this, &(ctrl_->flags()));
+
+	bcalibrate_ = new nanogui::Button(this, "Run");
+	bcalibrate_->setEnabled(false);
+	bcalibrate_->setCallback([&ctrl = ctrl_, &running = calibrating_](){
+		if (!ctrl->isBusy()) {
+			ctrl->run();
+			running = true;
+		}
+	});
+}
+
+void StereoCalibrationView::CalibrationWindow::draw(NVGcontext* ctx) {
+	bcalibrate_->setEnabled(!ctrl_->isBusy() && (ctrl_->count() > 0));
+	if (calibrating_ && !ctrl_->isBusy()) {
+		calibrating_ = false;
+		view_->setMode(Mode::RESULTS);
+	}
+	FixedWindow::draw(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Result window
+
+StereoCalibrationView::ResultWindow::ResultWindow(nanogui::Widget* parent, StereoCalibrationView* view) :
+	FixedWindow(parent, "Results"), view_(view), ctrl_(view->ctrl_) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 8 , 0));
+
+	tabs_ = new nanogui::TabWidget(this);
+	auto* tabl = tabs_->createTab("Left (intrinsic)");
+	auto* tabr = tabs_->createTab("Right (intrinsic)");
+	infol_ = new ftl::gui2::IntrinsicDetails(tabl);
+	infor_ = new ftl::gui2::IntrinsicDetails(tabr);
+
+	(new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS))->setCallback(
+		[view = view_]() {
+		view->setMode(Mode::VIDEO);
+	});
+
+	bsave_ = new nanogui::Button(this, "Save");
+	bsave_->setCallback([button = bsave_, ctrl = ctrl_](){
+		ctrl->save();
+		button->setCaption("Saved");
+		button->setEnabled(false);
+	});
+}
+
+void StereoCalibrationView::ResultWindow::draw(NVGcontext* ctx) {
+	nanogui::Window::draw(ctx);
+}
+
+void StereoCalibrationView::ResultWindow::performLayout(NVGcontext* ctx) {
+	nanogui::Window::performLayout(ctx);
+	auto sz = infor_->preferredSize(ctx);
+	infol_->parent()->setSize(sz);
+	infor_->parent()->setSize(sz);
+	center();
+}
+
+void StereoCalibrationView::ResultWindow::update() {
+	infol_->update(ctrl_->calibrationLeft().intrinsic);
+	infor_->update(ctrl_->calibrationRight().intrinsic);
+
+	bsave_->setEnabled(true);
+	bsave_->setCaption("Save");
+	screen()->performLayout();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+StereoCalibrationView::StereoCalibrationView(Screen* parent,
+		StereoCalibration* ctrl) : View(parent), ctrl_(ctrl) {
+
+	imview_ = new ftl::gui2::StereoImageView(this);
+
+	int w = 300;
+	wcapture_ = new CaptureWindow(screen(), this);
+	wcapture_->setFixedWidth(w);
+	wcontrol_ = new ControlWindow(screen(), this);
+	wcalibration_ = new CalibrationWindow(screen(), this);
+	wcalibration_->setFixedWidth(w);
+	wresults_ = new ResultWindow(screen(), this);
+
+	screen()->performLayout();
+	setMode(Mode::CAPTURE_INIT);
+}
+
+StereoCalibrationView::~StereoCalibrationView() {
+	wcapture_->setVisible(false);
+	wcapture_->dispose();
+	wcontrol_->setVisible(false);
+	wcontrol_->dispose();
+	wcalibration_->setVisible(false);
+	wcalibration_->dispose();
+	wresults_->setVisible(false);
+	wresults_->dispose();
+}
+
+void StereoCalibrationView::performLayout(NVGcontext *ctx) {
+	auto sz = wcontrol_->size();
+	wcontrol_->setPosition(
+		nanogui::Vector2i(width() / 2 - sz[0]/2, height() - 30 - sz[1]));
+
+	wcapture_->center();
+	wcalibration_->center();
+	wresults_->center();
+
+	imview_->setFixedSize(size());
+
+	View::performLayout(ctx);
+}
+
+void StereoCalibrationView::draw(NVGcontext *ctx) {
+	if (ctrl_->hasFrame()) {
+		auto l = ctrl_->getLeft();
+		auto r = ctrl_->getRight();
+
+		if (l.size() != cv::Size(0, 0) && r.size() != cv::Size(0, 0)) {
+			imview_->left()->copyFrom(l);
+			imview_->right()->copyFrom(r);
+		}
+	}
+	View::draw(ctx);
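+	// overlay the most recent chessboard detection on both views (points[0] = left, points[1] = right)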
+	auto points = ctrl_->previousPoints();
+	if (points.size() == 2) {
+		drawChessboardCorners(ctx, imview_->left(), points[0]);
+		drawChessboardCorners(ctx, imview_->right(), points[1]);
+	}
+}
+
+void StereoCalibrationView::setMode(Mode m) {
+	switch(m) {
+		case Mode::CAPTURE_INIT:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(true);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::CAPTURE_IMAGES:
+			ctrl_->setCapture(true);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::VIDEO:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(true);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::CALIBRATION:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(true);
+			wresults_->setVisible(false);
+			break;
+
+		case Mode::RESULTS:
+			ctrl_->setCapture(false);
+			wcapture_->setVisible(false);
+			wcontrol_->setVisible(false);
+			wcalibration_->setVisible(false);
+			wresults_->setVisible(true);
+			wresults_->update();
+			break;
+	}
+}
diff --git a/applications/gui2/src/views/calibration/stereoview.hpp b/applications/gui2/src/views/calibration/stereoview.hpp
new file mode 100644
index 000000000..194602cb1
--- /dev/null
+++ b/applications/gui2/src/views/calibration/stereoview.hpp
@@ -0,0 +1,71 @@
+#pragma once
+
+#include "../../modules/calibration/calibration.hpp"
+#include "../../view.hpp"
+#include "../../widgets/imageview.hpp"
+
+#include <ftl/utility/gltexture.hpp>
+
+namespace ftl
+{
+namespace gui2
+{
+
+class StereoCalibrationStart : public View {
+public:
+	StereoCalibrationStart(Screen* widget, StereoCalibration* ctrl);
+	virtual ~StereoCalibrationStart();
+
+	virtual void draw(NVGcontext *ctx) override;
+
+	void update();
+
+private:
+	nanogui::Window* window_;
+	nanogui::Widget* buttons_;
+	StereoCalibration* ctrl_;
+	bool show_all_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+class StereoCalibrationView : public View {
+
+	class CaptureWindow;
+	class ControlWindow;
+	class CalibrationWindow;
+	class ResultWindow;
+
+public:
+	StereoCalibrationView(Screen* screen, StereoCalibration* ctrl);
+	virtual ~StereoCalibrationView();
+
+	enum Mode {
+		CAPTURE_INIT,	// set capture parameters
+		CAPTURE_IMAGES,	// capture images
+		CALIBRATION,	// calibration options
+		RESULTS,		// calibration results
+		VIDEO			// same as capture images but paused
+	};
+
+	void setMode(Mode m);
+
+	virtual void performLayout(NVGcontext* ctx) override;
+	virtual void draw(NVGcontext* ctx) override;
+
+private:
+	StereoCalibration* ctrl_;
+	StereoImageView* imview_;
+
+	CaptureWindow* wcapture_;
+	ControlWindow* wcontrol_;
+	CalibrationWindow* wcalibration_;
+	ResultWindow* wresults_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+} // namespace gui2
+} // namespace ftl
diff --git a/applications/gui2/src/views/calibration/visualization.hpp b/applications/gui2/src/views/calibration/visualization.hpp
new file mode 100644
index 000000000..de2fe29ab
--- /dev/null
+++ b/applications/gui2/src/views/calibration/visualization.hpp
@@ -0,0 +1,85 @@
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+
+#include "../../widgets/imageview.hpp"
+
+#include <ftl/calibration/structures.hpp>
+
+/** Draw chessboard corners with NanoVG onto an ImageView widget. */
+template<typename T>
+static void drawChessboardCorners(NVGcontext* ctx, ftl::gui2::ImageView* imview, const std::vector<T>& points) {
+	if (points.size() == 0) { return; }
+
+	nanogui::Vector2f wpos = imview->absolutePosition().cast<float>();
+	nanogui::Vector2f wsize = imview->sizeF();
+	nanogui::Vector2f apos = imview->positionForCoordinate({points[0].x, points[0].y}) + wpos;
+
+	nvgShapeAntiAlias(ctx, 1);
+	nvgScissor(ctx, wpos.x(), wpos.y(), wsize.x(), wsize.y());
+	nvgBeginPath(ctx);
+	nvgMoveTo(ctx, apos.x(), apos.y());
+	for (unsigned int i = 1; i < points.size(); i++) {
+		apos = imview->positionForCoordinate({points[i].x, points[i].y}) + wpos;
+		nvgLineTo(ctx, apos.x(), apos.y());
+	}
+	nvgStrokeColor(ctx, nvgRGBA(255, 32, 32, 192));
+	nvgStrokeWidth(ctx, 1.0f);
+	nvgStroke(ctx);
+
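+	// mark each corner with a white circle outlined in black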
+	for (unsigned int i = 0; i < points.size(); i++) {
+		apos = imview->positionForCoordinate({points[i].x, points[i].y}) + wpos;
+		nvgBeginPath(ctx);
+		nvgCircle(ctx, apos.x(), apos.y(), 2.5);
+		nvgStrokeColor(ctx, nvgRGBA(0, 0, 0, 255));
+		nvgStrokeWidth(ctx, 1.5f);
+		nvgStroke(ctx);
+		nvgBeginPath(ctx);
+		nvgCircle(ctx, apos.x(), apos.y(), 2.5);
+		nvgStrokeColor(ctx, nvgRGBA(255, 255, 255, 255));
+		nvgStrokeWidth(ctx, 1.0f);
+		nvgStroke(ctx);
+
+	}
+	nvgResetScissor(ctx);
+}
+
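+/** Draw a top-down "floor plan" of the camera rig: camera translations are projected onto
+ *  the x/z plane (the floor is assumed to lie on the y-plane), scaled to fit the parent
+ *  widget and drawn as dots centred on the widget. The names and origin parameters are
+ *  currently unused. */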
+static void drawFloorPlan(NVGcontext* ctx, nanogui::Widget* parent,
+		const std::vector<ftl::calibration::CalibrationData::Extrinsic>& calib,
+		const std::vector<std::string>& names = {},
+		int origin=0) {
+
+	float minx = INFINITY;
+	float miny = INFINITY;
+	float maxx = -INFINITY;
+	float maxy = -INFINITY;
+
+	std::vector<cv::Point2f> points(calib.size());
+	for (unsigned int i = 0; i < points.size(); i++) {
+		// xz, assume floor on y-plane
+		float x = calib[i].tvec[0];
+		float y = calib[i].tvec[2];
+		points[i] = {x, y};
+		minx = std::min(minx, x);
+		miny = std::min(miny, y);
+		maxx = std::max(maxx, x);
+		maxy = std::max(maxy, y);
+	}
+
+	float w = parent->width();
+	float sx = w/(maxx - minx);
+	float h = parent->height();
+	float sy = h/(maxy - miny);
+	float s = std::min(sx, sy);
+
+	nanogui::Vector2f apos = parent->absolutePosition().cast<float>() + nanogui::Vector2f{w/2.0f, h/2.0f};
+
+	for (unsigned int i = 0; i < points.size(); i++) {
+		float x = points[i].x*s + apos.x();
+		float y = points[i].y*s + apos.y();
+		// TODO: draw triangles (rotate by camera rotation)
+		nvgBeginPath(ctx);
+		nvgCircle(ctx, x, y, 2.5);
+		nvgStrokeColor(ctx, nvgRGBA(0, 0, 0, 255));
+		nvgStrokeWidth(ctx, 1.0f);
+		nvgStroke(ctx);
+	}
+}
diff --git a/applications/gui2/src/views/calibration/widgets.cpp b/applications/gui2/src/views/calibration/widgets.cpp
new file mode 100644
index 000000000..f907b2ce1
--- /dev/null
+++ b/applications/gui2/src/views/calibration/widgets.cpp
@@ -0,0 +1,165 @@
+#include "widgets.hpp"
+
+#include <sstream>
+
+#include <nanogui/label.h>
+#include <nanogui/layout.h>
+#include <nanogui/button.h>
+#include <nanogui/checkbox.h>
+
+#include <opencv2/calib3d.hpp>
+
+using ftl::gui2::OpenCVFlagWidget;
+using ftl::gui2::OpenCVCalibrateFlags;
+
+template<typename T>
+std::string to_string(T v, int precision = 2) {
+	std::stringstream stream;
+	stream << std::fixed << std::setprecision(precision) << v;
+	return stream.str();
+}
+
+OpenCVFlagWidget::OpenCVFlagWidget(nanogui::Widget* parent, OpenCVCalibrateFlags* flags, int defaultv) :
+		nanogui::Widget(parent), flags_(flags), defaults_(defaultv) {
+
+	if (defaultv == -1) {
+		defaults_ = flags_->defaultFlags();
+	}
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, 4));
+
+	reset();
+}
+
+void OpenCVFlagWidget::reset() {
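+	// clear and rebuild the checkbox list from the current flag values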
+	while(childCount() > 0) {
+		removeChild(childCount() - 1);
+	}
+
+	for(int flag : flags_->list()) {
+		auto* checkbox = new nanogui::CheckBox(this, flags_->name(flag),
+		[flag, this](bool state){
+			if (state)	{ flags_->set(flag); }
+			else		{ flags_->unset(flag); }
+		});
+		checkbox->setChecked(flags_->has(flag));
+		checkbox->setTooltip(flags_->explain(flag));
+	}
+
+	// reset button
+	auto* reset = new nanogui::Button(this, "Reset flags");
+	reset->setCallback([this](){
+		// restore the default flags and update the checkboxes to match
+		// (setChecked() does not trigger the checkbox callback, so the flags must be written explicitly)
+		auto all_flags = flags_->list();
+		for(size_t i = 0; i < all_flags.size(); i++) {
+			bool enable = all_flags[i] & defaults_;
+			if (enable)	{ flags_->set(all_flags[i]); }
+			else		{ flags_->unset(all_flags[i]); }
+			auto* checkbox = dynamic_cast<nanogui::CheckBox*>(childAt(i));
+			checkbox->setChecked(enable);
+		}
+	});
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+using ftl::gui2::IntrinsicDetails;
+
+IntrinsicDetails::IntrinsicDetails(nanogui::Widget* parent) :
+	nanogui::Widget(parent), padding_(8) {
+
+	setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0 , padding_));
+
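+	// two sections: camera parameters in a grid, distortion coefficients as text rows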
+	params_ = new nanogui::Widget(this);
+	dist_ = new nanogui::Widget(this);
+	dist_->setLayout(new nanogui::BoxLayout
+		(nanogui::Orientation::Vertical, nanogui::Alignment::Fill, 0, padding_));
+}
+
+void IntrinsicDetails::update(const ftl::calibration::CalibrationData::Intrinsic &values) {
+	while (params_->childCount() > 0) {
+		params_->removeChild(params_->childCount() - 1);
+	}
+	while (dist_->childCount() > 0) {
+		dist_->removeChild(dist_->childCount() - 1);
+	}
+	bool use_physical = values.sensorSize != cv::Size2d{0.0, 0.0};
+	nanogui::GridLayout* grid_layout;
+	if (use_physical) {
+		grid_layout = new nanogui::GridLayout
+			(nanogui::Orientation::Horizontal, 3, nanogui::Alignment::Fill, 0, padding_);
+	}
+	else {
+		grid_layout = new nanogui::GridLayout
+			(nanogui::Orientation::Horizontal, 2, nanogui::Alignment::Fill, 0, padding_);
+	}
+	grid_layout->setColAlignment
+		({nanogui::Alignment::Maximum, nanogui::Alignment::Fill});
+	params_->setLayout(grid_layout);
+
+	auto sw = values.sensorSize.width;
+	auto sh = values.sensorSize.height;
+	auto K = values.matrix();
+	auto imsize = values.resolution;
+
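+	// cv::calibrationMatrixValues derives field of view, focal length, principal point and
+	// aspect ratio from the camera matrix; the physical (mm) values are only meaningful when
+	// a sensor size has been provided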
+	double fovx;
+	double fovy;
+	double f;
+	cv::Point2d pp;
+	double ar;
+	cv::calibrationMatrixValues(K, imsize, sw, sh, fovx, fovy, f, pp, ar);
+
+	new nanogui::Label(params_, "Size (sensor/image):");
+	if (use_physical) new nanogui::Label(params_, to_string(sw, 1) + std::string("x") + to_string(sh, 1));
+	new nanogui::Label(params_, std::to_string(imsize.width) + std::string("x") + std::to_string(imsize.height));
+
+	new nanogui::Label(params_, "Focal length:");
+	if (use_physical) new nanogui::Label(params_, to_string(f) + " mm");
+	new nanogui::Label(params_,
+		((values.fx == values.fy) ? to_string(values.fx) + " px": (
+		"(" + to_string(values.fx) + ", "
+			+ to_string(values.fy) + ")")));
+
+	new nanogui::Label(params_, "Principal point:");
+	if (use_physical) new nanogui::Label(params_,
+			"(" + to_string(pp.x) + ", " +
+				to_string(pp.y) + ")");
+
+	new nanogui::Label(params_,
+		"(" + to_string(values.cx) + ", " +
+			  to_string(values.cy) + ")");
+
+	new nanogui::Label(params_, "Field of View (x):");
+	new nanogui::Label(params_, to_string(fovx) + "°");
+	if (use_physical) new nanogui::Label(params_, "");
+
+	new nanogui::Label(params_, "Field of View (y):");
+	new nanogui::Label(params_, to_string(fovy)+ "°");
+	if (use_physical) new nanogui::Label(params_, "");
+
+	new nanogui::Label(params_, "Aspect ratio:");
+	new nanogui::Label(params_, to_string(ar));
+	if (use_physical) new nanogui::Label(params_, "");
+
+	std::string pK;
+	std::string pP;
+	std::string pS;
+	auto& D = values.distCoeffs;
+
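+	// OpenCV distortion coefficient ordering: (k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4, ...)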
+	pK += "K1: " + to_string(D[0], 3);
+	pK += ", K2: " + to_string(D[1], 3);
+	pK += ", K3: " + to_string(D[4], 3);
+	pK += ", K4: " + to_string(D[5], 3);
+	pK += ", K5: " + to_string(D[6], 3);
+	pK += ", K6: " + to_string(D[7], 3);
+
+	pP += "P1: " + to_string(D[2], 3);
+	pP += ", P2: " + to_string(D[3], 3);
+
+	pS += "S1: " + to_string(D[8], 3);
+	pS += ", S2: " + to_string(D[9], 3);
+	pS += ", S3: " + to_string(D[10], 3);
+	pS += ", S4: " + to_string(D[11], 3);
+
+	if (!pK.empty()) new nanogui::Label(dist_, pK);
+	if (!pP.empty()) new nanogui::Label(dist_, pP);
+	if (!pS.empty()) new nanogui::Label(dist_, pS);
+}
diff --git a/applications/gui2/src/views/calibration/widgets.hpp b/applications/gui2/src/views/calibration/widgets.hpp
new file mode 100644
index 000000000..76ebf2c7d
--- /dev/null
+++ b/applications/gui2/src/views/calibration/widgets.hpp
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <nanogui/widget.h>
+
+#include <ftl/calibration/structures.hpp>
+
+#include "../../modules/calibration/calibration.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+class OpenCVFlagWidget : public nanogui::Widget {
+public:
+	OpenCVFlagWidget(nanogui::Widget* parent, OpenCVCalibrateFlags* flags, int defaultv=-1);
+	void reset();
+	void setDefaults(int v) { defaults_ = v; }
+
+private:
+	OpenCVCalibrateFlags* flags_;
+	int defaults_;
+};
+
+class IntrinsicDetails : public nanogui::Widget {
+public:
+	IntrinsicDetails(nanogui::Widget* parent);
+	void update(const ftl::calibration::CalibrationData::Intrinsic &values);
+
+private:
+	nanogui::Widget* params_;
+	nanogui::Widget* dist_;
+	int padding_;
+};
+
+} // namespace gui2
+} // namespace ftl
diff --git a/applications/gui2/src/views/camera.cpp b/applications/gui2/src/views/camera.cpp
new file mode 100644
index 000000000..565381ebc
--- /dev/null
+++ b/applications/gui2/src/views/camera.cpp
@@ -0,0 +1,508 @@
+#include <nanogui/screen.h>
+#include <nanogui/layout.h>
+#include <nanogui/button.h>
+#include <nanogui/vscrollpanel.h>
+#include <ftl/utility/string.hpp>
+
+#include <ftl/codecs/touch.hpp>
+
+#include "camera.hpp"
+
+#include "../modules/camera.hpp"
+#include "../modules/config.hpp"
+#include "../modules/statistics.hpp"
+
+#include "../widgets/popupbutton.hpp"
+
+#include <loguru.hpp>
+
+using ftl::gui2::Camera;
+using ftl::gui2::FixedWindow;
+using ftl::gui2::MediaPanel;
+using ftl::gui2::CameraView;
+using ftl::gui2::PopupButton;
+using ftl::gui2::VolumeButton;
+
+using ftl::codecs::Channel;
+
+// ==== Record Options =========================================================
+
+class RecordOptions : public nanogui::Window {
+public:
+	RecordOptions(nanogui::Widget *parent, Camera* ctrl);
+	virtual ~RecordOptions();
+
+	void show(const std::function<void(bool)> &cb);
+
+private:
+	Camera* ctrl_;
+	std::list<std::tuple<nanogui::CheckBox*,ftl::codecs::Channel>> channels_;
+	std::function<void(bool)> callback_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+RecordOptions::RecordOptions(nanogui::Widget *parent, Camera* ctrl)
+ : nanogui::Window(parent, "Recording"), ctrl_(ctrl) {
+
+	using namespace nanogui;
+
+	//setFixedWidth(300);
+	setLayout(new GroupLayout(15, 6, 14, 10));
+	setPosition(Vector2i(parent->width()/2.0f - 100.0f, parent->height()/2.0f - 100.0f));
+	setVisible(false);
+
+	auto close = new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS);
+	close->setTheme(dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark"));
+	close->setBackgroundColor(theme()->mWindowHeaderGradientBot);
+	close->setCallback([this](){
+		setVisible(false);
+		if (callback_) callback_(false);
+	});
+
+	auto filename_box = new TextBox(this, "test.ftl");
+	filename_box->setEditable(true);
+
+	VScrollPanel *vscroll = new VScrollPanel(this);
+	vscroll->setFixedHeight(150);
+	Widget *scroll = new Widget(vscroll);
+	scroll->setLayout(new GridLayout(Orientation::Horizontal, 3));
+	//auto *label = new Label(vscroll, "Select Channels:", "sans-bold");
+
+	// Add all available channels as checkboxes
+	// TODO: Refresh this list on show
+	auto channels = ctrl_->allAvailableChannels();
+	for (auto c : channels) {
+		// Skip channels that can't be encoded
+		if (int(c) < 32) {
+			switch (c) {
+			case Channel::Colour	:
+			case Channel::Colour2	:
+			case Channel::Depth		:
+			case Channel::Depth2	:
+			case Channel::ColourHighRes :
+			case Channel::Colour2HighRes : break;
+			default: continue;
+			}
+		}
+
+		auto check = new CheckBox(scroll, ftl::codecs::name(c));
+		switch (c) {
+		case Channel::Colour		:
+		case Channel::Pose			:
+		case Channel::Capabilities	:
+		case Channel::Calibration	:
+		case Channel::MetaData		: check->setChecked(true); break;
+		default: break;
+		}
+
+		if (c == Channel::Calibration) {
+			check->setEnabled(false);
+		}
+
+		channels_.emplace_back(check, c);
+	}
+
+	auto *button_panel = new Widget(this);
+	button_panel->setLayout(new BoxLayout(Orientation::Horizontal, Alignment::Middle, 0, 6));
+
+	auto start = new Button(button_panel, "Record");
+	start->setCallback([this, filename_box]() {
+		std::unordered_set<ftl::codecs::Channel> selection;
+		for (auto &s : channels_) {
+			if (std::get<0>(s)->checked()) {
+				selection.emplace(std::get<1>(s));
+			}
+		}
+
+		if (selection.size() > 0) {
+			ctrl_->startRecording(filename_box->value(), selection);
+			setVisible(false);
+		}
+
+		if (callback_) callback_(true);
+	});
+
+	auto stream = new Button(button_panel, "Stream");
+	stream->setCallback([this]() {
+		std::unordered_set<ftl::codecs::Channel> selection;
+		for (auto &s : channels_) {
+			if (std::get<0>(s)->checked()) {
+				selection.emplace(std::get<1>(s));
+			}
+		}
+
+		if (selection.size() > 0) {
+			ctrl_->startStreaming(selection);
+			setVisible(false);
+		}
+
+		if (callback_) callback_(true);
+	});
+
+	auto closebut = new Button(button_panel, "Cancel");
+	closebut->setCallback([this]() {
+		setVisible(false);
+		if (callback_) callback_(false);
+	});
+
+	auto advanced = new Button(button_panel, "Advanced");
+	advanced->setEnabled(false);
+
+	screen()->performLayout();
+}
+
+RecordOptions::~RecordOptions() {
+
+}
+
+void RecordOptions::show(const std::function<void(bool)> &cb) {
+	setVisible(true);
+	callback_ = cb;
+}
+
+// === MediaPanel ==============================================================
+
+class MediaPanel : public FixedWindow {
+public:
+	MediaPanel(nanogui::Widget *parent, Camera* ctrl);
+	virtual ~MediaPanel();
+
+	void setAvailableChannels(const std::unordered_set<ftl::codecs::Channel> &channels);
+	void setActiveChannel(ftl::codecs::Channel c);
+
+	void draw(NVGcontext *ctx) override;
+
+	/** Add a button at the given position; appends when position is -1. */
+	nanogui::Button* addButton(int position = -1);
+
+	PopupButton* button_channels;
+	VolumeButton* button_volume;
+
+private:
+	std::vector<nanogui::Widget*> buttons(); // channel buttons
+	Camera* ctrl_;
+	RecordOptions *record_opts_=nullptr;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+MediaPanel::MediaPanel(nanogui::Widget *parent, ftl::gui2::Camera* ctrl) :
+	ftl::gui2::FixedWindow(parent, ""), ctrl_(ctrl) {
+
+	LOG(INFO) << __func__ << " (" << this << ")";
+	using namespace nanogui;
+
+	record_opts_ = new RecordOptions(screen(), ctrl);
+
+	setLayout(new BoxLayout(Orientation::Horizontal,
+									Alignment::Middle, 5, 10));
+
+	auto theme = dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("media");
+	this->setTheme(theme);
+
+	// Volume control
+	button_volume = new ftl::gui2::VolumeButton(this);
+	button_volume->setValue(ctrl_->volume());
+	button_volume->setCallback([ctrl = ctrl_](float v){ ctrl->setVolume(v); });
+
+	// Pause/Unpause
+	auto button_pause = new Button(this, "", ENTYPO_ICON_CONTROLLER_PAUS);
+	if (ctrl->isPaused()) {
+		button_pause->setIcon(ENTYPO_ICON_CONTROLLER_PLAY);
+	}
+
+	button_pause->setCallback([ctrl = ctrl_ ,button_pause]() {
+		ctrl->setPaused(!ctrl->isPaused());
+
+		if (ctrl->isPaused()) {
+			button_pause->setIcon(ENTYPO_ICON_CONTROLLER_PLAY);
+		} else {
+			button_pause->setIcon(ENTYPO_ICON_CONTROLLER_PAUS);
+		}
+	});
+
+	// Record
+	/*auto button_record = new ftl::gui2::PopupButton(this, "", ENTYPO_ICON_CONTROLLER_RECORD);
+	button_record->setSide(Popup::Side::Right);
+	button_record->setChevronIcon(0);
+
+	auto rec_popup = button_record->popup();
+	rec_popup->setLayout(new GroupLayout());
+
+	{
+		auto button = new Button(rec_popup, "Record to File");
+		//button->setFlags(Button::RadioButton);
+		//button->setVisible(true);
+		button->setCallback([this, button_record]() {
+			if (!ctrl_->isRecording()) {
+				button_record->setTextColor(nanogui::Color(1.0f,0.1f,0.1f,1.0f));
+				button_record->setPushed(false);
+				ctrl_->startRecording("test.ftl");
+			}
+		});
+	}
+
+	button_record->setCallback([this, button_record]() {
+		if (ctrl_->isRecording()) {
+			button_record->setTextColor(nanogui::Color(1.0f,1.0f,1.0f,1.0f));
+			button_record->setPushed(false);
+			ctrl_->stopRecording();
+		}
+	});*/
+
+	// Record
+	auto button_record = new Button(this, "", ENTYPO_ICON_CONTROLLER_RECORD);
+	button_record->setCallback([this, button_record]() {
+		if (record_opts_->visible()) return;
+
+		if (ctrl_->isRecording()) {
+			ctrl_->stopRecording();
+			button_record->setTextColor(nanogui::Color(1.0f,1.0f,1.0f,1.0f));
+		} else {
+			record_opts_->show([button_record](bool rec) {
+				if (rec) button_record->setTextColor(nanogui::Color(1.0f,0.1f,0.1f,1.0f));
+			});
+		}
+	});
+
+	// Channel select. Creates buttons for 32 channels and sets available ones
+	// visible (a bit of a hack, only used here and setAvailableChannels())
+
+	button_channels = new ftl::gui2::PopupButton(this, "", ENTYPO_ICON_LAYERS);
+	button_channels->setSide(Popup::Side::Right);
+	button_channels->setChevronIcon(0);
+
+	auto popup = button_channels->popup();
+	popup->setLayout(new GroupLayout());
+
+	for (int i=0; i < 32; ++i) {
+		ftl::codecs::Channel c = static_cast<ftl::codecs::Channel>(i);
+		auto button = new Button(popup, ftl::codecs::name(c));
+		button->setFlags(Button::RadioButton);
+		button->setVisible(false);
+		button->setCallback([this,c]() {
+			ctrl_->setChannel(c);
+			setActiveChannel(c);
+		});
+	}
+
+	setAvailableChannels(ctrl_->availableChannels());
+
+	// Settings
+	auto button_config = new Button(this, "", ENTYPO_ICON_COG);
+
+	button_config->setCallback([ctrl = ctrl_]() {
+		auto uri = ctrl->getActiveSourceURI();
+		if (uri.size() > 0) ctrl->screen->getModule<ftl::gui2::ConfigCtrl>()->show(uri);
+		else ctrl->screen->showError("Error", "This source does not have any settings");
+	});
+}
+
+MediaPanel::~MediaPanel() {
+	if (parent()->getRefCount() > 0) record_opts_->dispose();
+}
+
+void MediaPanel::draw(NVGcontext *ctx) {
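+	// Keep the panel horizontally centred near the bottom edge of the screen.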
+	auto size = this->size();
+	setPosition(
+		nanogui::Vector2i(	screen()->width() / 2 - size[0]/2,
+							screen()->height() - 30 - size[1]));
+
+	FixedWindow::draw(ctx);
+}
+
+std::vector<nanogui::Widget*> MediaPanel::buttons() {
+
+	auto popup = button_channels->popup();
+
+	if (popup->childCount() != 32) {
+		LOG(ERROR) << "Wrong number of buttons!";
+	}
+	return popup->children();
+}
+
+void MediaPanel::setAvailableChannels(const std::unordered_set<Channel> &channels) {
+
+	const auto &button = buttons();
+	bool update = false;
+
+	for (int i = 0; i < 32; ++i) {
+		ftl::codecs::Channel c = static_cast<ftl::codecs::Channel>(i);
+		bool visible = channels.count(c) > 0;
+		update |= (visible != button[i]->visible());
+		button[i]->setVisible(visible);
+	}
+
+	if (update) {
+		auto popup = button_channels->popup();
+		screen()->performLayout();
+		popup->setAnchorHeight(popup->height() - 20);
+	}
+}
+
+void MediaPanel::setActiveChannel(Channel c) {
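+	// The popup holds exactly 32 buttons in channel order, so the channel
+	// value can be used directly as an index.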
+	auto button = dynamic_cast<nanogui::Button*>
+		(buttons()[static_cast<size_t>(c)]);
+
+	button->setVisible(true);
+	button->setPushed(true);
+}
+
+nanogui::Button* MediaPanel::addButton(int pos) {
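+	// New children are appended, so move the freshly created button to the
+	// requested index before laying out again.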
+	auto* button = new nanogui::Button(this, "", 0);
+	if (pos >= 0) {
+		mChildren.pop_back();
+		mChildren.insert(mChildren.begin() + pos, button);
+	}
+	performLayout(screen()->nvgContext());
+	return button;
+}
+
+// ==== CameraView =============================================================
+
+CameraView::CameraView(ftl::gui2::Screen* parent, ftl::gui2::Camera* ctrl) :
+		View(parent), enable_zoom_(false), enable_pan_(false), ctrl_(ctrl) {
+
+	imview_ = new ftl::gui2::FTLImageView(this);
+	panel_ = new ftl::gui2::MediaPanel(screen(), ctrl);
+
+	auto *mod = ctrl_->screen->getModule<ftl::gui2::Statistics>();
+	if (ctrl_->isMovable()) {
+		imview_->setCursor(nanogui::Cursor::Hand);
+		mod->setCursor(nanogui::Cursor::Hand);
+	} else {
+		imview_->setCursor(nanogui::Cursor::Crosshair);
+		mod->setCursor(nanogui::Cursor::Crosshair);
+	}
+
+	auto theme = dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("toolbutton");
+	//this->setTheme(theme);
+
+	context_menu_ = new nanogui::Window(parent, "");
+	context_menu_->setVisible(false);
+	context_menu_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical));
+	context_menu_->setTheme(theme);
+
+	auto *button = new nanogui::Button(context_menu_, "Capture Image");
+	button->setCallback([this]() {
+		char timestamp[18];
+		std::time_t t=std::time(NULL);
+		std::strftime(timestamp, sizeof(timestamp), "%F-%H%M%S", std::localtime(&t));
+		context_menu_->setVisible(false);
+		ctrl_->snapshot(std::string(timestamp)+std::string(".png"));
+	});
+
+	button = new nanogui::Button(context_menu_, "Settings");
+	button->setCallback([this, button]() {
+		context_menu_->setVisible(false);
+		ctrl_->screen->getModule<ftl::gui2::ConfigCtrl>()->show(ctrl_->getID());
+	});
+}
+
+CameraView::~CameraView() {
+	if (parent()->getRefCount() > 0) {
+		// segfault without this check; nanogui already deleted windows?
+		// should be fixed in nanogui
+		panel_->dispose();
+	}
+
+	if (context_menu_->parent()->getRefCount() > 0) {
+		context_menu_->setVisible(false);
+		context_menu_->dispose();
+	}
+}
+
+void CameraView::refresh() {
+	bool was_valid = imview_->texture().isValid();
+
+	if (ctrl_->hasFrame()) {
+		imview_->copyFrom(ctrl_->getFrame());
+	}
+	if (!was_valid && imview_->texture().isValid()) {
+		screen()->performLayout();
+	}
+}
+
+void CameraView::setZoom(bool v) {
+	enable_zoom_ = v;
+	imview_->setFixedScale(!v);
+	if (!v) {
+		imview_->setScale(1.0f);
+	}
+}
+
+void CameraView::setPan(bool v) {
+	enable_pan_ = v;
+	imview_->setFixedOffset(!v);
+	if (!v) {
+		imview_->fit();
+	}
+}
+
+bool CameraView::mouseMotionEvent(const Eigen::Vector2i &p, const Eigen::Vector2i &rel, int button, int modifiers) {
+	auto pos = imview_->imageCoordinateAt((p - mPos + rel).cast<float>());
+	if (pos.x() >= 0.0f && pos.y() >= 0.0f) {
+		ctrl_->touch(0, ftl::codecs::TouchType::MOUSE_LEFT, pos.x(), pos.y(), 0.0f, (button > 0) ? 255 : 0);
+	}
+	return true;
+}
+
+bool CameraView::mouseButtonEvent(const Eigen::Vector2i &p, int button, bool down, int modifiers) {
+	//LOG(INFO) << "mouseButtonEvent: " << p << " - " << button;
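+	// Left button (0) forwards a touch to the source; right button (1) opens
+	// the context menu on release; any other button hides the menu.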
+	if (button == 0) {
+		auto pos = imview_->imageCoordinateAt((p - mPos).cast<float>());
+		if (pos.x() >= 0.0f && pos.y() >= 0.0f) {
+			ctrl_->touch(0, ftl::codecs::TouchType::MOUSE_LEFT, pos.x(), pos.y(), 0.0f, (down) ? 255 : 0);
+		}
+		context_menu_->setVisible(false);
+		return true;
+	} else if (button == 1) {
+		if (!down) {
+			context_menu_->setPosition(p - mPos);
+			context_menu_->setVisible(true);
+			return true;
+		}
+	} else {
+		context_menu_->setVisible(false);
+	}
+	return false;
+}
+
+void CameraView::draw(NVGcontext*ctx) {
+	if (ctrl_->hasFrame()) {
+		try {
+			// TODO: Select shader to flip if VR capability found...
+			imview_->copyFrom(ctrl_->getFrame());
+		}
+		catch (std::exception& e) {
+			gui()->showError("Exception", e.what());
+		}
+	}
+	View::draw(ctx);
+
+	ctrl_->drawOverlay(ctx);
+
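+	// Show the depth under the cursor, in metres, next to the pointer.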
+	auto mouse = screen()->mousePos();
+	auto pos = imview_->imageCoordinateAt((mouse - mPos).cast<float>());
+	float d = ctrl_->depthAt(pos.x(), pos.y());
+
+	if (d > 0.0f) {
+		nvgText(ctx, mouse.x()+25.0f, mouse.y()+20.0f, (to_string_with_precision(d,2) + std::string("m")).c_str(), nullptr);
+	}
+}
+
+void CameraView::performLayout(NVGcontext* ctx) {
+	imview_->setSize(size());
+	if (!(enable_zoom_ && enable_pan_)) {
+		imview_->fit();
+	}
+	View::performLayout(ctx);
+}
diff --git a/applications/gui2/src/views/camera.hpp b/applications/gui2/src/views/camera.hpp
new file mode 100644
index 000000000..efeb4221b
--- /dev/null
+++ b/applications/gui2/src/views/camera.hpp
@@ -0,0 +1,44 @@
+#pragma once
+
+#include "../view.hpp"
+
+#include <ftl/utility/gltexture.hpp>
+
+#include "../widgets/window.hpp"
+#include "../widgets/soundctrl.hpp"
+#include "../widgets/imageview.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+class Camera;
+class MediaPanel;
+
+class CameraView : public View {
+public:
+	CameraView(Screen* parent, Camera* ctrl);
+	virtual ~CameraView();
+
+	virtual void draw(NVGcontext* ctx) override;
+	virtual void performLayout(NVGcontext* ctx) override;
+	virtual bool mouseButtonEvent(const Eigen::Vector2i &p, int button, bool down, int modifiers) override;
+	virtual bool mouseMotionEvent(const Eigen::Vector2i &p, const Eigen::Vector2i &rel, int button, int modifiers) override;
+
+	void refresh();
+	void setZoom(bool enable);
+	void setPan(bool enable);
+
+protected:
+	bool enable_zoom_;
+	bool enable_pan_;
+	Camera* ctrl_;
+	MediaPanel* panel_;
+	FTLImageView* imview_;
+	nanogui::Window *context_menu_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/camera3d.cpp b/applications/gui2/src/views/camera3d.cpp
new file mode 100644
index 000000000..9d1c0fea2
--- /dev/null
+++ b/applications/gui2/src/views/camera3d.cpp
@@ -0,0 +1,138 @@
+#include "camera3d.hpp"
+#include "../modules/camera.hpp"
+
+#include <loguru.hpp>
+
+using ftl::gui2::CameraView3D;
+
+// =============================================================================
+
+static Eigen::Affine3d create_rotation_matrix(float ax, float ay, float az) {
+	Eigen::Affine3d rx =
+		Eigen::Affine3d(Eigen::AngleAxisd(ax, Eigen::Vector3d(1, 0, 0)));
+	Eigen::Affine3d ry =
+		Eigen::Affine3d(Eigen::AngleAxisd(ay, Eigen::Vector3d(0, 1, 0)));
+	Eigen::Affine3d rz =
+		Eigen::Affine3d(Eigen::AngleAxisd(az, Eigen::Vector3d(0, 0, 1)));
+	return rz * rx * ry;
+}
+
+// ==== CameraView3D ===========================================================
+
+CameraView3D::CameraView3D(ftl::gui2::Screen *parent, ftl::gui2::Camera *ctrl) :
+		CameraView(parent, ctrl) {
+
+	eye_ = Eigen::Vector3d::Zero();
+	neye_ = Eigen::Vector4d::Zero();
+	rotmat_.setIdentity();
+
+	rx_ = 0.0;
+	ry_ = 0.0;
+
+	ftime_ = 0.0;
+	delta_ = 0.0;
+	lerp_speed_ = 0.999f;
+
+	pose_up_to_date_.test_and_set();
+
+	setZoom(false);
+	setPan(false);
+}
+
+bool CameraView3D::keyboardEvent(int key, int scancode, int action, int modifiers) {
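+	// Raw GLFW key codes: 262 = Right, 263 = Left, 264 = Down, 265 = Up,
+	// 266 = Page Up, 267 = Page Down. Modifier bit 0x1 (Shift) selects the
+	// finer 0.01 step instead of 0.1.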
+	if (key == 263 || key == 262) {
+		float mag = (modifiers & 0x1) ? 0.01f : 0.1f;
+		float scalar = (key == 263) ? -mag : mag;
+		neye_ += rotmat_*Eigen::Vector4d(scalar, 0.0, 0.0, 1.0);
+		pose_up_to_date_.clear();
+	}
+	else if (key == 264 || key == 265) {
+		float mag = (modifiers & 0x1) ? 0.01f : 0.1f;
+		float scalar = (key == 264) ? -mag : mag;
+		neye_ += rotmat_*Eigen::Vector4d(0.0, 0.0, scalar, 1.0);
+		pose_up_to_date_.clear();
+	}
+	else if (key == 266 || key == 267) {
+		float mag = (modifiers & 0x1) ? 0.01f : 0.1f;
+		float scalar = (key == 266) ? -mag : mag;
+		neye_ += rotmat_*Eigen::Vector4d(0.0, scalar, 0.0, 1.0);
+		pose_up_to_date_.clear();
+	}
+	else if (key >= '0' && key <= '5' && modifiers == 2) {  // Ctrl+NUMBER
+	}
+
+	return true;
+}
+
+bool CameraView3D::mouseButtonEvent(const Eigen::Vector2i &p, int button, bool down, int modifiers) {
+	return CameraView::mouseButtonEvent(p, button, down, modifiers);
+}
+
+bool CameraView3D::mouseMotionEvent(const Eigen::Vector2i &p, const Eigen::Vector2i &rel, int button, int modifiers) {
+	if (button != 1) {
+		return true;
+	}
+
+	rx_ += rel[0];
+	ry_ += rel[1];
+	pose_up_to_date_.clear();
+
+	//LOG(INFO) << "New pose: \n" << getUpdatedPose();
+	//ctrl_->sendPose(getUpdatedPose());
+	return true;
+}
+
+bool CameraView3D::scrollEvent(const Eigen::Vector2i &p, const Eigen::Vector2f &rel) {
+	return true;
+}
+
+bool CameraView3D::keyboardCharacterEvent(unsigned int codepoint) {
+	LOG(INFO) << "keyboardCharacterEvent: " << codepoint;
+	return false;
+}
+
+Eigen::Matrix4d CameraView3D::getUpdatedPose() {
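+	// Turn the mouse deltas accumulated since the last update into a small
+	// rotation (scaled by frame time), then ease the eye position towards
+	// the keyboard-driven target neye_.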
+	float rrx = ((float)ry_ * 0.2f * delta_);
+	float rry = (float)rx_ * 0.2f * delta_;
+	float rrz = 0.0;
+
+	Eigen::Affine3d r = create_rotation_matrix(rrx, -rry, rrz);
+	rotmat_ = rotmat_ * r.matrix();
+
+	rx_ = 0;
+	ry_ = 0;
+
+	eye_[0] += (neye_[0] - eye_[0]) * lerp_speed_ * delta_;
+	eye_[1] += (neye_[1] - eye_[1]) * lerp_speed_ * delta_;
+	eye_[2] += (neye_[2] - eye_[2]) * lerp_speed_ * delta_;
+
+	Eigen::Translation3d trans(eye_);
+	Eigen::Affine3d t(trans);
+	return t.matrix() * rotmat_;
+}
+
+void CameraView3D::processAnimation() {
+	Eigen::Vector3d diff;
+	diff[0] = neye_[0] - eye_[0];
+	diff[1] = neye_[1] - eye_[1];
+	diff[2] = neye_[2] - eye_[2];
+
+	// Only update pose if there is enough motion
+	if (diff.norm() > 0.01) {
+		pose_up_to_date_.clear();
+	}
+}
+
+void CameraView3D::draw(NVGcontext* ctx) {
+	double now = glfwGetTime();
+	delta_ = now - ftime_;
+	ftime_ = now;
+
+	processAnimation();
+
+	// poll from ctrl_ or send on event instead?
+	if (!pose_up_to_date_.test_and_set()) {
+		ctrl_->sendPose(getUpdatedPose());
+	}
+	CameraView::draw(ctx);
+}
diff --git a/applications/gui2/src/views/camera3d.hpp b/applications/gui2/src/views/camera3d.hpp
new file mode 100644
index 000000000..b5dda23c2
--- /dev/null
+++ b/applications/gui2/src/views/camera3d.hpp
@@ -0,0 +1,51 @@
+#pragma once
+
+#include "../widgets/window.hpp"
+
+#include "../view.hpp"
+
+#include "camera.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+class CameraView3D : public CameraView {
+public:
+	CameraView3D(Screen *parent, Camera* ctrl);
+
+	virtual bool keyboardEvent(int key, int scancode, int action, int modifiers) override;
+	virtual bool keyboardCharacterEvent(unsigned int codepoint) override;
+	virtual bool mouseMotionEvent(const Eigen::Vector2i &p, const Eigen::Vector2i &rel, int button, int modifiers) override;
+	virtual bool mouseButtonEvent(const Eigen::Vector2i &p, int button, bool down, int modifiers) override;
+	virtual bool scrollEvent(const Eigen::Vector2i &p, const Eigen::Vector2f &rel) override;
+	virtual void draw(NVGcontext* ctx) override;
+
+	Eigen::Matrix4d getUpdatedPose();
+
+protected:
+	// updates from keyboard
+	Eigen::Vector4d neye_;
+	// current
+	Eigen::Vector3d eye_;
+	Eigen::Matrix4d rotmat_;
+
+	// updates from mouse
+	double rx_;
+	double ry_;
+
+	// times for pose update
+	double ftime_;
+	double delta_;
+
+	double lerp_speed_;
+
+	std::atomic_flag pose_up_to_date_;
+
+	void processAnimation();
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/config.cpp b/applications/gui2/src/views/config.cpp
new file mode 100644
index 000000000..029b66d65
--- /dev/null
+++ b/applications/gui2/src/views/config.cpp
@@ -0,0 +1,356 @@
+
+#include <loguru.hpp>
+
+#include <nanogui/layout.h>
+#include <nanogui/label.h>
+#include <nanogui/button.h>
+#include <nanogui/entypo.h>
+#include <nanogui/formhelper.h>
+#include <nanogui/vscrollpanel.h>
+#include <nanogui/opengl.h>
+
+#include <nlohmann/json.hpp>
+
+#include <vector>
+#include <string>
+
+#include "config.hpp"
+#include "../screen.hpp"
+#include "../widgets/leftbutton.hpp"
+
+using ftl::gui2::ConfigWindow;
+using std::string;
+using std::vector;
+using ftl::config::json_t;
+
+class SearchBox : public nanogui::TextBox {
+private:
+	std::vector<std::string> configurables_;
+	Widget *buttons_;
+	std::string previous;
+
+	void _setVisible(const std::string &str) {
+		// Check whether the search string has changed to prevent
+		// unnecessary searching.
+		if (str != previous) {
+			for (int i = configurables_.size()-1; i >= 0; --i) {
+				if (configurables_[i].find(str) != std::string::npos) {
+					buttons_->childAt(i)->setVisible(true);
+				} else {
+					buttons_->childAt(i)->setVisible(false);
+				}
+			}
+			previous = str;
+			screen()->performLayout();
+		}
+	}
+
+public:
+	SearchBox(Widget *parent, std::vector<std::string> &configurables) : nanogui::TextBox(parent, ""), configurables_(configurables) {
+		setAlignment(TextBox::Alignment::Left);
+		setEditable(true);
+		setPlaceholder("Search");
+	}
+
+	virtual ~SearchBox() {
+	}
+
+	bool keyboardEvent(int key, int scancode, int action, int modifier) {
+		TextBox::keyboardEvent(key, scancode, action, modifier);
+		_setVisible(mValueTemp);
+		return true;
+	}
+
+	void setButtons(Widget *buttons) {
+		buttons_ = buttons;
+	}
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+static std::string titleForURI(const ftl::URI &uri) {
+	auto *cfg = ftl::config::find(uri.getBaseURI());
+	if (cfg && cfg->get<std::string>("title")) {
+		return *cfg->get<std::string>("title");
+	} else if (uri.getPath().size() > 0) {
+		return uri.getPathSegment(-1);
+	} else {
+		return uri.getHost();
+	}
+}
+
+ConfigWindow::ConfigWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl)
+		: nanogui::Window(parent, "Settings"), ctrl_(ctrl) {
+
+	LOG(INFO) << __func__ << " (" << this << ")";
+
+	using namespace nanogui;
+
+	setTheme(dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark"));
+
+	auto close = new nanogui::Button(buttonPanel(), "", ENTYPO_ICON_CROSS);
+	close->setTheme(dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark"));
+	close->setBackgroundColor(theme()->mWindowHeaderGradientBot);
+	close->setCallback([this](){ dispose();});
+
+	setLayout(new GroupLayout(15, 6, 14, 10));
+	setFixedWidth(400);
+	setPosition(Vector2i(parent->width()/2.0f - 100.0f, parent->height()/2.0f - 100.0f));
+
+	auto configurables = ftl::config::list();
+	const auto size = configurables.size();
+
+	new Label(this, "Select Configurable","sans-bold");
+
+	auto searchBox = new SearchBox(this, configurables);
+
+	auto vscroll = new VScrollPanel(this);
+	vscroll->setFixedHeight(300);
+	auto buttons = new Widget(vscroll);
+	buttons->setLayout(new BoxLayout(Orientation::Vertical, Alignment::Fill));
+
+	searchBox->setButtons(buttons);
+
+	std::vector<std::string> configurable_titles(size);
+
+	std::set<std::string> sorted_cfgs;
+	sorted_cfgs.insert(configurables.begin(), configurables.end());
+
+	for (auto &c : sorted_cfgs) {
+		ftl::URI uri(c);
+
+		std::string spacing = "";
+		for (size_t i=0; i<uri.getPathLength(); ++i) {
+			spacing += "    ";
+		}
+
+		//if (uri.getFragment().size() == 0) {
+			std::string title = spacing + titleForURI(uri);
+			//configurable_titles[i] = title;
+			auto itembutton = new ftl::gui2::LeftButton(buttons, title);
+
+			/*if (_isEmpty(c)) {
+				itembutton->setEnabled(false);
+			}*/
+			itembutton->setTooltip(c);
+			//itembutton->setBackgroundColor(nanogui::Color(0.9f,0.9f,0.9f,0.9f));
+			itembutton->setCallback([this,c]() {
+				_buildForm(c);
+				setVisible(false);
+				dispose();
+			});
+		//}
+	}
+
+	/*for (size_t i = 0; i < size; ++i) {
+		ftl::URI uri(configurables[i]);
+		std::string label = uri.getFragment();
+
+		size_t pos = label.find_last_of("/");
+		if (pos != std::string::npos) label = label.substr(pos+1);
+
+		std::string parentName = configurables[i];
+		size_t pos2 = parentName.find_last_of("/");
+		if (pos2 != std::string::npos) parentName = parentName.substr(0,pos2);
+
+		// FIXME: Does not indicated parent indentation ... needs sorting?
+
+		if (i > 0 && parentName == configurables[i-1]) {
+			ftl::URI uri(configurables[i-1]);
+			configurable_titles[i-1] = std::string("[") + titleForURI(uri) + std::string("] ") + uri.getFragment();
+
+			auto *prev = dynamic_cast<Button*>(buttons->childAt(buttons->childCount()-1));
+			prev->setCaption(configurable_titles[i-1]);
+			prev->setBackgroundColor(nanogui::Color(0.3f,0.3f,0.3f,1.0f));
+			prev->setTextColor(nanogui::Color(1.0f,1.0f,1.0f,1.0f));
+			prev->setIconPosition(Button::IconPosition::Left);
+			prev->setIcon(ENTYPO_ICON_FOLDER);
+		}
+
+		configurable_titles[i] = label;
+
+		auto itembutton = new nanogui::Button(buttons, configurable_titles[i]);
+		std::string c = configurables[i];
+		if (_isEmpty(c)) {
+			itembutton->setEnabled(false);
+		}
+		itembutton->setTooltip(c);
+		itembutton->setBackgroundColor(nanogui::Color(0.9f,0.9f,0.9f,0.9f));
+		itembutton->setCallback([this,c]() {
+			_buildForm(c);
+			setVisible(false);
+			dispose();
+		});
+	}*/
+}
+
+ConfigWindow::~ConfigWindow() {
+	LOG(INFO) << __func__ << " (" << this << ")";
+}
+
+bool ConfigWindow::_isEmpty(const std::string &uri) {
+	// $id, $ref and tags always present
+	return ftl::config::find(uri)->getConfig().size() <= 3;
+}
+
+void ConfigWindow::__addElements(nanogui::FormHelper *form, const std::string &suri) {
+	using namespace nanogui;
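+	// Walk the configurable's JSON: primitive values become editable form
+	// fields, "$ref" entries are expanded inline, and nested objects become
+	// buttons that open a form of their own.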
+
+	Configurable *configurable = ftl::config::find(suri);
+	ftl::config::json_t data;
+	if (configurable) {
+		configurable->refresh();
+		data = configurable->getConfig();
+	}
+
+	for (auto i=data.begin(); i!=data.end(); ++i) {
+		if (i.key() == "$id") continue;
+
+		if (i.key() == "$ref" && i.value().is_string()) {
+			const std::string suri = std::string(i.value().get<string>());
+			__addElements(form, suri);
+			continue;
+		}
+
+		if (i.value().is_boolean()) {
+			string key = i.key();
+			form->addVariable<bool>(i.key(), [data,key,suri](const bool &b){
+				ftl::config::update(suri+"/"+key, b);
+			}, [data,key]() -> bool {
+				return data[key].get<bool>();
+			});
+		} else if (i.value().is_number_integer()) {
+			string key = i.key();
+			form->addVariable<int>(i.key(), [data,key,suri](const int &f){
+				ftl::config::update(suri+"/"+key, f);
+			}, [data,key]() -> int {
+				return data[key].get<int>();
+			});
+		} else if (i.value().is_number_float()) {
+			string key = i.key();
+			form->addVariable<float>(i.key(), [data,key,suri](const float &f){
+				ftl::config::update(suri+"/"+key, f);
+			}, [data,key]() -> float {
+				return data[key].get<float>();
+			});
+		} else if (i.value().is_string()) {
+			string key = i.key();
+			form->addVariable<string>(i.key(), [data,key,suri](const string &f){
+				ftl::config::update(suri+"/"+key, f);
+			}, [data,key]() -> string {
+				return data[key].get<string>();
+			});
+		} else if (i.value().is_object()) {
+			string key = i.key();
+			string nuri;
+
+			// Checking the URI with exists() prevents unloaded local configurations from being shown.
+			//if (suri.find('#') != string::npos && exists(suri+string("/")+key)) {
+			//	nuri = suri+string("/")+key;
+			//} else
+			if (exists(suri+string("/")+key)) {
+				nuri = suri+string("/")+key;
+			}
+
+			if (!nuri.empty()) {
+				nanogui::Window *window = form->window();
+				auto button = form->addButton(key, [window, nuri]() {
+					buildForm(window->screen(), nuri);
+				});
+
+				button->setIcon(ENTYPO_ICON_FOLDER);
+				button->setIconPosition(nanogui::Button::IconPosition::Left);
+				if (_isEmpty(nuri)) {
+					button->setEnabled(false);
+				}
+			}
+		}
+	}
+}
+
+void ConfigWindow::_buildForm(const std::string &suri) {
+	using namespace nanogui;
+
+	/*ftl::URI uri(suri);
+
+	FormHelper *form = new FormHelper(this->screen());
+	form->addWindow(Vector2i(100,50), uri.getFragment());
+	form->window()->setTheme(theme());
+
+	__addElements(form, suri);
+
+	// prevent parent window from being destroyed too early
+	incRef();  // TODO: Is this needed? It isn't a parent window?
+
+	auto close = new nanogui::Button(form->window()->buttonPanel(),	"",	ENTYPO_ICON_CROSS);
+	close->setTheme(dynamic_cast<ftl::gui2::Screen*>(screen())->getTheme("window_dark"));
+	close->setBackgroundColor(theme()->mWindowHeaderGradientBot);
+
+	auto *window = form->window();
+
+	close->setCallback([this, window](){
+		window->dispose();
+		decRef();
+	});
+	close->setBackgroundColor({80, 255});
+	form->window()->screen()->performLayout();
+	delete form;*/
+
+	buildForm(screen(), suri);
+}
+
+static MUTEX config_mtx;
+static std::unordered_map<std::string, nanogui::Window*> existing_configs;
+
+// Static version
+void ConfigWindow::buildForm(nanogui::Screen *screen, const std::string &suri) {
+	using namespace nanogui;
+
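+	// If a form for this URI is already open, bring it to the front instead
+	// of building a duplicate window.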
+	{
+		UNIQUE_LOCK(config_mtx, lk);
+		auto i = existing_configs.find(suri);
+		if (i != existing_configs.end()) {
+			screen->moveWindowToFront(i->second);
+			return;
+		}
+	}
+
+	ftl::URI uri(suri);
+
+	FormHelper *form = new FormHelper(screen);
+	form->addWindow(Vector2i(100,50), titleForURI(uri));
+	//form->window()->setTheme(theme());
+
+	{
+		UNIQUE_LOCK(config_mtx, lk);
+		existing_configs[suri] = form->window();
+	}
+
+	auto *window = form->window();
+	window->setTheme(dynamic_cast<ftl::gui2::Screen*>(window->screen())->getTheme("window_dark"));
+	window->setWidth(200);
+
+	__addElements(form, suri);
+
+	// prevent parent window from being destroyed too early
+	//incRef();  // TODO: Is this needed? It isn't a parent window?
+
+	auto close = new nanogui::Button(form->window()->buttonPanel(),	"",	ENTYPO_ICON_CROSS);
+	close->setTheme(dynamic_cast<ftl::gui2::Screen*>(screen)->getTheme("window_dark"));
+	//close->setBackgroundColor(theme()->mWindowHeaderGradientBot);
+
+	close->setCallback([window, suri](){
+		window->dispose();
+		//decRef();
+		UNIQUE_LOCK(config_mtx, lk);
+		existing_configs.erase(suri);
+	});
+	close->setBackgroundColor({80, 255});
+	form->window()->screen()->performLayout();
+	delete form;
+}
+
+bool ConfigWindow::exists(const std::string &uri) {
+	return ftl::config::find(uri) != nullptr;
+}
diff --git a/applications/gui2/src/views/config.hpp b/applications/gui2/src/views/config.hpp
new file mode 100644
index 000000000..45a23b11d
--- /dev/null
+++ b/applications/gui2/src/views/config.hpp
@@ -0,0 +1,36 @@
+#pragma once
+
+#include <nanogui/window.h>
+#include <nanogui/formhelper.h>
+
+#include <ftl/master.hpp>
+#include <ftl/uuid.hpp>
+#include <ftl/net_configurable.hpp>
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Allow configurable editing.
+ */
+class ConfigWindow : public nanogui::Window {
+public:
+	ConfigWindow(nanogui::Widget *parent, ftl::ctrl::Master *ctrl);
+	virtual ~ConfigWindow();
+
+	static void buildForm(nanogui::Screen *screen, const std::string &uri);
+
+private:
+	ftl::ctrl::Master *ctrl_;
+
+	static bool _isEmpty(const std::string &uri);
+	void _buildForm(const std::string &uri);
+	static void __addElements(nanogui::FormHelper *form, const std::string &suri);
+	static bool exists(const std::string &uri);
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/statistics.cpp b/applications/gui2/src/views/statistics.cpp
new file mode 100644
index 000000000..e72bc160a
--- /dev/null
+++ b/applications/gui2/src/views/statistics.cpp
@@ -0,0 +1,106 @@
+#include "statistics.hpp"
+#include "../modules/statistics.hpp"
+
+#include <ftl/streams/builder.hpp>
+#include <ftl/streams/netstream.hpp>
+#include <ftl/render/colouriser.hpp>
+#include <ftl/utility/string.hpp>
+
+#include <nanogui/screen.h>
+#include <nanogui/opengl.h>
+
+#include <loguru.hpp>
+
+using ftl::gui2::StatisticsWidget;
+using std::string;
+
+StatisticsWidget::StatisticsWidget(nanogui::Widget* parent, ftl::gui2::Statistics* ctrl) :
+		nanogui::Window(parent,""), ctrl_(ctrl), last_stats_count_(0) {
+
+	setWidth(parent->width()/2);
+}
+
+void StatisticsWidget::draw(NVGcontext *ctx) {
+	int margin = 20;
+	const auto &screenSize = screen()->size();
+	float rowh = 10.0;
+	int count = 0;
+
+	setPosition({screenSize[0] - width() - margin, 0});
+	setHeight(screenSize[1]);
+
+	const auto pos = absolutePosition();
+	auto panels = ctrl_->get();
+	for (unsigned int i = 0; i < panels.size(); i++) {
+		if (panels[i].second.is_structured()) {
+			for (auto j : panels[i].second.items()) {
+				std::string msg = j.key();
+
+				auto colour = nanogui::Color(244, 244, 244, 255);
+				int fsize = 15;
+				int entypo = 0;
+				
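+				// An entry is either a plain value or an object carrying its
+				// "value" plus optional "nokey", "colour", "size" and "icon"
+				// presentation fields.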
+				if (j.value().is_object()) {
+					const auto &val = j.value()["value"];
+
+					if (j.value().contains("nokey")) {
+						msg = "";
+					}
+					if (j.value().contains("colour")) {
+						uchar4 cucol = ftl::render::parseCUDAColour(j.value()["colour"].get<std::string>());
+						colour = nanogui::Color(cucol.x, cucol.y, cucol.z, 255);
+					}
+					if (j.value().contains("size")) {
+						fsize = j.value()["size"].get<int>();
+					}
+					if (j.value().contains("icon")) {
+						entypo = j.value()["icon"].get<int>();
+					}
+
+					if (val.is_string()) {
+						if (msg.size() > 0) msg += std::string(": ");
+						msg += val.get<std::string>();
+					} else if (val.is_number()) {
+						if (msg.size() > 0) msg += std::string(": ");
+						msg += to_string_with_precision(val.get<float>(),2);
+					}
+				} else if (j.value().is_string()) {
+					msg += std::string(": ") + j.value().get<std::string>();
+				} else if (j.value().is_number()) {
+					msg += std::string(": ") + to_string_with_precision(j.value().get<float>(),2);
+				} else if (j.value().is_boolean()) {
+
+				}
+
+				rowh += float(fsize)+5.0f;
+
+				nvgFontSize(ctx, fsize);
+				nvgTextAlign(ctx, NVG_ALIGN_RIGHT);
+
+				float tw = 0.0f;
+
+				if (msg.size() > 0) {
+					if (panels[i].first == ftl::gui2::StatisticsPanel::LOGGING) nvgFontFace(ctx, "sans");
+					else nvgFontFace(ctx, "sans-bold");
+					nvgFillColor(ctx, nanogui::Color(8, 8, 8, 255)); // shadow
+					tw = nvgTextBounds(ctx, pos[0] + width(), rowh, msg.c_str(), nullptr, nullptr);
+					nvgText(ctx, pos[0] + width(), rowh, msg.c_str(), nullptr);
+					nvgFillColor(ctx, colour);
+					nvgText(ctx, pos[0] + width() - 1, rowh - 1, msg.c_str(), nullptr);
+					tw += 10;
+				}
+				if (entypo > 0) {
+					auto icon = nanogui::utf8(entypo);
+					nvgFontFace(ctx, "icons");
+					nvgFontSize(ctx, float(fsize)*0.8f);
+					nvgFillColor(ctx, nanogui::Color(8, 8, 8, 255)); // shadow
+					nvgText(ctx, pos[0] + width() - tw, rowh, icon.data(), nullptr);
+					nvgFillColor(ctx, colour);
+					nvgText(ctx, pos[0] + width() - 1 - tw, rowh - 1, icon.data(), nullptr);
+				}
+
+				++count;
+			}
+		}
+	}
+}
diff --git a/applications/gui2/src/views/statistics.hpp b/applications/gui2/src/views/statistics.hpp
new file mode 100644
index 000000000..f425bf3de
--- /dev/null
+++ b/applications/gui2/src/views/statistics.hpp
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <nanogui/widget.h>
+#include <nanogui/window.h>
+
+namespace ftl {
+namespace gui2 {
+
+class Statistics;
+
+class StatisticsWidget : public nanogui::Window {
+public:
+	StatisticsWidget(nanogui::Widget *parent, Statistics* ctrl);
+	virtual void draw(NVGcontext *ctx);
+
+	bool mouseButtonEvent(const Eigen::Vector2i &p, int button, bool down, int modifiers) override { return false; }
+
+private:
+	Statistics* ctrl_;
+	int last_stats_count_;
+	
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/views/thumbnails.cpp b/applications/gui2/src/views/thumbnails.cpp
new file mode 100644
index 000000000..7250e1763
--- /dev/null
+++ b/applications/gui2/src/views/thumbnails.cpp
@@ -0,0 +1,211 @@
+#include "thumbnails.hpp"
+#include "../modules/thumbnails.hpp"
+#include <ftl/utility/gltexture.hpp>
+
+#include <opencv2/imgproc.hpp>
+#include <opencv2/imgcodecs.hpp>
+#include <opencv2/cudaarithm.hpp>
+
+#include <ftl/operators/antialiasing.hpp>
+#include <ftl/cuda/normals.hpp>
+#include <ftl/render/colouriser.hpp>
+#include <ftl/cuda/transform.hpp>
+#include <ftl/operators/gt_analysis.hpp>
+#include <ftl/operators/poser.hpp>
+#include <ftl/cuda/colour_cuda.hpp>
+#include <ftl/streams/parsers.hpp>
+#include <ftl/rgbd/frame.hpp>
+
+#include <nanogui/label.h>
+#include <nanogui/tabwidget.h>
+#include <nanogui/vscrollpanel.h>
+#include <nanogui/layout.h>
+#include <nanogui/popup.h>
+
+#include <loguru.hpp>
+
+using ftl::gui2::ThumbView;
+using ftl::gui2::Thumbnails;
+using ftl::utility::GLTexture;
+using ftl::gui2::ThumbnailsController;
+
+using ftl::codecs::Channel;
+using ftl::data::FrameID;
+
+class ThumbView : public ftl::gui2::ImageView {
+public:
+	ThumbView(nanogui::Widget *parent, ThumbnailsController *control, ftl::data::FrameID id, const std::string &name);
+	virtual ~ThumbView() {}
+
+	virtual bool mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) override;
+	virtual void draw(NVGcontext *ctx) override;
+
+	void setName(const std::string &str) { name_ = str; }
+	void update(ftl::rgbd::Frame& frame, Channel c);
+
+private:
+	ThumbnailsController *ctrl_;
+	GLTexture texture_;
+	const ftl::data::FrameID id_;
+	std::string name_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+ThumbView::ThumbView(nanogui::Widget *parent, ThumbnailsController *control, ftl::data::FrameID id, const std::string &name) :
+		ftl::gui2::ImageView(parent), ctrl_(control), id_(id), name_(name) {
+	setCursor(nanogui::Cursor::Hand);
+	setFixedOffset(true);
+	setFixedScale(true);
+}
+
+bool ThumbView::mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) {
+	if (button == 0) {
+		if (!down) {
+			ctrl_->show_camera(id_);
+		}
+	}
+	return true;
+}
+
+void ThumbView::update(ftl::rgbd::Frame &frame, Channel c) {
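+	// Copy the requested channel into the GL texture, preferring the GPU
+	// copy when one exists, then bind it for drawing.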
+	if (!frame.hasChannel(c)) {
+		return;
+	}
+
+	const auto &vf = frame.get<ftl::rgbd::VideoFrame>(c);
+
+	if (vf.isGPU()) {
+		texture_.copyFrom(vf.getGPU());
+	} else {
+		texture_.copyFrom(vf.getCPU());
+	}
+	if (texture_.isValid()) {
+		bindImage(texture_.texture());
+	}
+}
+
+void ThumbView::draw(NVGcontext *ctx) {
+	fit();
+	// Image
+	ftl::gui2::ImageView::draw(ctx);
+	// Label
+	nvgScissor(ctx, mPos.x(), mPos.y(), mSize.x(), mSize.y());
+	nvgFontSize(ctx, 14);
+	nvgFontFace(ctx, "sans-bold");
+	nvgTextAlign(ctx, NVG_ALIGN_CENTER);
+	nvgText(ctx, mPos.x() + mSize.x()/2.0f, mPos.y()+mSize.y() - 18, name_.c_str(), NULL);
+	nvgResetScissor(ctx);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+Thumbnails::Thumbnails(ftl::gui2::Screen *parent, ftl::gui2::ThumbnailsController *control) :
+		View(parent), ctrl_(control), tabwidget_(nullptr) {
+
+	tabwidget_ = new nanogui::TabWidget(this);
+	tabwidget_->setFixedSize(size());
+
+	context_menu_ = new nanogui::Window(parent, "");
+	context_menu_->setVisible(false);
+	context_menu_->setLayout(new nanogui::BoxLayout(nanogui::Orientation::Vertical));
+
+	auto *button = new nanogui::Button(context_menu_, "Remove");
+	button->setCallback([this]() {
+		int ix = tabwidget_->activeTab();
+		LOG(INFO) << "REMOVE FSID " << ix;
+
+		tabwidget_->removeTab(ix);
+		thumbnails_.erase(ix);
+		context_menu_->setVisible(false);
+		ctrl_->removeFrameset(ix);
+		//screen()->performLayout();
+	});
+}
+
+
+Thumbnails::~Thumbnails() {
+	if (context_menu_->parent()->getRefCount() > 0) {
+		context_menu_->setVisible(false);
+		context_menu_->dispose();
+	}
+}
+
+bool Thumbnails::mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) {
+	bool r = View::mouseButtonEvent(p, button, down, modifiers);
+
+	if (!r) {
+		if (button == 1) {
+			if (!down) {
+				context_menu_->setPosition(p - mPos);
+				context_menu_->setVisible(true);
+				return true;
+			}
+		} else {
+			context_menu_->setVisible(false);
+		}
+	}
+
+	return true;
+}
+
+void Thumbnails::updateThumbnails() {
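+	// Create a tab per frameset, a thumbnail per frame, and refresh the
+	// textures whenever a frameset's timestamp advances.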
+	const Channel channel = Channel::Colour;
+	bool perform_layout = false;
+	auto framesets = ctrl_->getFrameSets();
+	for (auto& fs : framesets) {
+		unsigned int fsid = fs->frameset();
+
+		// create new tab if necessary
+		if (thumbnails_.count(fsid) == 0) {
+			if (fs->frames.size() == 0) {
+				// setting layout to widget without any children will crash
+				// nanogui, skip
+				continue;
+			}
+
+			auto* tab = tabwidget_->createTab("Frameset " + std::to_string(fsid));
+			tab->setLayout(new nanogui::BoxLayout
+				(nanogui::Orientation::Vertical, nanogui::Alignment::Middle, 40));
+			auto* panel = new nanogui::Widget(tab);
+			panel->setLayout(
+				new nanogui::GridLayout(nanogui::Orientation::Horizontal, 3,
+										nanogui::Alignment::Middle, 0, 10));
+
+			thumbnails_[fsid] = {0, panel, {}};
+			perform_layout = true;
+		}
+
+		auto& thumbs = thumbnails_[fsid];
+		while (thumbs.thumbnails.size() < fs->frames.size()) {
+			int source = thumbs.thumbnails.size();
+			auto &frame = fs->frames[source];
+
+			perform_layout = true;
+
+			std::string name = frame.name();
+
+			auto* thumbnail = new ThumbView(thumbs.panel, ctrl_, FrameID(fsid, source), name);
+			thumbnail->setFixedSize(thumbsize_);
+			thumbs.thumbnails.push_back(thumbnail);
+		}
+
+		if (fs->timestamp() > thumbs.timestamp) {
+			for(size_t i = 0; i < fs->frames.size(); i++) {
+				thumbs.thumbnails[i]->update((*fs)[i].cast<ftl::rgbd::Frame>(), channel);
+			}
+			thumbs.timestamp = fs->timestamp();
+		}
+	}
+	if (perform_layout) {
+		screen()->performLayout();
+	}
+}
+
+void Thumbnails::draw(NVGcontext *ctx) {
+	tabwidget_->setFixedSize(size());
+	updateThumbnails();
+	View::draw(ctx);
+}
+
diff --git a/applications/gui2/src/views/thumbnails.hpp b/applications/gui2/src/views/thumbnails.hpp
new file mode 100644
index 000000000..c9440967d
--- /dev/null
+++ b/applications/gui2/src/views/thumbnails.hpp
@@ -0,0 +1,50 @@
+#pragma once
+#include "../view.hpp"
+
+#include "../widgets/imageview.hpp"
+
+#include <nanogui/glcanvas.h>
+#include <nanogui/glutil.h>
+#include <nanogui/imageview.h>
+
+namespace ftl {
+namespace gui2 {
+
+class ThumbnailsController;
+class ThumbView;
+
+class Thumbnails : public View {
+public:
+	Thumbnails(Screen *parent, ThumbnailsController *controller);
+	virtual ~Thumbnails();
+
+	virtual void draw(NVGcontext *ctx) override;
+
+	bool mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) override;
+
+private:
+	void updateThumbnails();
+	void addTab(unsigned int fsid);
+
+	struct FSThumbnails {
+		int64_t timestamp;
+		nanogui::Widget* panel;
+		std::vector<ThumbView*> thumbnails;
+	};
+
+	std::mutex mtx_;
+	ftl::gui2::ThumbnailsController *ctrl_;
+	nanogui::TabWidget* tabwidget_;
+
+	std::map<unsigned int, FSThumbnails> thumbnails_;
+
+	nanogui::Vector2i thumbsize_ = nanogui::Vector2i(320,180);
+
+	nanogui::Window *context_menu_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/combobox.cpp b/applications/gui2/src/widgets/combobox.cpp
new file mode 100644
index 000000000..fc76210aa
--- /dev/null
+++ b/applications/gui2/src/widgets/combobox.cpp
@@ -0,0 +1,103 @@
+/*
+	src/combobox.cpp -- simple combo box widget based on a popup button
+
+	NanoGUI was developed by Wenzel Jakob <wenzel.jakob@epfl.ch>.
+	The widget drawing code is based on the NanoVG demo application
+	by Mikko Mononen.
+
+	All rights reserved. Use of this source code is governed by a
+	BSD-style license that can be found in the LICENSE.txt file.
+*/
+
+#include "combobox.hpp"
+
+#include <nanogui/layout.h>
+#include <nanogui/serializer/core.h>
+#include <cassert>
+
+using nanogui::Vector2i;
+using nanogui::Vector2f;
+using nanogui::GroupLayout;
+using nanogui::Serializer;
+
+using ftl::gui2::ComboBox;
+using ftl::gui2::PopupButton;
+
+ComboBox::ComboBox(Widget *parent) : PopupButton(parent), mSelectedIndex(0) {
+}
+
+ComboBox::ComboBox(Widget *parent, const std::vector<std::string> &items)
+	: PopupButton(parent), mSelectedIndex(0) {
+	setItems(items);
+}
+
+ComboBox::ComboBox(Widget *parent, const std::vector<std::string> &items, const std::vector<std::string> &itemsShort)
+	: PopupButton(parent), mSelectedIndex(0) {
+	setItems(items, itemsShort);
+}
+
+void ComboBox::setSelectedIndex(int idx) {
+	if (mItemsShort.empty())
+		return;
+	const std::vector<Widget *> &children = popup()->children();
+	((Button *) children[mSelectedIndex])->setPushed(false);
+	((Button *) children[idx])->setPushed(true);
+	mSelectedIndex = idx;
+	setCaption(mItemsShort[idx]);
+}
+
+void ComboBox::setItems(const std::vector<std::string> &items, const std::vector<std::string> &itemsShort) {
+	assert(items.size() == itemsShort.size());
+	mItems = items;
+	mItemsShort = itemsShort;
+	if (mSelectedIndex < 0 || mSelectedIndex >= (int) items.size())
+		mSelectedIndex = 0;
+	while (mPopup->childCount() != 0)
+		mPopup->removeChild(mPopup->childCount()-1);
+	mPopup->setLayout(new GroupLayout(10));
+	int index = 0;
+	for (const auto &str: items) {
+		Button *button = new Button(mPopup, str);
+		button->setFlags(Button::RadioButton);
+		button->setCallback([&, index] {
+			mSelectedIndex = index;
+			setCaption(mItemsShort[index]);
+			setPushed(false);
+			popup()->setVisible(false);
+			if (mCallback)
+				mCallback(index);
+		});
+		index++;
+	}
+	setSelectedIndex(mSelectedIndex);
+}
+
+bool ComboBox::scrollEvent(const Vector2i &p, const Vector2f &rel) {
+	if (rel.y() < 0) {
+		setSelectedIndex(std::min(mSelectedIndex+1, (int)(items().size()-1)));
+		if (mCallback)
+			mCallback(mSelectedIndex);
+		return true;
+	} else if (rel.y() > 0) {
+		setSelectedIndex(std::max(mSelectedIndex-1, 0));
+		if (mCallback)
+			mCallback(mSelectedIndex);
+		return true;
+	}
+	return Widget::scrollEvent(p, rel);
+}
+
+void ComboBox::save(Serializer &s) const {
+	Widget::save(s);
+	s.set("items", mItems);
+	s.set("itemsShort", mItemsShort);
+	s.set("selectedIndex", mSelectedIndex);
+}
+
+bool ComboBox::load(Serializer &s) {
+	if (!Widget::load(s)) return false;
+	if (!s.get("items", mItems)) return false;
+	if (!s.get("itemsShort", mItemsShort)) return false;
+	if (!s.get("selectedIndex", mSelectedIndex)) return false;
+	return true;
+}
diff --git a/applications/gui2/src/widgets/combobox.hpp b/applications/gui2/src/widgets/combobox.hpp
new file mode 100644
index 000000000..b137fd505
--- /dev/null
+++ b/applications/gui2/src/widgets/combobox.hpp
@@ -0,0 +1,95 @@
+/*
+	Modification: Inherits from ftl::gui2::PopupButton
+
+	NanoGUI was developed by Wenzel Jakob <wenzel.jakob@epfl.ch>.
+	The nanogui::Widget drawing code is based on the NanoVG demo application
+	by Mikko Mononen.
+
+	All rights reserved. Use of this source code is governed by a
+	BSD-style license that can be found in the LICENSE.txt file.
+*/
+/**
+ * \file nanogui/combobox.h
+ *
+ * \brief Simple combo box nanogui::Widget based on a popup button.
+ */
+
+#pragma once
+
+#include "popupbutton.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * \class ComboBox combobox.h nanogui/combobox.h
+ *
+ * \brief Simple combo box nanogui::Widget based on a popup button.
+ */
+class NANOGUI_EXPORT ComboBox : public PopupButton {
+public:
+	/// Create an empty combo box
+	ComboBox(nanogui::Widget *parent);
+
+	/// Create a new combo box with the given items
+	ComboBox(nanogui::Widget *parent, const std::vector<std::string> &items);
+
+	/**
+	 * \brief Create a new combo box with the given items, providing both short and
+	 * long descriptive labels for each item
+	 */
+	ComboBox(nanogui::Widget *parent, const std::vector<std::string> &items,
+			 const std::vector<std::string> &itemsShort);
+
+	/// The callback to execute for this ComboBox.
+	std::function<void(int)> callback() const { return mCallback; }
+
+	/// Sets the callback to execute for this ComboBox.
+	void setCallback(const std::function<void(int)> &callback) { mCallback = callback; }
+
+	/// The current index this ComboBox has selected.
+	int selectedIndex() const { return mSelectedIndex; }
+
+	/// Sets the current index this ComboBox has selected.
+	void setSelectedIndex(int idx);
+
+	/// Sets the items for this ComboBox, providing both short and long descriptive labels for each item.
+	void setItems(const std::vector<std::string> &items, const std::vector<std::string> &itemsShort);
+
+	/// Sets the items for this ComboBox.
+	void setItems(const std::vector<std::string> &items) { setItems(items, items); }
+
+	/// The items associated with this ComboBox.
+	const std::vector<std::string> &items() const { return mItems; }
+
+	/// The short descriptions associated with this ComboBox.
+	const std::vector<std::string> &itemsShort() const { return mItemsShort; }
+
+	/// Handles mouse scrolling events for this ComboBox.
+	virtual bool scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) override;
+
+	/// Saves the state of this ComboBox to the specified nanogui::Serializer.
+	virtual void save(nanogui::Serializer &s) const override;
+
+	/// Sets the state of this ComboBox from the specified nanogui::Serializer.
+	virtual bool load(nanogui::Serializer &s) override;
+
+protected:
+	/// The items associated with this ComboBox.
+	std::vector<std::string> mItems;
+
+	/// The short descriptions of items associated with this ComboBox.
+	std::vector<std::string> mItemsShort;
+
+	/// The callback for this ComboBox.
+	std::function<void(int)> mCallback;
+
+	/// The current index this ComboBox has selected.
+	int mSelectedIndex;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/imageview.cpp b/applications/gui2/src/widgets/imageview.cpp
new file mode 100644
index 000000000..b8531d562
--- /dev/null
+++ b/applications/gui2/src/widgets/imageview.cpp
@@ -0,0 +1,626 @@
+/*
+	nanogui/imageview.cpp -- Widget used to display images.
+
+	The image view widget was contributed by Stefan Ivanov.
+
+	NanoGUI was developed by Wenzel Jakob <wenzel.jakob@epfl.ch>.
+	The widget drawing code is based on the NanoVG demo application
+	by Mikko Mononen.
+
+	All rights reserved. Use of this source code is governed by a
+	BSD-style license that can be found in the LICENSE.txt file.
+*/
+
+#include <nanogui/window.h>
+#include <nanogui/screen.h>
+#include <nanogui/theme.h>
+#include <cmath>
+
+#include <ftl/utility/gltexture.hpp>
+#include "imageview.hpp"
+
+using namespace nanogui;
+using ftl::gui2::ImageView;
+using ftl::gui2::FTLImageView;
+using ftl::utility::GLTexture;
+
+namespace {
+	std::vector<std::string> tokenize(const std::string &string,
+									  const std::string &delim = "\n",
+									  bool includeEmpty = false) {
+		std::string::size_type lastPos = 0, pos = string.find_first_of(delim, lastPos);
+		std::vector<std::string> tokens;
+
+		while (lastPos != std::string::npos) {
+			std::string substr = string.substr(lastPos, pos - lastPos);
+			if (!substr.empty() || includeEmpty)
+				tokens.push_back(std::move(substr));
+			lastPos = pos;
+			if (lastPos != std::string::npos) {
+				lastPos += 1;
+				pos = string.find_first_of(delim, lastPos);
+			}
+		}
+
+		return tokens;
+	}
+
+	constexpr char const *const defaultImageViewVertexShader =
+		R"(#version 330
+		uniform vec2 scaleFactor;
+		uniform vec2 position;
+		in vec2 vertex;
+		out vec2 uv;
+		void main() {
+			uv = vertex;
+			vec2 scaledVertex = (vertex * scaleFactor) + position;
+			gl_Position  = vec4(2.0*scaledVertex.x - 1.0,
+								1.0 - 2.0*scaledVertex.y,
+								0.0, 1.0);
+
+		})";
+
+	constexpr char const *const defaultImageViewFragmentShader =
+		R"(#version 330
+		uniform sampler2D image;
+		out vec4 color;
+		in vec2 uv;
+		void main() {
+			color = texture(image, uv);
+			color.w = 1;
+		})";
+
+}
+
+ftl::gui2::ImageView::ImageView(Widget* parent, GLuint imageID)
+	: Widget(parent), mImageID(imageID), mScale(1.0f), mOffset(Vector2f::Zero()),
+	mFixedScale(false), mFixedOffset(false), mPixelInfoCallback(nullptr) {
+
+	mImageSize = {0, 0};
+
+	if (imageID != unsigned(-1)) {
+		updateImageParameters();
+	}
+
+	mShader.init("ImageViewShader", defaultImageViewVertexShader,
+				 defaultImageViewFragmentShader);
+
+	MatrixXu indices(3, 2);
+	indices.col(0) << 0, 1, 2;
+	indices.col(1) << 2, 3, 1;
+
+	MatrixXf vertices(2, 4);
+	vertices.col(0) << 0, 0;
+	vertices.col(1) << 1, 0;
+	vertices.col(2) << 0, 1;
+	vertices.col(3) << 1, 1;
+
+	mShader.bind();
+	mShader.uploadIndices(indices);
+	mShader.uploadAttrib("vertex", vertices);
+}
+
+ftl::gui2::ImageView::~ImageView() {
+	mShader.free();
+}
+
+void ftl::gui2::ImageView::bindImage(GLuint imageId) {
+	if (imageId == unsigned(-1)) {
+		return;
+	}
+
+	mImageID = imageId;
+	updateImageParameters();
+
+}
+
+Vector2f ftl::gui2::ImageView::imageCoordinateAt(const Vector2f& position) const {
+	auto imagePosition = position - mOffset;
+	return imagePosition / mScale;
+}
+
+Vector2f ftl::gui2::ImageView::clampedImageCoordinateAt(const Vector2f& position) const {
+	auto imageCoordinate = imageCoordinateAt(position);
+	return imageCoordinate.cwiseMax(Vector2f::Zero()).cwiseMin(imageSizeF());
+}
+
+Vector2f ftl::gui2::ImageView::positionForCoordinate(const Vector2f& imageCoordinate) const {
+	return mScale*imageCoordinate + mOffset;
+}
+
+void ftl::gui2::ImageView::setImageCoordinateAt(const Vector2f& position, const Vector2f& imageCoordinate) {
+	// Calculate where the new offset must be in order to satisfy the image position equation.
+	// Round the floating point values to balance out the floating point to integer conversions.
+	mOffset = position - (imageCoordinate * mScale);
+
+	// Clamp offset so that the image remains near the screen.
+	mOffset = mOffset.cwiseMin(sizeF()).cwiseMax(-scaledImageSizeF());
+}
+
+void ftl::gui2::ImageView::center() {
+	mOffset = (sizeF() - scaledImageSizeF()) / 2;
+}
+
+void ftl::gui2::ImageView::fit() {
+	// Calculate the appropriate scaling factor.
+	mScale = (sizeF().cwiseQuotient(imageSizeF())).minCoeff();
+	center();
+}
+
+void ftl::gui2::ImageView::setScaleCentered(float scale) {
+	auto centerPosition = sizeF() / 2;
+	auto p = imageCoordinateAt(centerPosition);
+	mScale = scale;
+	setImageCoordinateAt(centerPosition, p);
+}
+
+void ftl::gui2::ImageView::moveOffset(const Vector2f& delta) {
+	// Apply the delta to the offset.
+	mOffset += delta;
+
+	// Prevent the image from going out of bounds.
+	auto scaledSize = scaledImageSizeF();
+	if (mOffset.x() + scaledSize.x() < 0)
+		mOffset.x() = -scaledSize.x();
+	if (mOffset.x() > sizeF().x())
+		mOffset.x() = sizeF().x();
+	if (mOffset.y() + scaledSize.y() < 0)
+		mOffset.y() = -scaledSize.y();
+	if (mOffset.y() > sizeF().y())
+		mOffset.y() = sizeF().y();
+}
+
+void ftl::gui2::ImageView::zoom(int amount, const Vector2f& focusPosition) {
+	auto focusedCoordinate = imageCoordinateAt(focusPosition);
+	float scaleFactor = std::pow(mZoomSensitivity, amount);
+	mScale = std::max(0.01f, scaleFactor * mScale);
+	setImageCoordinateAt(focusPosition, focusedCoordinate);
+}
+
+bool ftl::gui2::ImageView::mouseDragEvent(const Vector2i& p, const Vector2i& rel, int button, int /*modifiers*/) {
+	if ((button & (1 << GLFW_MOUSE_BUTTON_RIGHT)) != 0 && !mFixedOffset) {
+		setImageCoordinateAt((p + rel).cast<float>(), imageCoordinateAt(p.cast<float>()));
+		return true;
+	}
+	return false;
+}
+
+bool ftl::gui2::ImageView::gridVisible() const {
+	return (mGridThreshold != -1) && (mScale > mGridThreshold);
+}
+
+bool ftl::gui2::ImageView::pixelInfoVisible() const {
+	return mPixelInfoCallback && (mPixelInfoThreshold != -1) && (mScale > mPixelInfoThreshold);
+}
+
+bool ftl::gui2::ImageView::helpersVisible() const {
+	return gridVisible() || pixelInfoVisible();
+}
+
+bool ftl::gui2::ImageView::scrollEvent(const Vector2i& p, const Vector2f& rel) {
+	if (mFixedScale)
+		return false;
+	float v = rel.y();
+	if (std::abs(v) < 1)
+		v = std::copysign(1.f, v);
+	zoom(v, (p - position()).cast<float>());
+	return true;
+}
+
+bool ftl::gui2::ImageView::keyboardEvent(int key, int /*scancode*/, int action, int modifiers) {
+	if (action) {
+		switch (key) {
+		case GLFW_KEY_LEFT:
+			if (!mFixedOffset) {
+				if (GLFW_MOD_CONTROL & modifiers)
+					moveOffset(Vector2f(30, 0));
+				else
+					moveOffset(Vector2f(10, 0));
+				return true;
+			}
+			break;
+		case GLFW_KEY_RIGHT:
+			if (!mFixedOffset) {
+				if (GLFW_MOD_CONTROL & modifiers)
+					moveOffset(Vector2f(-30, 0));
+				else
+					moveOffset(Vector2f(-10, 0));
+				return true;
+			}
+			break;
+		case GLFW_KEY_DOWN:
+			if (!mFixedOffset) {
+				if (GLFW_MOD_CONTROL & modifiers)
+					moveOffset(Vector2f(0, -30));
+				else
+					moveOffset(Vector2f(0, -10));
+				return true;
+			}
+			break;
+		case GLFW_KEY_UP:
+			if (!mFixedOffset) {
+				if (GLFW_MOD_CONTROL & modifiers)
+					moveOffset(Vector2f(0, 30));
+				else
+					moveOffset(Vector2f(0, 10));
+				return true;
+			}
+			break;
+		}
+	}
+	return false;
+}
+
+bool ftl::gui2::ImageView::keyboardCharacterEvent(unsigned int codepoint) {
+	switch (codepoint) {
+	case '-':
+		if (!mFixedScale) {
+			zoom(-1, sizeF() / 2);
+			return true;
+		}
+		break;
+	case '+':
+		if (!mFixedScale) {
+			zoom(1, sizeF() / 2);
+			return true;
+		}
+		break;
+	case 'c':
+		if (!mFixedOffset) {
+			center();
+			return true;
+		}
+		break;
+	case 'f':
+		if (!mFixedOffset && !mFixedScale) {
+			fit();
+			return true;
+		}
+		break;
+	case '1': case '2': case '3': case '4': case '5':
+	case '6': case '7': case '8': case '9':
+		if (!mFixedScale) {
+			setScaleCentered(1 << (codepoint - '1'));
+			return true;
+		}
+		break;
+	default:
+		return false;
+	}
+	return false;
+}
+
+Vector2i ftl::gui2::ImageView::preferredSize(NVGcontext* /*ctx*/) const {
+	return mImageSize;
+}
+
+void ftl::gui2::ImageView::performLayout(NVGcontext* ctx) {
+	Widget::performLayout(ctx);
+}
+
+void ftl::gui2::ImageView::draw(NVGcontext* ctx) {
+	Widget::draw(ctx);
+
+	if (mImageID != unsigned(-1)) {
+		nvgEndFrame(ctx); // Flush the NanoVG draw stack; it is not necessary to call nvgBeginFrame afterwards.
+		//drawImageBorder(ctx);
+
+		// Calculate several variables that need to be sent to OpenGL in order for the image to be
+		// properly displayed inside the widget.
+		const Screen* screen = dynamic_cast<const Screen*>(this->screen());
+		Vector2f screenSize = screen->size().cast<float>();
+		Vector2f scaleFactor = mScale * imageSizeF().cwiseQuotient(screenSize);
+		Vector2f positionInScreen = absolutePosition().cast<float>();
+		Vector2f positionAfterOffset = positionInScreen + mOffset;
+		Vector2f imagePosition = positionAfterOffset.cwiseQuotient(screenSize);
+		glEnable(GL_SCISSOR_TEST);
+		float r = screen->pixelRatio();
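+		// Clip to the widget area; the scissor rectangle is in framebuffer pixels with a
+		// bottom-left origin, hence the Y flip and the pixel-ratio scaling.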
+		glScissor(positionInScreen.x() * r,
+				(screenSize.y() - positionInScreen.y() - size().y()) * r,
+				size().x() * r, size().y() * r);
+		mShader.bind();
+		glActiveTexture(GL_TEXTURE0);
+		glBindTexture(GL_TEXTURE_2D, mImageID);
+		mShader.setUniform("image", 0);
+		mShader.setUniform("scaleFactor", scaleFactor);
+		mShader.setUniform("position", imagePosition);
+		mShader.drawIndexed(GL_TRIANGLES, 0, 2);
+		glDisable(GL_SCISSOR_TEST);
+	}
+
+	if (helpersVisible())
+		drawHelpers(ctx);
+
+	//drawWidgetBorder(ctx);
+}
+
+void ftl::gui2::ImageView::updateImageParameters() {
+	// Query the width of the OpenGL texture.
+	glBindTexture(GL_TEXTURE_2D, mImageID);
+	GLint w, h;
+	glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &w);
+	glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &h);
+	mImageSize = Vector2i(w, h);
+}
+
+void ftl::gui2::ImageView::drawWidgetBorder(NVGcontext* ctx) const {
+	nvgBeginPath(ctx);
+	nvgStrokeWidth(ctx, 1);
+	nvgRoundedRect(ctx, mPos.x() + 0.5f, mPos.y() + 0.5f, mSize.x() - 1,
+				   mSize.y() - 1, 0);
+	nvgStrokeColor(ctx, mTheme->mWindowPopup);
+	nvgStroke(ctx);
+
+	nvgBeginPath(ctx);
+	nvgRoundedRect(ctx, mPos.x() + 0.5f, mPos.y() + 0.5f, mSize.x() - 1,
+				   mSize.y() - 1, mTheme->mButtonCornerRadius);
+	nvgStrokeColor(ctx, mTheme->mBorderDark);
+	nvgStroke(ctx);
+}
+
+void ftl::gui2::ImageView::drawImageBorder(NVGcontext* ctx) const {
+	nvgSave(ctx);
+	nvgBeginPath(ctx);
+	nvgScissor(ctx, mPos.x(), mPos.y(), mSize.x(), mSize.y());
+	nvgStrokeWidth(ctx, 1.0f);
+	Vector2i borderPosition = mPos + mOffset.cast<int>();
+	Vector2i borderSize = scaledImageSizeF().cast<int>();
+	nvgRect(ctx, borderPosition.x() - 0.5f, borderPosition.y() - 0.5f,
+			borderSize.x() + 1, borderSize.y() + 1);
+	nvgStrokeColor(ctx, Color(1.0f, 1.0f, 1.0f, 1.0f));
+	nvgStroke(ctx);
+	nvgResetScissor(ctx);
+	nvgRestore(ctx);
+}
+
+void ftl::gui2::ImageView::drawHelpers(NVGcontext* ctx) const {
+	// We need to apply mPos after the transformation to account for the position of the widget
+	// relative to the parent.
+	Vector2f upperLeftCorner = positionForCoordinate(Vector2f::Zero()) + positionF();
+	Vector2f lowerRightCorner = positionForCoordinate(imageSizeF()) + positionF();
+	if (gridVisible())
+		drawPixelGrid(ctx, upperLeftCorner, lowerRightCorner, mScale);
+	if (pixelInfoVisible())
+		drawPixelInfo(ctx, mScale);
+}
+
+void ftl::gui2::ImageView::drawPixelGrid(NVGcontext* ctx, const Vector2f& upperLeftCorner,
+							  const Vector2f& lowerRightCorner, float stride) {
+	nvgBeginPath(ctx);
+
+	// Draw the vertical grid lines
+	float currentX = upperLeftCorner.x();
+	while (currentX <= lowerRightCorner.x()) {
+		nvgMoveTo(ctx, std::round(currentX), std::round(upperLeftCorner.y()));
+		nvgLineTo(ctx, std::round(currentX), std::round(lowerRightCorner.y()));
+		currentX += stride;
+	}
+
+	// Draw the horizontal grid lines
+	float currentY = upperLeftCorner.y();
+	while (currentY <= lowerRightCorner.y()) {
+		nvgMoveTo(ctx, std::round(upperLeftCorner.x()), std::round(currentY));
+		nvgLineTo(ctx, std::round(lowerRightCorner.x()), std::round(currentY));
+		currentY += stride;
+	}
+
+	nvgStrokeWidth(ctx, 1.0f);
+	nvgStrokeColor(ctx, Color(1.0f, 1.0f, 1.0f, 0.2f));
+	nvgStroke(ctx);
+}
+
+void ftl::gui2::ImageView::drawPixelInfo(NVGcontext* ctx, float stride) const {
+	// Extract the image coordinates at the two corners of the widget.
+	Vector2i topLeft = clampedImageCoordinateAt(Vector2f::Zero())
+						   .unaryExpr([](float x) { return std::floor(x); })
+						   .cast<int>();
+
+	Vector2i bottomRight = clampedImageCoordinateAt(sizeF())
+							   .unaryExpr([](float x) { return std::ceil(x); })
+							   .cast<int>();
+
+	// Extract the positions for where to draw the text.
+	Vector2f currentCellPosition =
+		(positionF() + positionForCoordinate(topLeft.cast<float>()));
+
+	float xInitialPosition = currentCellPosition.x();
+	int xInitialIndex = topLeft.x();
+
+	// Properly scale the pixel information for the given stride.
+	auto fontSize = stride * mFontScaleFactor;
+	static constexpr float maxFontSize = 30.0f;
+	fontSize = fontSize > maxFontSize ? maxFontSize : fontSize;
+	nvgBeginPath(ctx);
+	nvgFontSize(ctx, fontSize);
+	nvgTextAlign(ctx, NVG_ALIGN_CENTER | NVG_ALIGN_TOP);
+	nvgFontFace(ctx, "sans");
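+	// Walk the visible image cells row by row and draw the per-pixel text in each one.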
+	while (topLeft.y() != bottomRight.y()) {
+		while (topLeft.x() != bottomRight.x()) {
+			writePixelInfo(ctx, currentCellPosition, topLeft, stride, fontSize);
+			currentCellPosition.x() += stride;
+			++topLeft.x();
+		}
+		currentCellPosition.x() = xInitialPosition;
+		currentCellPosition.y() += stride;
+		++topLeft.y();
+		topLeft.x() = xInitialIndex;
+	}
+}
+
+void ftl::gui2::ImageView::writePixelInfo(NVGcontext* ctx, const Vector2f& cellPosition,
+							   const Vector2i& pixel, float stride, float fontSize) const {
+	auto pixelData = mPixelInfoCallback(pixel);
+	auto pixelDataRows = tokenize(pixelData.first);
+
+	// If no data is provided for this pixel then simply return.
+	if (pixelDataRows.empty())
+		return;
+
+	nvgFillColor(ctx, pixelData.second);
+	float yOffset = (stride - fontSize * pixelDataRows.size()) / 2;
+	for (size_t i = 0; i != pixelDataRows.size(); ++i) {
+		nvgText(ctx, cellPosition.x() + stride / 2, cellPosition.y() + yOffset,
+				pixelDataRows[i].data(), nullptr);
+		yOffset += fontSize;
+	}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+FTLImageView::~FTLImageView() {
+}
+
+void FTLImageView::draw(NVGcontext* ctx) {
+	if (texture_.isValid()) {
+		if (!was_valid_) { fit(); }
+		ImageView::draw(ctx);
+	}
+	was_valid_ = texture_.isValid();
+}
+
+GLTexture& FTLImageView::texture() {
+	return texture_;
+}
+
+void FTLImageView::copyFrom(const ftl::cuda::TextureObject<uchar4> &buf, cudaStream_t stream ) {
+	texture_.copyFrom(buf, stream);
+	bindImage(texture_.texture());
+}
+
+void FTLImageView::copyFrom(const cv::Mat &im, cudaStream_t stream) {
+	texture_.copyFrom(im, stream);
+	bindImage(texture_.texture());
+}
+
+void FTLImageView::copyFrom(const cv::cuda::GpuMat &im, cudaStream_t stream) {
+	texture_.copyFrom(im, stream);
+	bindImage(texture_.texture());
+}
+
+void FTLImageView::copyFrom(ftl::rgbd::Frame& frame, ftl::codecs::Channel channel) {
+	if (frame.hasOpenGL(channel)) {
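+		// The frame already has an OpenGL texture: bind it directly and release any locally owned copy.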
+		bindImage(frame.getOpenGL(channel));
+		if (texture_.isValid()) {
+			texture_.free();
+		}
+	}
+	else if (frame.isGPU(channel)) {
+		copyFrom(frame.get<cv::cuda::GpuMat>(channel));
+	}
+	else {
+		copyFrom(frame.get<cv::Mat>(channel));
+	}
+}
+
+nanogui::Vector2i ftl::gui2::FTLImageView::preferredSize(NVGcontext* /*ctx*/) const {
+	/** Returning the current widget size avoids layout issues if the layout is not set to fill/maximum. */
+	return mSize;
+}
+
+// ==== StereoImageView ========================================================
+
+using ftl::gui2::StereoImageView;
+
+StereoImageView::StereoImageView(nanogui::Widget* parent, nanogui::Orientation orientation) :
+		nanogui::Widget(parent), orientation_(orientation) {
+
+	setLayout(new nanogui::BoxLayout(orientation_, nanogui::Alignment::Fill));
+
+	left_ = new FTLImageView(this);
+	right_ = new FTLImageView(this);
+
+	// Fix offset/scale in the child views so they ignore their own mouse/keyboard input;
+	// StereoImageView handles panning and zooming for both.
+	left_->setFixedOffset(true);
+	left_->setFixedScale(true);
+	right_->setFixedOffset(true);
+	right_->setFixedScale(true);
+}
+
+bool StereoImageView::mouseMotionEvent(const nanogui::Vector2i &p, const nanogui::Vector2i &rel, int button, int modifiers) {
+	if ((button & (1 << GLFW_MOUSE_BUTTON_RIGHT)) != 0) {
+		nanogui::Vector2f posl = left_->imageCoordinateAt(p.cast<float>());
+		nanogui::Vector2f posr = right_->imageCoordinateAt(p.cast<float>());
+		if (posl.minCoeff() > 0) {
+			left_->setImageCoordinateAt((p + rel).cast<float>(), posl);
+			right_->setImageCoordinateAt((p + rel).cast<float>(), posl);
+		}
+		if (posr.minCoeff() > 0) {
+			left_->setImageCoordinateAt((p + rel).cast<float>(), posr);
+			right_->setImageCoordinateAt((p + rel).cast<float>(), posr);
+		}
+		return true;
+	}
+	return false;
+}
+
+bool StereoImageView::scrollEvent(const nanogui::Vector2i& p, const nanogui::Vector2f& rel) {
+	// Synchronized zoom: both views zoom together.
+
+	float v = rel.y();
+
+	// zooming on right image?
+	bool zoom_right =
+		((p.x() >= left_->absolutePosition().x()) && (orientation_ == nanogui::Orientation::Horizontal)) ||
+		((p.y() >= left_->absolutePosition().y()) && (orientation_ == nanogui::Orientation::Vertical));
+
+	if (orientation_ == nanogui::Orientation::Horizontal) {
+		if (zoom_right) {
+			left_->zoom(v, (p - nanogui::Vector2i{left_->width(), 0} - left_->position()).cast<float>());
+			right_->zoom(v, (p - right_->position()).cast<float>());
+		}
+		else {
+			left_->zoom(v, (p - left_->position()).cast<float>());
+			right_->zoom(v, (nanogui::Vector2i{right_->width(), 0} + p - right_->position()).cast<float>());
+		}
+	}
+	else { // same as above, flip x/y
+		if (zoom_right) {
+			left_->zoom(v, (p - nanogui::Vector2i{0, left_->height()} - left_->position()).cast<float>());
+			right_->zoom(v, (p - right_->position()).cast<float>());
+		}
+		else {
+			left_->zoom(v, (p - left_->position()).cast<float>());
+			right_->zoom(v, (nanogui::Vector2i{0, right_->height()} + p - right_->position()).cast<float>());
+		}
+	}
+	return true;
+}
+
+bool StereoImageView::keyboardEvent(int key, int /*scancode*/, int action, int modifiers) {
+	return true; // TODO: replicate the key handling from ImageView::keyboardEvent?
+}
+
+
+void StereoImageView::fit() {
+	left()->fit();
+	right()->fit();
+}
+
+bool StereoImageView::keyboardCharacterEvent(unsigned int codepoint) {
+	switch (codepoint) {
+	case 'c':
+		left_->center();
+		right_->center();
+		return true;
+
+	case 'f':
+		left_->fit();
+		right_->fit();
+		return true;
+
+	default:
+		return true;
+	}
+}
+
+void StereoImageView::performLayout(NVGcontext *ctx) {
+	if (orientation_ == nanogui::Orientation::Horizontal) {
+		left_->setSize({width()/2, height()});
+		right_->setSize({width()/2, height()});
+	}
+	else { // Orientation::Vertical
+		left_->setSize({width(), height()/2});
+		right_->setSize({width(), height()/2});
+	}
+	Widget::performLayout(ctx);
+}
diff --git a/applications/gui2/src/widgets/imageview.hpp b/applications/gui2/src/widgets/imageview.hpp
new file mode 100644
index 000000000..f7541c5d7
--- /dev/null
+++ b/applications/gui2/src/widgets/imageview.hpp
@@ -0,0 +1,247 @@
+/*
+	nanogui/imageview.h -- Widget used to display images.
+
+	The image view widget was contributed by Stefan Ivanov.
+
+	NanoGUI was developed by Wenzel Jakob <wenzel.jakob@epfl.ch>.
+	The widget drawing code is based on the NanoVG demo application
+	by Mikko Mononen.
+
+	All rights reserved. Use of this source code is governed by a
+	BSD-style license that can be found in the LICENSE.txt file.
+*/
+/** \file */
+
+#pragma once
+
+#include <nanogui/widget.h>
+#include <nanogui/glutil.h>
+#include <nanogui/layout.h>
+#include <functional>
+
+#include <ftl/rgbd/frame.hpp>
+#include <ftl/codecs/channels.hpp>
+#include <ftl/utility/gltexture.hpp>
+
+namespace ftl {
+namespace gui2 {
+
+
+/**
+ * \class ImageView imageview.hpp applications/gui2/src/widgets/imageview.hpp
+ *
+ * \brief Widget used to display images.
+ */
+class NANOGUI_EXPORT ImageView : public nanogui::Widget {
+public:
+	ImageView(nanogui::Widget* parent, GLuint imageID = -1);
+	virtual ~ImageView();
+
+	void bindImage(GLuint imageId);
+
+	nanogui::GLShader& imageShader() { return mShader; }
+
+	nanogui::Vector2f positionF() const { return mPos.cast<float>(); }
+	nanogui::Vector2f sizeF() const { return mSize.cast<float>(); }
+
+	const nanogui::Vector2i& imageSize() const { return mImageSize; }
+	nanogui::Vector2i scaledImageSize() const { return (mScale * mImageSize.cast<float>()).cast<int>(); }
+	nanogui::Vector2f imageSizeF() const { return mImageSize.cast<float>(); }
+	nanogui::Vector2f scaledImageSizeF() const { return (mScale * mImageSize.cast<float>()); }
+
+	const nanogui::Vector2f& offset() const { return mOffset; }
+	void setOffset(const nanogui::Vector2f& offset) { mOffset = offset; }
+	float scale() const { return mScale; }
+	void setScale(float scale) { mScale = scale > 0.01f ? scale : 0.01f; }
+
+	bool fixedOffset() const { return mFixedOffset; }
+	void setFixedOffset(bool fixedOffset) { mFixedOffset = fixedOffset; }
+	bool fixedScale() const { return mFixedScale; }
+	void setFixedScale(bool fixedScale) { mFixedScale = fixedScale; }
+
+	float zoomSensitivity() const { return mZoomSensitivity; }
+	void setZoomSensitivity(float zoomSensitivity) { mZoomSensitivity = zoomSensitivity; }
+
+	float gridThreshold() const { return mGridThreshold; }
+	void setGridThreshold(float gridThreshold) { mGridThreshold = gridThreshold; }
+
+	float pixelInfoThreshold() const { return mPixelInfoThreshold; }
+	void setPixelInfoThreshold(float pixelInfoThreshold) { mPixelInfoThreshold = pixelInfoThreshold; }
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+	void setPixelInfoCallback(const std::function<std::pair<std::string, nanogui::Color>(const nanogui::Vector2i&)>& callback) {
+		mPixelInfoCallback = callback;
+	}
+	const std::function<std::pair<std::string, nanogui::Color>(const nanogui::Vector2i&)>& pixelInfoCallback() const {
+		return mPixelInfoCallback;
+	}
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+	void setFontScaleFactor(float fontScaleFactor) { mFontScaleFactor = fontScaleFactor; }
+	float fontScaleFactor() const { return mFontScaleFactor; }
+
+	// Image transformation functions.
+
+	/// Calculates the image coordinates of the given pixel position on the widget.
+	nanogui::Vector2f imageCoordinateAt(const nanogui::Vector2f& position) const;
+
+	/**
+	 * Calculates the image coordinates of the given pixel position on the widget.
+	 * If the position provided corresponds to a coordinate outside the range of
+	 * the image, the coordinates are clamped to edges of the image.
+	 */
+	nanogui::Vector2f clampedImageCoordinateAt(const nanogui::Vector2f& position) const;
+
+	/// Calculates the position inside the widget (relative to the widget origin) for the given image coordinate.
+	nanogui::Vector2f positionForCoordinate(const nanogui::Vector2f& imageCoordinate) const;
+
+	/**
+	 * Modifies the internal state of the image viewer widget so that the pixel at the provided
+	 * position on the widget has the specified image coordinate. Also clamps the values of offset
+	 * to the sides of the widget.
+	 */
+	void setImageCoordinateAt(const nanogui::Vector2f& position, const nanogui::Vector2f& imageCoordinate);
+
+	/// Centers the image without affecting the scaling factor.
+	void center();
+
+	/// Centers and scales the image so that it fits inside the widget.
+	void fit();
+
+	/// Set the scale while keeping the image centered
+	void setScaleCentered(float scale);
+
+	/// Moves the offset by the specified amount. Does bounds checking.
+	void moveOffset(const nanogui::Vector2f& delta);
+
+	/**
+	 * Changes the scale factor by the provided amount modified by the zoom sensitivity member variable.
+	 * The scaling occurs such that the image coordinate under the focused position remains in
+	 * the same position before and after the scaling.
+	 */
+	void zoom(int amount, const nanogui::Vector2f& focusPosition);
+
+	bool keyboardEvent(int key, int scancode, int action, int modifiers) override;
+	bool keyboardCharacterEvent(unsigned int codepoint) override;
+
+	//bool mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) override;
+	bool mouseDragEvent(const nanogui::Vector2i &p, const nanogui::Vector2i &rel, int button, int modifiers) override;
+	bool scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) override;
+
+	/// Function indicating whether the grid is currently visible.
+	bool gridVisible() const;
+
+	/// Function indicating whether the pixel information is currently visible.
+	bool pixelInfoVisible() const;
+
+	/// Function indicating whether any of the overlays are visible.
+	bool helpersVisible() const;
+
+	nanogui::Vector2i preferredSize(NVGcontext* ctx) const override;
+	void performLayout(NVGcontext* ctx) override;
+	void draw(NVGcontext* ctx) override;
+
+protected:
+	// Helper image methods.
+	void updateImageParameters();
+
+	// Helper drawing methods.
+	void drawWidgetBorder(NVGcontext* ctx) const;
+	void drawImageBorder(NVGcontext* ctx) const;
+	void drawHelpers(NVGcontext* ctx) const;
+	static void drawPixelGrid(NVGcontext* ctx, const nanogui::Vector2f& upperLeftCorner,
+							  const nanogui::Vector2f& lowerRightCorner, float stride);
+	void drawPixelInfo(NVGcontext* ctx, float stride) const;
+	void writePixelInfo(NVGcontext* ctx, const nanogui::Vector2f& cellPosition,
+						const nanogui::Vector2i& pixel, float stride, float fontSize) const;
+
+	// Image parameters.
+	nanogui::GLShader mShader;
+	GLuint mImageID;
+	nanogui::Vector2i mImageSize;
+
+	// Image display parameters.
+	float mScale;
+	nanogui::Vector2f mOffset;
+	bool mFixedScale;
+	bool mFixedOffset;
+
+	// Fine-tuning parameters.
+	float mZoomSensitivity = 1.1f;
+
+	// Image info parameters.
+	float mGridThreshold = -1;
+	float mPixelInfoThreshold = -1;
+
+	// Image pixel data display members.
+	std::function<std::pair<std::string, nanogui::Color>(const nanogui::Vector2i&)> mPixelInfoCallback;
+	float mFontScaleFactor = 0.2f;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+/**
+ * ImageView backed by a GLTexture; provides copy/bind helpers for FTL frames, OpenCV matrices and CUDA textures.
+ */
+class FTLImageView : public ImageView {
+public:
+	using ImageView::ImageView;
+
+	FTLImageView(nanogui::Widget* parent, GLuint imageID = -1) : ImageView(parent, imageID), was_valid_(false) {}
+	~FTLImageView();
+
+	virtual void draw(NVGcontext* ctx) override;
+	virtual nanogui::Vector2i preferredSize(NVGcontext* ctx) const override;
+
+	/** Get GLTexture instance */
+	ftl::utility::GLTexture& texture();
+
+	/** Copy the image into the internal texture and bind it for drawing. */
+	void copyFrom(const ftl::cuda::TextureObject<uchar4> &buf, cudaStream_t stream = cudaStreamDefault);
+	void copyFrom(const cv::Mat &im, cudaStream_t stream = cudaStreamDefault);
+	void copyFrom(const cv::cuda::GpuMat &im, cudaStream_t stream = cudaStreamDefault);
+
+	/** From a frame: use its OpenGL texture if available (no copy), otherwise copy from GPU/CPU memory. */
+	void copyFrom(ftl::rgbd::Frame& frame, ftl::codecs::Channel channel);
+
+private:
+	ftl::utility::GLTexture texture_;
+	bool was_valid_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+/** Two ImageViews with synchronized zoom and pan. */
+class StereoImageView : public nanogui::Widget {
+public:
+	StereoImageView(nanogui::Widget* parent, nanogui::Orientation orientation = nanogui::Orientation::Horizontal);
+
+	virtual void performLayout(NVGcontext* ctx) override;
+
+	bool keyboardEvent(int key, int scancode, int action, int modifiers) override;
+	bool keyboardCharacterEvent(unsigned int codepoint) override;
+	bool mouseMotionEvent(const nanogui::Vector2i &p, const nanogui::Vector2i &rel, int button, int modifiers) override;
+	bool scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) override;
+
+	FTLImageView* left() { return left_; }
+	FTLImageView* right() { return right_; }
+
+	void fit();
+
+	void bindLeft(GLuint id) { left_->texture().free(); left_->bindImage(id); }
+	void bindRight(GLuint id) { right_->texture().free(); right_->bindImage(id); }
+
+private:
+	nanogui::Orientation orientation_;
+	FTLImageView* left_;
+	FTLImageView* right_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/leftbutton.cpp b/applications/gui2/src/widgets/leftbutton.cpp
new file mode 100644
index 000000000..c448ebaf2
--- /dev/null
+++ b/applications/gui2/src/widgets/leftbutton.cpp
@@ -0,0 +1,121 @@
+#include "leftbutton.hpp"
+#include <nanogui/button.h>
+#include <nanogui/theme.h>
+#include <nanogui/opengl.h>
+
+void ftl::gui2::LeftButton::draw(NVGcontext* ctx) {
+	using namespace nanogui;
+
+	Widget::draw(ctx);
+
+    NVGcolor gradTop = mTheme->mButtonGradientTopUnfocused;
+    NVGcolor gradBot = mTheme->mButtonGradientBotUnfocused;
+
+    if (mPushed) {
+        gradTop = mTheme->mButtonGradientTopPushed;
+        gradBot = mTheme->mButtonGradientBotPushed;
+    } else if (mMouseFocus && mEnabled) {
+        gradTop = mTheme->mButtonGradientTopFocused;
+        gradBot = mTheme->mButtonGradientBotFocused;
+    }
+
+    nvgBeginPath(ctx);
+
+    nvgRoundedRect(ctx, mPos.x() + 1, mPos.y() + 1.0f, mSize.x() - 2,
+                   mSize.y() - 2, mTheme->mButtonCornerRadius - 1);
+
+    if (mBackgroundColor.w() != 0) {
+        nvgFillColor(ctx, Color(mBackgroundColor.head<3>(), 1.f));
+        nvgFill(ctx);
+        if (mPushed) {
+            gradTop.a = gradBot.a = 0.8f;
+        } else {
+            double v = 1 - mBackgroundColor.w();
+            gradTop.a = gradBot.a = mEnabled ? v : v * .5f + .5f;
+        }
+    }
+
+    NVGpaint bg = nvgLinearGradient(ctx, mPos.x(), mPos.y(), mPos.x(),
+                                    mPos.y() + mSize.y(), gradTop, gradBot);
+
+    nvgFillPaint(ctx, bg);
+    nvgFill(ctx);
+
+    nvgBeginPath(ctx);
+    nvgStrokeWidth(ctx, 1.0f);
+    nvgRoundedRect(ctx, mPos.x() + 0.5f, mPos.y() + (mPushed ? 0.5f : 1.5f), mSize.x() - 1,
+                   mSize.y() - 1 - (mPushed ? 0.0f : 1.0f), mTheme->mButtonCornerRadius);
+    nvgStrokeColor(ctx, mTheme->mBorderLight);
+    nvgStroke(ctx);
+
+    nvgBeginPath(ctx);
+    nvgRoundedRect(ctx, mPos.x() + 0.5f, mPos.y() + 0.5f, mSize.x() - 1,
+                   mSize.y() - 2, mTheme->mButtonCornerRadius);
+    nvgStrokeColor(ctx, mTheme->mBorderDark);
+    nvgStroke(ctx);
+
+    int fontSize = mFontSize == -1 ? mTheme->mButtonFontSize : mFontSize;
+    nvgFontSize(ctx, fontSize);
+    nvgFontFace(ctx, "sans-bold");
+    float tw = nvgTextBounds(ctx, 0,0, mCaption.c_str(), nullptr, nullptr);
+
+    Vector2f center = mPos.cast<float>() + mSize.cast<float>() * 0.5f;
+    Vector2f textPos(mPos.x() + 8, center.y() - 1);
+    NVGcolor textColor =
+        mTextColor.w() == 0 ? mTheme->mTextColor : mTextColor;
+    if (!mEnabled)
+        textColor = mTheme->mDisabledTextColor;
+
+    if (mIcon) {
+        auto icon = utf8(mIcon);
+
+        float iw, ih = fontSize;
+        if (nvgIsFontIcon(mIcon)) {
+            ih *= icon_scale();
+            nvgFontSize(ctx, ih);
+            nvgFontFace(ctx, "icons");
+            iw = nvgTextBounds(ctx, 0, 0, icon.data(), nullptr, nullptr);
+        } else {
+            int w, h;
+            ih *= 0.9f;
+            nvgImageSize(ctx, mIcon, &w, &h);
+            iw = w * ih / h;
+        }
+        if (mCaption != "")
+            iw += mSize.y() * 0.15f;
+        nvgFillColor(ctx, textColor);
+        nvgTextAlign(ctx, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
+        Vector2f iconPos = center;
+        iconPos.y() -= 1;
+
+        if (mIconPosition == IconPosition::LeftCentered) {
+            iconPos.x() -= (tw + iw) * 0.5f;
+            textPos.x() += iw * 0.5f;
+        } else if (mIconPosition == IconPosition::RightCentered) {
+            textPos.x() -= iw * 0.5f;
+            iconPos.x() += tw * 0.5f;
+        } else if (mIconPosition == IconPosition::Left) {
+            iconPos.x() = mPos.x() + 8;
+        } else if (mIconPosition == IconPosition::Right) {
+            iconPos.x() = mPos.x() + mSize.x() - iw - 8;
+        }
+
+        if (nvgIsFontIcon(mIcon)) {
+            nvgText(ctx, iconPos.x(), iconPos.y()+1, icon.data(), nullptr);
+        } else {
+            NVGpaint imgPaint = nvgImagePattern(ctx,
+                    iconPos.x(), iconPos.y() - ih/2, iw, ih, 0, mIcon, mEnabled ? 0.5f : 0.25f);
+
+            nvgFillPaint(ctx, imgPaint);
+            nvgFill(ctx);
+        }
+    }
+
+    nvgFontSize(ctx, fontSize);
+    nvgFontFace(ctx, "sans-bold");
+    nvgTextAlign(ctx, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
+    nvgFillColor(ctx, mTheme->mTextColorShadow);
+    nvgText(ctx, textPos.x(), textPos.y(), mCaption.c_str(), nullptr);
+    nvgFillColor(ctx, textColor);
+    nvgText(ctx, textPos.x(), textPos.y() + 1, mCaption.c_str(), nullptr);
+}
\ No newline at end of file
diff --git a/applications/gui2/src/widgets/leftbutton.hpp b/applications/gui2/src/widgets/leftbutton.hpp
new file mode 100644
index 000000000..51efe156b
--- /dev/null
+++ b/applications/gui2/src/widgets/leftbutton.hpp
@@ -0,0 +1,23 @@
+#pragma once
+#include <nanogui/button.h>
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Button variant that left-aligns its caption text.
+ */
+class LeftButton : public nanogui::Button {
+public:
+	LeftButton(nanogui::Widget *parent, const std::string &caption = "",
+				int buttonIcon = 0) : nanogui::Button(parent, caption, buttonIcon) {};
+	virtual ~LeftButton() {};
+
+	virtual void draw(NVGcontext* ctx) override;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/popupbutton.cpp b/applications/gui2/src/widgets/popupbutton.cpp
new file mode 100644
index 000000000..4276aa2f0
--- /dev/null
+++ b/applications/gui2/src/widgets/popupbutton.cpp
@@ -0,0 +1,118 @@
+/*
+	src/popupbutton.cpp -- Button which launches a popup widget
+
+	NanoGUI was developed by Wenzel Jakob <wenzel.jakob@epfl.ch>.
+	The widget drawing code is based on the NanoVG demo application
+	by Mikko Mononen.
+
+	All rights reserved. Use of this source code is governed by a
+	BSD-style license that can be found in the LICENSE.txt file.
+*/
+
+#include "popupbutton.hpp"
+
+#include <nanogui/theme.h>
+#include <nanogui/opengl.h>
+#include <nanogui/serializer/core.h>
+#include <nanogui/popup.h>
+
+using nanogui::Widget;
+using nanogui::Window;
+using nanogui::Button;
+using nanogui::Popup;
+using nanogui::Serializer;
+using nanogui::utf8;
+using nanogui::Vector2i;
+using nanogui::Vector2f;
+
+using ftl::gui2::PopupButton;
+
+PopupButton::PopupButton(Widget *parent, const std::string &caption, int buttonIcon)
+	: Button(parent, caption, buttonIcon) {
+
+	mChevronIcon = mTheme->mPopupChevronRightIcon;
+
+	setFlags(Flags::ToggleButton | Flags::PopupButton);
+
+	Window *parentWindow = window();
+	mPopup = new Popup(parentWindow->parent(), window());
+	mPopup->setSize(Vector2i(320, 250));
+	mPopup->setVisible(false);
+
+	mIconExtraScale = 0.8f;  // Widget override; see the class remark in the header.
+}
+
+PopupButton::~PopupButton() {
+	if (mPopup->parent()->getRefCount() > 0) {
+		mPopup->setVisible(false);
+		mPopup->dispose();
+	}
+}
+
+Vector2i PopupButton::preferredSize(NVGcontext *ctx) const {
+	return Button::preferredSize(ctx) + Vector2i(15, 0);
+}
+
+void PopupButton::draw(NVGcontext* ctx) {
+	if (!mEnabled && mPushed)
+		mPushed = false;
+
+	mPopup->setVisible(mPushed);
+	Button::draw(ctx);
+
+	if (mChevronIcon) {
+		auto icon = utf8(mChevronIcon);
+		NVGcolor textColor =
+			mTextColor.w() == 0 ? mTheme->mTextColor : mTextColor;
+
+		nvgFontSize(ctx, (mFontSize < 0 ? mTheme->mButtonFontSize : mFontSize) * icon_scale());
+		nvgFontFace(ctx, "icons");
+		nvgFillColor(ctx, mEnabled ? textColor : mTheme->mDisabledTextColor);
+		nvgTextAlign(ctx, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
+
+		float iw = nvgTextBounds(ctx, 0, 0, icon.data(), nullptr, nullptr);
+		Vector2f iconPos(0, mPos.y() + mSize.y() * 0.5f - 1);
+
+		if (mPopup->side() == Popup::Right)
+			iconPos[0] = mPos.x() + mSize.x() - iw - 8;
+		else
+			iconPos[0] = mPos.x() + 8;
+
+		nvgText(ctx, iconPos.x(), iconPos.y(), icon.data(), nullptr);
+	}
+}
+
+void PopupButton::performLayout(NVGcontext *ctx) {
+	Widget::performLayout(ctx);
+
+	const Window *parentWindow = window();
+
+	int posY = absolutePosition().y() - parentWindow->position().y() + mSize.y() /2;
+	if (mPopup->side() == Popup::Right)
+		mPopup->setAnchorPos(Vector2i(parentWindow->width() + 15, posY));
+	else
+		mPopup->setAnchorPos(Vector2i(0 - 15, posY));
+}
+
+void PopupButton::setSide(Popup::Side side) {
+	if (mPopup->side() == Popup::Right &&
+		mChevronIcon == mTheme->mPopupChevronRightIcon)
+		setChevronIcon(mTheme->mPopupChevronLeftIcon);
+	else if (mPopup->side() == Popup::Left &&
+			 mChevronIcon == mTheme->mPopupChevronLeftIcon)
+		setChevronIcon(mTheme->mPopupChevronRightIcon);
+	mPopup->setSide(side);
+}
+
+void PopupButton::save(Serializer &s) const {
+	Button::save(s);
+	s.set("chevronIcon", mChevronIcon);
+}
+
+bool PopupButton::load(Serializer &s) {
+	if (!Button::load(s))
+		return false;
+	if (!s.get("chevronIcon", mChevronIcon))
+		return false;
+	return true;
+}
diff --git a/applications/gui2/src/widgets/popupbutton.hpp b/applications/gui2/src/widgets/popupbutton.hpp
new file mode 100644
index 000000000..81519c134
--- /dev/null
+++ b/applications/gui2/src/widgets/popupbutton.hpp
@@ -0,0 +1,52 @@
+#pragma once
+#include <nanogui/button.h>
+#include <nanogui/popup.h>
+
+namespace ftl {
+namespace gui2 {
+
+/**
+ * Patched version of nanogui::PopupButton whose destructor also removes the
+ * popup window.
+ *
+ * \class PopupButton popupbutton.hpp applications/gui2/src/widgets/popupbutton.hpp
+ *
+ * \brief Button which launches a popup widget.
+ *
+ * \remark
+ *     This class overrides \ref nanogui::Widget::mIconExtraScale to be ``0.8f``,
+ *     which affects all subclasses of this Widget.  Subclasses must explicitly
+ *     set a different value if needed (e.g., in their constructor).
+ */
+class PopupButton : public nanogui::Button {
+public:
+	PopupButton(nanogui::Widget *parent, const std::string &caption = "",
+				int buttonIcon = 0);
+	virtual ~PopupButton();
+
+	void setChevronIcon(int icon) { mChevronIcon = icon; }
+	int chevronIcon() const { return mChevronIcon; }
+
+	void setSide(nanogui::Popup::Side popupSide);
+	nanogui::Popup::Side side() const { return mPopup->side(); }
+
+	nanogui::Popup *popup() { return mPopup; }
+	const nanogui::Popup *popup() const { return mPopup; }
+
+	virtual void draw(NVGcontext* ctx) override;
+	virtual nanogui::Vector2i preferredSize(NVGcontext *ctx) const override;
+	virtual void performLayout(NVGcontext *ctx) override;
+
+	virtual void save(nanogui::Serializer &s) const override;
+	virtual bool load(nanogui::Serializer &s) override;
+
+protected:
+	nanogui::Popup *mPopup;
+	int mChevronIcon;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/soundctrl.cpp b/applications/gui2/src/widgets/soundctrl.cpp
new file mode 100644
index 000000000..87ca183a5
--- /dev/null
+++ b/applications/gui2/src/widgets/soundctrl.cpp
@@ -0,0 +1,105 @@
+#include <nanogui/layout.h>
+#include <nanogui/label.h>
+#include <nanogui/slider.h>
+
+#include "soundctrl.hpp"
+#include "../screen.hpp"
+
+using ftl::gui2::PopupButton;
+using ftl::gui2::VolumeButton;
+using ftl::gui2::Screen;
+
+VolumeButton::VolumeButton(nanogui::Widget *parent) :
+	ftl::gui2::PopupButton(parent, "", ENTYPO_ICON_SOUND) {
+	setChevronIcon(-1);
+
+	muted_ = false;
+
+	mPopup->setLayout(new nanogui::GroupLayout(15, 6, 14, 0));
+	new nanogui::Label(mPopup, "Volume");
+	slider_ = new nanogui::Slider(mPopup);
+
+	slider_->setHighlightColor(dynamic_cast<Screen*>(screen())->getColor("highlight1"));
+	slider_->setHeight(20);
+	mPopup->setFixedWidth(200);
+
+	slider_->setCallback([this](float value) {
+		setValue(value);
+		if (cb_) { cb_(value); }
+	});
+}
+
+VolumeButton::~VolumeButton() {
+}
+
+void VolumeButton::setCallback(std::function<void(float)> cb) {
+	cb_ = cb;
+}
+
+void VolumeButton::update() {
+	slider_->setValue(value_);
+	slider_->setHighlightedRange({0.0f, value_});
+
+	if (muted_ || value_ == 0.0f) {
+		setIcon(ICON_MUTED);
+	}
+	else if (value_ < 0.33){
+		setIcon(ICON_VOLUME_1);
+	}
+	else if (value_ >= 0.67) {
+		setIcon(ICON_VOLUME_3);
+	}
+	else {
+		setIcon(ICON_VOLUME_2);
+	}
+}
+
+void VolumeButton::setValue(float v) {
+	value_ = v;
+	setMuted(false);
+	update();
+}
+
+float VolumeButton::value() {
+	return muted_ ? 0.0f : value_;
+}
+
+void VolumeButton::setMuted(bool v) {
+	if (muted_ == v) {
+		return;
+	}
+
+	muted_ = v;
+	if (muted_) {
+		slider_->setHighlightColor(
+			dynamic_cast<Screen*>(screen())->getColor("highlight1_disabled"));
+	}
+	else {
+		slider_->setHighlightColor(
+			dynamic_cast<Screen*>(screen())->getColor("highlight1"));
+	}
+	update();
+}
+
+bool VolumeButton::muted() {
+	return muted_;
+}
+
+bool VolumeButton::mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) {
+	parent()->setFocused(true);
+	if (down && button == GLFW_MOUSE_BUTTON_2) {
+		setMuted(!muted_);
+		if (cb_) { cb_(value()); }
+		return true;
+	}
+	else {
+		return PopupButton::mouseButtonEvent(p, button, down, modifiers);
+	}
+
+}
+
+bool VolumeButton::scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) {
+	setValue(std::min(std::max(0.0f, value_ + rel[1]*scroll_step_), 1.0f));
+	if (cb_) { cb_(value()); }
+	return true;
+}
diff --git a/applications/gui2/src/widgets/soundctrl.hpp b/applications/gui2/src/widgets/soundctrl.hpp
new file mode 100644
index 000000000..1db40ec5c
--- /dev/null
+++ b/applications/gui2/src/widgets/soundctrl.hpp
@@ -0,0 +1,50 @@
+#pragma once
+
+#include <nanogui/entypo.h>
+
+#include "popupbutton.hpp"
+
+namespace ftl {
+namespace gui2 {
+
+class VolumeButton : public ftl::gui2::PopupButton {
+public:
+	VolumeButton(nanogui::Widget *parent);
+	virtual ~VolumeButton();
+
+	// Callback; the new volume value is passed as the argument.
+	void setCallback(std::function<void(float)> cb);
+
+	// Set the value (updates slider position and highlight, and changes the icon).
+	void setValue(float v);
+	float value();
+
+	// get/set mute status (changes volume highlight color and icon)
+	void setMuted(bool v);
+	bool muted();
+
+	virtual bool mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) override;
+	virtual bool scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) override;
+
+	// Icons: three volume levels plus muted (ranges as a fraction of full volume).
+	int ICON_VOLUME_3 = ENTYPO_ICON_SOUND; // [0.67, 1.0]
+	int ICON_VOLUME_2 = ENTYPO_ICON_SOUND; // [0.33, 0.67)
+	int ICON_VOLUME_1 = ENTYPO_ICON_SOUND; // (0.0, 0.33)
+	int ICON_MUTED = ENTYPO_ICON_SOUND_MUTE;
+
+private:
+	void update();
+
+	nanogui::Slider* slider_;
+	std::function<void(float)> cb_;
+
+	float scroll_step_ = 0.02f;
+	float value_;
+	bool muted_;
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/gui2/src/widgets/window.hpp b/applications/gui2/src/widgets/window.hpp
new file mode 100644
index 000000000..3e568026a
--- /dev/null
+++ b/applications/gui2/src/widgets/window.hpp
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <nanogui/window.h>
+
+namespace ftl {
+namespace gui2 {
+/**
+ * Non-movable Window widget
+ */
+class FixedWindow : public nanogui::Window {
+public:
+	FixedWindow(nanogui::Widget *parent, const std::string &name="") :
+		nanogui::Window(parent, name) {}
+
+	virtual bool mouseDragEvent(const nanogui::Vector2i&, const nanogui::Vector2i&, int, int) override { return false; }
+	virtual ~FixedWindow() {}
+
+public:
+	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+};
+
+}
+}
diff --git a/applications/reconstruct2/CMakeLists.txt b/applications/reconstruct2/CMakeLists.txt
new file mode 100644
index 000000000..a001f2553
--- /dev/null
+++ b/applications/reconstruct2/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Need to include staged files and libs
+#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
+#include_directories(${PROJECT_BINARY_DIR})
+
+set(REPSRC
+	src/main.cpp
+)
+
+add_executable(ftl-reconstruct2 ${REPSRC})
+
+#target_include_directories(ftl-reconstruct PUBLIC
+#	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+#	$<INSTALL_INTERFACE:include>
+#	PRIVATE src)
+
+if (CUDA_FOUND)
+set_property(TARGET ftl-reconstruct2 PROPERTY CUDA_SEPARABLE_COMPILATION ON)
+endif()
+
+#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(ftl-reconstruct2 ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet ftlrender ftloperators ftlstreams ftlaudio)
diff --git a/applications/reconstruct2/src/main.cpp b/applications/reconstruct2/src/main.cpp
new file mode 100644
index 000000000..1a0bb7c59
--- /dev/null
+++ b/applications/reconstruct2/src/main.cpp
@@ -0,0 +1,147 @@
+#include <ftl/configuration.hpp>
+#include <ftl/net.hpp>
+#include <ftl/streams/feed.hpp>
+#include <ftl/master.hpp>
+#include <nlohmann/json.hpp>
+#include <loguru.hpp>
+
+#include "ftl/operators/smoothing.hpp"
+#include "ftl/operators/colours.hpp"
+#include "ftl/operators/normals.hpp"
+#include "ftl/operators/filling.hpp"
+#include "ftl/operators/segmentation.hpp"
+#include "ftl/operators/mask.hpp"
+#include "ftl/operators/antialiasing.hpp"
+#include "ftl/operators/mvmls.hpp"
+#include "ftl/operators/clipping.hpp"
+#include <ftl/operators/disparity.hpp>
+#include <ftl/operators/poser.hpp>
+#include <ftl/operators/detectandtrack.hpp>
+
+using ftl::net::Universe;
+using ftl::stream::Feed;
+using ftl::codecs::Channel;
+using std::vector;
+using std::string;
+
+static void threadSetCUDADevice() {
+	// Ensure all pool threads have the correct CUDA device selected.
+	std::atomic<int> ijobs = 0;
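+	// Push one job per pool thread; each job sets the CUDA device and then spins until
+	// every thread has done so, so no worker can pick up two of these jobs.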
+	for (int i=0; i<ftl::pool.size(); ++i) {
+		ftl::pool.push([&ijobs](int id) {
+			ftl::cuda::setDevice();
+			++ijobs;
+			while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+		});
+	}
+	while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+}
+
+static void run(ftl::Configurable *root) {
+	// Use other GPU if available.
+	ftl::cuda::setDevice(ftl::cuda::deviceCount()-1);
+	threadSetCUDADevice();
+	ftl::timer::setClockSlave(false);
+	ftl::timer::setHighPrecision(true);
+
+	Universe *net = ftl::create<Universe>(root, "net");
+	ftl::ctrl::Master ctrl(root, net);
+
+	net->start();
+	net->waitConnections();
+
+	Feed *feed = ftl::create<Feed>(root, "feed", net);
+	std::string group_name = root->value("group", std::string("Reconstruction"));
+
+	// Add sources here
+	if (root->getConfig().contains("sources")) {
+		for (const auto &s : root->getConfig()["sources"]) {
+			ftl::URI uri(s);
+			uri.setAttribute("group", group_name);
+			feed->add(uri);
+		}
+	}
+
+	// Add sources from command line as well
+	auto paths = root->get<vector<string>>("paths");
+	string file = "";
+
+	for (auto &x : *paths) {
+		if (x != "") {
+			ftl::URI uri(x);
+			uri.setAttribute("group", group_name);
+			feed->add(uri);
+		}
+	}
+
+	// Automatically add any new sources
+	auto nsrc_handle = feed->onNewSources([feed,group_name](const vector<string> &srcs) {
+		for (const auto &s : srcs) {
+			ftl::URI uri(s);
+			uri.setAttribute("group", group_name);
+			feed->add(uri);
+		}
+		return true;
+	});
+
+	auto *filter = feed->filter({Channel::Colour, Channel::Depth});
+	feed->set("uri", root->value("uri", std::string("ftl://ftlab.utu.fi/reconstruction")));
+	feed->setPipelineCreator([](ftl::operators::Graph *pipeline) {
+		pipeline->append<ftl::operators::DepthChannel>("depth");  // Ensure there is a depth channel
+		pipeline->append<ftl::operators::DisparityBilateralFilter>("bilateral_filter")->value("enabled", false);
+		pipeline->append<ftl::operators::DisparityToDepth>("calculate_depth")->value("enabled", false);
+		pipeline->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
+		pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
+		pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
+		pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
+		//pipeline_->append<ftl::operators::HFSmoother>("hfnoise");  // Remove high-frequency noise
+		pipeline->append<ftl::operators::Normals>("normals");  // Estimate surface normals
+		//pipeline_->append<ftl::operators::SmoothChannel>("smoothing");  // Generate a smoothing channel
+		//pipeline_->append<ftl::operators::ScanFieldFill>("filling");  // Generate a smoothing channel
+		pipeline->append<ftl::operators::CrossSupport>("cross");
+		pipeline->append<ftl::operators::DiscontinuityMask>("discontinuity");
+		pipeline->append<ftl::operators::CrossSupport>("cross2")->value("discon_support", true);
+		pipeline->append<ftl::operators::BorderMask>("border_mask")->value("enabled", false);
+		pipeline->append<ftl::operators::CullDiscontinuity>("remove_discontinuity")->set("enabled", false);
+		//pipeline_->append<ftl::operators::AggreMLS>("mls");  // Perform MLS (using smoothing channel)
+		pipeline->append<ftl::operators::VisCrossSupport>("viscross")->value("enabled", false);
+		pipeline->append<ftl::operators::MultiViewMLS>("mvmls");
+		pipeline->append<ftl::operators::Poser>("poser")->value("enabled", false);
+	});
+	//feed->lowLatencyMode();
+	feed->startStreaming(filter);
+
+	// Just do whatever jobs are available
+	while (ftl::running) {
+		auto f = ftl::pool.pop();
+		if (f) {
+			f(-1);
+		} else {
+			std::this_thread::sleep_for(std::chrono::milliseconds(10));
+		}
+	}
+
+	nsrc_handle.cancel();
+	feed->stopRecording();
+	feed->removeFilter(filter);
+
+	net->shutdown();
+	LOG(INFO) << "Stopping...";
+	ftl::timer::stop(true);
+	LOG(INFO) << "Timer stopped...";
+	ftl::pool.stop(true);
+	LOG(INFO) << "All threads stopped.";
+
+	delete feed;
+	delete net;
+	delete root;
+}
+
+int main(int argc, char **argv) {
+	run(ftl::configure(argc, argv, "reconstruction_default"));
+
+	// Save config changes and delete final objects
+	ftl::config::cleanup();
+
+	return ftl::exit_code;
+}
diff --git a/applications/tools/CMakeLists.txt b/applications/tools/CMakeLists.txt
index 0506376d5..8a6d662a3 100644
--- a/applications/tools/CMakeLists.txt
+++ b/applications/tools/CMakeLists.txt
@@ -1,7 +1,8 @@
-add_subdirectory(codec_eval)
+#add_subdirectory(codec_eval)
 
 #if (HAVE_ASSIMP)
 #    add_subdirectory(model_truth)
 #endif()
 
 add_subdirectory(middlebury_gen)
+add_subdirectory(simple_viewer)
diff --git a/applications/tools/middlebury_gen/CMakeLists.txt b/applications/tools/middlebury_gen/CMakeLists.txt
index 2dffb172d..13cc2c9e8 100644
--- a/applications/tools/middlebury_gen/CMakeLists.txt
+++ b/applications/tools/middlebury_gen/CMakeLists.txt
@@ -15,3 +15,4 @@ endif()
 
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
 target_link_libraries(middlebury-gen ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlrender ftloperators ftlstreams)
+set_property(TARGET middlebury-gen PROPERTY CUDA_ARCHITECTURES OFF)
diff --git a/applications/tools/middlebury_gen/src/main.cpp b/applications/tools/middlebury_gen/src/main.cpp
index 04c040d36..c1b71e568 100644
--- a/applications/tools/middlebury_gen/src/main.cpp
+++ b/applications/tools/middlebury_gen/src/main.cpp
@@ -4,6 +4,8 @@
 #include <ftl/codecs/opencv_encoder.hpp>
 #include <ftl/streams/injectors.hpp>
 
+#include <ftl/data/framepool.hpp>
+
 #include <opencv2/imgcodecs.hpp>
 #include <opencv2/imgproc.hpp>
 #include <opencv2/highgui.hpp>
@@ -212,8 +214,10 @@ int main(int argc, char **argv) {
 	// For each middlebury test folder
 	auto paths = (*root->get<nlohmann::json>("paths"));
 
-	ftl::rgbd::Frame frame;
-	ftl::rgbd::FrameState state;
+	ftl::data::Pool pool(1,1);
+	ftl::data::Frame dframe = pool.allocate(ftl::data::FrameID(0,0), 10);
+	ftl::rgbd::Frame &frame = dframe.cast<ftl::rgbd::Frame>();
+	frame.store();
 
 	ftl::operators::DisparityToDepth disp2depth(ftl::create<ftl::Configurable>(root, "disparity"));
 
@@ -249,7 +253,7 @@ int main(int argc, char **argv) {
 			// Load the ground truth
 			//frame.create<cv::Mat>(Channel::Disparity) = cv::imread(path+"/disp0.pfm", cv::IMREAD_UNCHANGED);
 			readFilePFM(frame.create<cv::Mat>(Channel::Disparity), path+"/disp0.pfm");
-			cv::Mat &disp = frame.get<cv::Mat>(Channel::Disparity);
+			cv::Mat &disp = frame.set<cv::Mat>(Channel::Disparity);
 			float aspect = float(disp.cols) / float(disp.rows);
 			float scaling = float(height) / float(disp.rows);
 			cv::resize(disp, disp, cv::Size(int(aspect*float(height)),height), 0.0, 0.0, cv::INTER_NEAREST);
@@ -277,14 +281,16 @@ int main(int argc, char **argv) {
 			intrin1.width = c1.cols;
 			intrin2.width = c2.cols;
 
-			state.setLeft(intrin1);
-			state.setRight(intrin2);
-			frame.setOrigin(&state);
-			ftl::stream::injectCalibration(out, frame, 0, 0, i, false);
-			ftl::stream::injectCalibration(out, frame, 0, 0, i, true);
+			frame.setLeft() = intrin1;
+			frame.setRight() = intrin2;
+			//ftl::stream::injectCalibration(out, frame, 0, 0, i, false);
+			//ftl::stream::injectCalibration(out, frame, 0, 0, i, true);
 
 			// Convert disparity to depth
-			frame.upload(Channel::Disparity + Channel::Colour + Channel::Colour2);
+			frame.upload(Channel::Disparity);
+			frame.upload(Channel::Colour);
+			frame.upload(Channel::Colour2);
+
 
 			disp2depth.apply(frame, frame, 0);
 
diff --git a/applications/tools/simple_viewer/CMakeLists.txt b/applications/tools/simple_viewer/CMakeLists.txt
new file mode 100644
index 000000000..c7d6e0c2a
--- /dev/null
+++ b/applications/tools/simple_viewer/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Need to include staged files and libs
+#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
+#include_directories(${PROJECT_BINARY_DIR})
+
+set(SIMPVIEWSRC
+	main.cpp
+)
+
+add_executable(simple-viewer ${SIMPVIEWSRC})
+
+#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_link_libraries(simple-viewer ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet ftlrender ftloperators ftlstreams ftlaudio)
diff --git a/applications/tools/simple_viewer/main.cpp b/applications/tools/simple_viewer/main.cpp
new file mode 100644
index 000000000..6a3883749
--- /dev/null
+++ b/applications/tools/simple_viewer/main.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 Nicolas Pope. All rights reserved.
+ *
+ * See LICENSE.
+ */
+
+#define LOGURU_WITH_STREAMS 1
+#include <loguru.hpp>
+#include <ftl/config.h>
+#include <ftl/configuration.hpp>
+#include <ftl/master.hpp>
+#include <ftl/threads.hpp>
+#include <ftl/codecs/channels.hpp>
+#include <ftl/codecs/depth_convert_cuda.hpp>
+#include <ftl/data/framepool.hpp>
+#include <ftl/audio/speaker.hpp>
+
+#include <nlohmann/json.hpp>
+
+#include <fstream>
+#include <string>
+#include <vector>
+#include <thread>
+#include <chrono>
+
+#include <opencv2/opencv.hpp>
+#include <opencv2/quality/qualitypsnr.hpp>
+#include <ftl/net/universe.hpp>
+
+#include <ftl/streams/filestream.hpp>
+#include <ftl/streams/receiver.hpp>
+#include <ftl/streams/sender.hpp>
+#include <ftl/streams/netstream.hpp>
+
+#include <ftl/operators/colours.hpp>
+#include <ftl/operators/mask.hpp>
+#include <ftl/operators/segmentation.hpp>
+#include <ftl/operators/depth.hpp>
+
+#ifdef WIN32
+#pragma comment(lib, "Rpcrt4.lib")
+#endif
+
+using ftl::net::Universe;
+using std::string;
+using std::vector;
+using ftl::config::json_t;
+using ftl::codecs::Channel;
+using ftl::codecs::codec_t;
+using ftl::codecs::definition_t;
+
+using json = nlohmann::json;
+using std::this_thread::sleep_for;
+using std::chrono::milliseconds;
+
+static ftl::data::Generator *createFileGenerator(ftl::Configurable *root, ftl::data::Pool *pool, const std::string &filename) {
+	ftl::stream::File *stream = ftl::create<ftl::stream::File>(root, "player");
+	stream->set("filename", filename);
+
+	ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver", pool);
+	gen->setStream(stream);
+
+	stream->begin();
+	stream->select(0, Channel::Colour + Channel::Depth);  // TODO: Choose these elsewhere
+	return gen;
+}
+
+static void visualizeDepthMap(	const cv::Mat &depth, cv::Mat &out,
+								const float max_depth)
+{
+	DCHECK(max_depth > 0.0);
+
+	depth.convertTo(out, CV_8U, 255.0f / max_depth);
+	out = 255 - out;
+	//cv::Mat mask = (depth >= max_depth); // TODO (mask for invalid pixels)
+	
+	applyColorMap(out, out, cv::COLORMAP_JET);
+	//out.setTo(cv::Scalar(0), mask);
+	//cv::cvtColor(out,out, cv::COLOR_BGR2BGRA);
+}
+
+static void run(ftl::Configurable *root) {
+	Universe *net = ftl::create<Universe>(root, "net");
+	ftl::ctrl::Master ctrl(root, net);
+
+	net->start();
+	net->waitConnections();
+
+	std::list<ftl::Handle> handles;
+	ftl::data::Pool pool(2,10);
+
+	std::list<ftl::data::Generator*> generators;
+
+	// Check paths for FTL files to load.
+	auto paths = (*root->get<nlohmann::json>("paths"));
+	int i = 0; //groups.size();
+	for (auto &x : paths.items()) {
+		std::string path = x.value().get<std::string>();
+		auto eix = path.find_last_of('.');
+		auto ext = path.substr(eix+1);
+
+		// Command line path is ftl file
+		if (ext == "ftl") {
+			auto *gen = createFileGenerator(root, &pool, path);
+			generators.push_back(gen);
+			++i;
+		} else {
+			ftl::URI uri(path);
+			if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) {
+				net->connect(path)->waitConnection();
+			}
+		}
+	}
+
+	auto stream_uris = net->findAll<std::string>("list_streams");
+
+	if (stream_uris.size() > 0) {
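+		// Mux all discovered network streams into a single receiver, and send
+		// RESPONSE-mode frames back over the same stream.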
+		ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
+		ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver", &pool);
+		ftl::stream::Sender *sender = ftl::create<ftl::stream::Sender>(root, "sender");
+		gen->setStream(stream);
+		sender->setStream(stream);
+
+		int count = 0;
+		for (auto &s : stream_uris) {
+			LOG(INFO) << " --- found stream: " << s;
+			auto *nstream = ftl::create<ftl::stream::Net>(stream, std::string("netstream")+std::to_string(count), net);
+			nstream->set("uri", s);
+			//nstream->select(0, {Channel::Colour}, true);
+			stream->add(nstream);
+			++count;
+		}
+
+		generators.push_back(gen);
+		stream->begin();
+		stream->select(0, Channel::Colour + Channel::Depth + Channel::AudioStereo, true);
+
+		handles.push_back(std::move(pool.onFlush([sender](ftl::data::Frame &f, ftl::codecs::Channel c) {
+			// Send only response channels on a per-frame basis
+			if (f.mode() == ftl::data::FrameMode::RESPONSE) {
+				sender->post(f, c);
+			}
+			return true;
+		})));
+	}
+
+	ftl::audio::Speaker *speaker = ftl::create<ftl::audio::Speaker>(root, "speaker");
+
+	for (auto *g : generators) {
+		handles.push_back(std::move(g->onFrameSet([&](std::shared_ptr<ftl::data::FrameSet> fs) {	
+			LOG(INFO) << "Got frameset: " << fs->timestamp();
+			for (auto &f : fs->frames) {
+				if (f.has(Channel::Colour)) {
+					cv::Mat tmp;
+					f.get<cv::cuda::GpuMat>(Channel::Colour).download(tmp);
+					cv::imshow(std::string("Frame")+std::to_string(f.id().id), tmp);
+				}
+
+				if (f.has(Channel::Depth)) {
+					cv::Mat tmp;
+					f.get<cv::cuda::GpuMat>(Channel::Depth).download(tmp);
+					visualizeDepthMap(tmp,tmp,8.0f);
+					cv::imshow(std::string("Depth")+std::to_string(f.id().id), tmp);
+				}
+
+				if (f.has(Channel::AudioStereo)) {
+					const auto &audio = f.get<std::list<ftl::audio::Audio>>(Channel::AudioStereo).front();
+					LOG(INFO) << "Got stereo: " << audio.data().size();
+					if (f.source() == 0) {
+						speaker->queue(f.timestamp(), f);
+					}
+				}
+			}
+
+			int k = cv::waitKey(10);
+
+			// Send the key back to vision node (TESTING)
+			if (k >= 0) {
+				auto rf = fs->firstFrame().response();
+				rf.create<int>(Channel::Control) = k;
+			}
+
+			return true;
+		})));
+	}
+
+	LOG(INFO) << "Start timer";
+	ftl::timer::start(true);
+
+	LOG(INFO) << "Shutting down...";
+	ftl::timer::stop();
+	ftl::pool.stop(true);
+	ctrl.stop();
+	net->shutdown();
+
+	//cudaProfilerStop();
+
+	LOG(INFO) << "Deleting...";
+
+	delete net;
+
+	ftl::config::cleanup();  // Remove any last configurable objects.
+	LOG(INFO) << "Done.";
+}
+
+int main(int argc, char **argv) {
+	run(ftl::configure(argc, argv, "tools_default"));
+}
diff --git a/applications/vision/CMakeLists.txt b/applications/vision/CMakeLists.txt
index 4c2674d8d..9341fab23 100644
--- a/applications/vision/CMakeLists.txt
+++ b/applications/vision/CMakeLists.txt
@@ -22,6 +22,6 @@ set_property(TARGET ftl-vision PROPERTY CUDA_SEPARABLE_COMPILATION OFF)
 endif()
 
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(ftl-vision ftlrgbd ftlcommon ftlstreams ftlctrl ${OpenCV_LIBS} ${LIBSGM_LIBRARIES} ${CUDA_LIBRARIES} ftlnet ftlaudio)
-
+target_link_libraries(ftl-vision ftlrgbd ftlcommon ftlstreams ftlctrl ${OpenCV_LIBS} ${CUDA_LIBRARIES} ftlnet ftlaudio)
 
+target_precompile_headers(ftl-vision REUSE_FROM ftldata)
diff --git a/applications/vision/src/main.cpp b/applications/vision/src/main.cpp
index 2ed9ed444..496ce2347 100644
--- a/applications/vision/src/main.cpp
+++ b/applications/vision/src/main.cpp
@@ -17,6 +17,8 @@
 
 #include <opencv2/opencv.hpp>
 #include <ftl/rgbd.hpp>
+#include <ftl/data/framepool.hpp>
+#include <ftl/streams/builder.hpp>
 //#include <ftl/middlebury.hpp>
 #include <ftl/net/universe.hpp>
 #include <ftl/master.hpp>
@@ -26,6 +28,7 @@
 
 #include <ftl/streams/netstream.hpp>
 #include <ftl/streams/sender.hpp>
+#include <ftl/streams/receiver.hpp>
 
 #include <ftl/audio/source.hpp>
 
@@ -55,6 +58,8 @@ using std::chrono::milliseconds;
 using cv::Mat;
 using json = nlohmann::json;
 
+static bool quiet = false;
+
 static void run(ftl::Configurable *root) {
 	Universe *net = ftl::create<Universe>(root, "net");
 
@@ -86,7 +91,7 @@ static void run(ftl::Configurable *root) {
 	ftl::ctrl::Master ctrl(root, net);
 
 	// Sync clocks!
-	ftl::timer::add(ftl::timer::kTimerMain, [&time_peer,&sync_counter,net](int64_t ts) {
+	auto timer = ftl::timer::add(ftl::timer::kTimerMain, [&time_peer,&sync_counter,net](int64_t ts) {
 		if (sync_counter-- <= 0 && time_peer != ftl::UUID(0) ) {
 			sync_counter = 20;
 			auto start = std::chrono::high_resolution_clock::now();
@@ -114,10 +119,8 @@ static void run(ftl::Configurable *root) {
 
 	auto paths = root->get<vector<string>>("paths");
 	string file = "";
-	//if (paths && (*paths).size() > 0) file = (*paths)[(*paths).size()-1];
 
 	for (auto &x : *paths) {
-		//LOG(INFO) << "PATH - " << x;
 		if (x != "") {
 			ftl::URI uri(x);
 			if (uri.isValid()) {
@@ -132,56 +135,133 @@ static void run(ftl::Configurable *root) {
 		}
 	}
 
-	Source *source = nullptr;
-	source = ftl::create<Source>(root, "source", net);
 	if (file != "") {
-		//source->set("uri", file);
 		ftl::URI uri(file);
-		uri.to_json(source->getConfig());
-		source->set("uri", uri.getBaseURI());
+		uri.to_json(root->getConfig()["source"]);
 	}
+	Source *source = nullptr;
+	source = ftl::create<Source>(root, "source");
 	
 	ftl::stream::Sender *sender = ftl::create<ftl::stream::Sender>(root, "sender");
 	ftl::stream::Net *outstream = ftl::create<ftl::stream::Net>(root, "stream", net);
-	outstream->set("uri", outstream->getID());
+	outstream->set("uri", root->value("uri", outstream->getID()));
 	outstream->begin();
 	sender->setStream(outstream);
 
-	auto *grp = new ftl::rgbd::Group();
-	source->setChannel(Channel::Depth);
-	grp->addSource(source);
-
-	int stats_count = 0;
-
-	grp->onFrameSet([sender,&stats_count](ftl::rgbd::FrameSet &fs) {
-		fs.id = 0;
-		sender->post(fs);
+	ftl::audio::Source *audioSrc = ftl::create<ftl::audio::Source>(root, "audio_test");
 
-		if (--stats_count <= 0) {
-			auto [fps,latency] = ftl::rgbd::Builder::getStatistics();
-			LOG(INFO) << "Frame rate: " << fps << ", Latency: " << latency;
-			stats_count = 20;
+	ftl::data::Pool pool(root->value("mempool_min", 2),root->value("mempool_max", 5));
+	auto *creator = new ftl::streams::IntervalSourceBuilder(&pool, 0, {source, audioSrc});
+	std::shared_ptr<ftl::streams::BaseBuilder> creatorptr(creator);
+
+	ftl::stream::Receiver *receiver = ftl::create<ftl::stream::Receiver>(root, "receiver", &pool);
+	receiver->setStream(outstream);
+	receiver->registerBuilder(creatorptr);
+
+	// Which channels should be encoded
+	std::unordered_set<Channel> encodable;
+
+	// Send channels on flush
+	auto flushhandle = pool.onFlushSet([sender,&encodable](ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
+		if ((int)c >= 32) sender->post(fs, c);
+		else {
+			if (encodable.count(c)) {
+				sender->post(fs, c);
+			} else {
+				//switch (c) {
+				//case Channel::Colour		:
+				//case Channel::Colour2		:
+				//case Channel::Depth			: 
+				sender->post(fs, c, true); //break;
+				//default						: sender->fakePost(fs, c);
+				//}
+			}
 		}
 		return true;
 	});
 
-	// TODO: TEMPORARY
-	ftl::audio::Source *audioSrc = ftl::create<ftl::audio::Source>(root, "audio_test");
-	audioSrc->onFrameSet([sender](ftl::audio::FrameSet &fs) {
-		sender->post(fs);
-		return true;
-	});
-	
-	auto pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
+	int stats_count = 0;
+	int frames = 0;
+	float latency = 0.0f;
+	int64_t stats_time = 0;
+
+	root->on("quiet", quiet, false);
+
+	auto *pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
 	pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
 	pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
 	pipeline->append<ftl::operators::DepthChannel>("depth");  // Ensure there is a depth channel
-	grp->addPipeline(pipeline);
+
+	bool busy = false;
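+	// 'busy' causes incoming framesets to be dropped while the previous
+	// frameset is still being processed in the thread pool, preventing the
+	// depth pipeline from backing up.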
+
+	auto h = creator->onFrameSet([sender,outstream,&stats_count,&latency,&frames,&stats_time,pipeline,&busy,&encodable](const ftl::data::FrameSetPtr &fs) {
+		if (busy) {
+			LOG(WARNING) << "Depth pipeline drop: " << fs->timestamp();
+			fs->firstFrame().message(ftl::data::Message::Warning_PIPELINE_DROP, "Depth pipeline drop");
+			return true;
+		}
+		busy = true;
+
+		encodable.clear();
+		// Decide what to encode here.
+		const auto sel = outstream->selectedNoExcept(fs->frameset());
+		std::vector<Channel> sortedsel(sel.begin(), sel.end());
+		std::sort(sortedsel.begin(),sortedsel.end());
+
+		if (sortedsel.size() > 0) encodable.emplace(sortedsel[0]);
+		if (sortedsel.size() > 1) encodable.emplace(sortedsel[1]);
+
+	// Only allow the two encoders to exist;
+	// this ensures other encoders are cleaned up.
+		sender->setActiveEncoders(fs->frameset(), encodable);
+
+	// Do all processing in another thread... only if encoding depth
+		//if (encodable.find(Channel::Depth) != encodable.end()) {
+			ftl::pool.push([sender,&stats_count,&latency,&frames,&stats_time,pipeline,&busy,fs](int id) mutable {
+				// Do pipeline here...
+				pipeline->apply(*fs, *fs);
+
+				++frames;
+				latency += float(ftl::timer::get_time() - fs->timestamp());
+
+				// Destruct frameset as soon as possible to send the data...
+				const_cast<ftl::data::FrameSetPtr&>(fs).reset();
+
+				if (!quiet && --stats_count <= 0) {
+					latency /= float(frames);
+					int64_t nowtime = ftl::timer::get_time();
+					stats_time = nowtime - stats_time;
+					float fps = float(frames) / (float(stats_time) / 1000.0f);
+					LOG(INFO) << "Frame rate: " << fps << ", Latency: " << latency;
+					stats_count = 20;
+					frames = 0;
+					latency = 0.0f;
+					stats_time = nowtime;
+				}
+
+				busy = false;
+			});
+		//} else {
+			//LOG(INFO) << "NOT DOING DEPTH";
+		//	sender->forceAvailable(*fs, Channel::Depth);
+		//	busy = false;
+		//}
+
+		// Lock colour right now to encode in parallel
+		fs->flush(ftl::codecs::Channel::Colour);
+		fs->flush(ftl::codecs::Channel::AudioStereo);
+
+		return true;
+	});
+
+	// Start the timed generation of frames
+	creator->start();
 	
+	// Only now start listening for connections
 	net->start();
 
 	LOG(INFO) << "Running...";
-	ftl::timer::start(true);
+	ftl::timer::start(true);  // Blocks
 	LOG(INFO) << "Stopping...";
 	ctrl.stop();
 	
@@ -189,27 +269,16 @@ static void run(ftl::Configurable *root) {
 
 	ftl::pool.stop();
 
-	delete grp;
+	delete source;
+	delete receiver;
 	delete sender;
+	delete pipeline;
+	delete audioSrc;
 	delete outstream;
 
-	//delete source;  // TODO(Nick) Add ftl::destroy
 	delete net;
 }
 
-static void threadSetCUDADevice() {
-	// Ensure all threads have correct cuda device
-	std::atomic<int> ijobs = 0;
-	for (int i=0; i<ftl::pool.size(); ++i) {
-		ftl::pool.push([&ijobs](int id) {
-			ftl::cuda::setDevice();
-			++ijobs;
-			while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
-		});
-	}
-	while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
-}
-
 int main(int argc, char **argv) {
 #ifdef HAVE_PYLON
 	Pylon::PylonAutoInitTerm autoInitTerm;
@@ -219,7 +288,24 @@ int main(int argc, char **argv) {
 	SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
 #endif
 	std::cout << "FTL Vision Node " << FTL_VERSION_LONG << std::endl;
-	auto root = ftl::configure(argc, argv, "vision_default");
+	auto root = ftl::configure(argc, argv, "vision_default", {
+		"uri",
+		"fps",
+		"time_master",
+		"time_peer",
+		"quiet"
+	});
+
+	root->value("restart", 0);
+
+	// Allow config controlled restart
+	root->on("restart", [root]() {
+		auto val = root->get<int>("restart");
+		if (val) {
+			ftl::exit_code = *val;
+			ftl::running = false;
+		}
+	});
 
 	// Use other GPU if available.
 	//ftl::cuda::setDevice(ftl::cuda::deviceCount()-1);
@@ -228,6 +314,9 @@ int main(int argc, char **argv) {
 	run(root);
 
 	delete root;
+
+	ftl::config::cleanup();
+
 	LOG(INFO) << "Terminating with code " << ftl::exit_code;
 	LOG(INFO) << "Branch: " << ftl::branch_name;
 	return ftl::exit_code;
diff --git a/applications/vision/src/middlebury.cpp b/applications/vision/src/middlebury.cpp
deleted file mode 100644
index 531cd8a0e..000000000
--- a/applications/vision/src/middlebury.cpp
+++ /dev/null
@@ -1,301 +0,0 @@
-#include <ftl/middlebury.hpp>
-#include <loguru.hpp>
-#include <ftl/rgbd.hpp>
-
-#include <string>
-#include <algorithm>
-
-#include <nlohmann/json.hpp>
-
-#include <opencv2/highgui.hpp>
-#include <opencv2/imgproc.hpp>
-
-using cv::Mat;
-using cv::Size;
-using std::string;
-using std::min;
-using std::max;
-using std::isnan;
-
-static void skip_comment(FILE *fp) {
-    // skip comment lines in the headers of pnm files
-
-    char c;
-    while ((c=getc(fp)) == '#')
-        while (getc(fp) != '\n') ;
-    ungetc(c, fp);
-}
-
-static void skip_space(FILE *fp) {
-    // skip white space in the headers or pnm files
-
-    char c;
-    do {
-        c = getc(fp);
-    } while (c == '\n' || c == ' ' || c == '\t' || c == '\r');
-    ungetc(c, fp);
-}
-
-static void read_header(FILE *fp, const char *imtype, char c1, char c2, 
-                 int *width, int *height, int *nbands, int thirdArg)
-{
-    // read the header of a pnmfile and initialize width and height
-
-    char c;
-  
-	if (getc(fp) != c1 || getc(fp) != c2)
-		LOG(FATAL) << "ReadFilePGM: wrong magic code for " << imtype << " file";
-	skip_space(fp);
-	skip_comment(fp);
-	skip_space(fp);
-	fscanf(fp, "%d", width);
-	skip_space(fp);
-	fscanf(fp, "%d", height);
-	if (thirdArg) {
-		skip_space(fp);
-		fscanf(fp, "%d", nbands);
-	}
-    // skip SINGLE newline character after reading image height (or third arg)
-	c = getc(fp);
-    if (c == '\r')      // <cr> in some files before newline
-        c = getc(fp);
-    if (c != '\n') {
-        if (c == ' ' || c == '\t' || c == '\r')
-            LOG(FATAL) << "newline expected in file after image height";
-        else
-            LOG(FATAL) << "whitespace expected in file after image height";
-  }
-}
-
-// check whether machine is little endian
-static int littleendian() {
-    int intval = 1;
-    uchar *uval = (uchar *)&intval;
-    return uval[0] == 1;
-}
-
-// 1-band PFM image, see http://netpbm.sourceforge.net/doc/pfm.html
-// 3-band not yet supported
-void ftl::middlebury::readFilePFM(Mat &img, const string &filename)
-{
-    // Open the file and read the header
-    FILE *fp = fopen(filename.c_str(), "rb");
-    if (fp == 0)
-        LOG(FATAL) << "ReadFilePFM: could not open \"" << filename << "\"";
-
-    int width, height, nBands;
-    read_header(fp, "PFM", 'P', 'f', &width, &height, &nBands, 0);
-
-    skip_space(fp);
-
-    float scalef;
-    fscanf(fp, "%f", &scalef);  // scale factor (if negative, little endian)
-
-    // skip SINGLE newline character after reading third arg
-    char c = getc(fp);
-    if (c == '\r')      // <cr> in some files before newline
-        c = getc(fp);
-    if (c != '\n') {
-        if (c == ' ' || c == '\t' || c == '\r')
-            LOG(FATAL) << "newline expected in file after scale factor";
-        else
-            LOG(FATAL) << "whitespace expected in file after scale factor";
-    }
-    
-    // Allocate the image if necessary
-    img = Mat(height, width, CV_32FC1);
-    // Set the image shape
-    //Size sh = img.size();
-
-    int littleEndianFile = (scalef < 0);
-    int littleEndianMachine = littleendian();
-    int needSwap = (littleEndianFile != littleEndianMachine);
-    //printf("endian file = %d, endian machine = %d, need swap = %d\n", 
-    //       littleEndianFile, littleEndianMachine, needSwap);
-
-    for (int y = height-1; y >= 0; y--) { // PFM stores rows top-to-bottom!!!!
-	int n = width;
-	float* ptr = &img.at<float>(y, 0, 0);
-	if ((int)fread(ptr, sizeof(float), n, fp) != n)
-	    LOG(FATAL) << "ReadFilePFM(" << filename << "): file is too short";
-	
-	if (needSwap) { // if endianness doesn't agree, swap bytes
-	    uchar* ptr = (uchar *)&img.at<uchar>(y, 0, 0);
-	    int x = 0;
-	    uchar tmp = 0;
-	    while (x < n) {
-		tmp = ptr[0]; ptr[0] = ptr[3]; ptr[3] = tmp;
-		tmp = ptr[1]; ptr[1] = ptr[2]; ptr[2] = tmp;
-		ptr += 4;
-		x++;
-	    }
-	}
-    }
-    if (fclose(fp))
-        LOG(FATAL) << "ReadFilePGM(" << filename << "): error closing file";
-}
-
-// 1-band PFM image, see http://netpbm.sourceforge.net/doc/pfm.html
-// 3-band not yet supported
-void ftl::middlebury::writeFilePFM(const Mat &img, const char* filename, float scalefactor)
-{
-    // Write a PFM file
-    Size sh = img.size();
-    int nBands = img.channels();
-    if (nBands != 1)
-	LOG(FATAL) << "WriteFilePFM(" << filename << "): can only write 1-band image as pfm for now";
-	
-    // Open the file
-    FILE *stream = fopen(filename, "wb");
-    if (stream == 0)
-        LOG(FATAL) << "WriteFilePFM: could not open " << filename;
-
-    // sign of scalefact indicates endianness, see pfms specs
-    if (littleendian())
-	scalefactor = -scalefactor;
-
-    // write the header: 3 lines: Pf, dimensions, scale factor (negative val == little endian)
-    fprintf(stream, "Pf\n%d %d\n%f\n", sh.width, sh.height, scalefactor);
-
-    int n = sh.width;
-    // write rows -- pfm stores rows in inverse order!
-    for (int y = sh.height-1; y >= 0; y--) {
-	const float* ptr = &img.at<float>(0, y, 0);
-	if ((int)fwrite(ptr, sizeof(float), n, stream) != n)
-	    LOG(FATAL) << "WriteFilePFM(" << filename << "): file is too short";
-    }
-    
-    // close file
-    if (fclose(stream))
-        LOG(FATAL) << "WriteFilePFM(" << filename << "): error closing file";
-}
-
-void ftl::middlebury::evaldisp(const Mat &disp, const Mat &gtdisp, const Mat &mask, float badthresh, int maxdisp, int rounddisp)
-{
-    Size sh = gtdisp.size();
-    Size sh2 = disp.size();
-    Size msh = mask.size();
-    int width = sh.width, height = sh.height;
-    int width2 = sh2.width, height2 = sh2.height;
-    int scale = width / width2;
-
-    if ((!(scale == 1 || scale == 2 || scale == 4))
-	|| (scale * width2 != width)
-	|| (scale * height2 != height)) {
-	printf("   disp size = %4d x %4d\n", width2, height2);
-	printf("GT disp size = %4d x %4d\n", width,  height);
-	LOG(ERROR) << "GT disp size must be exactly 1, 2, or 4 * disp size";
-    }
-
-    int usemask = (msh.width > 0 && msh.height > 0);
-    if (usemask && (msh != sh))
-	LOG(ERROR) << "mask image must have same size as GT";
-
-    int n = 0;
-    int bad = 0;
-    int invalid = 0;
-    float serr = 0;
-    for (int y = 0; y < height; y++) {
-	for (int x = 0; x < width; x++) {
-	    float gt = gtdisp.at<float>(y, x, 0);
-	    if (gt == INFINITY) // unknown
-		continue;
-	    float d = scale * disp.at<float>(y / scale, x / scale, 0);
-	    int valid = (!isnan(d) && d < 256.0f); // NOTE: Is meant to be infinity in middlebury
-	    if (valid) {
-		float maxd = scale * maxdisp; // max disp range
-		d = max(0.0f, min(maxd, d)); // clip disps to max disp range
-	    }
-	    if (valid && rounddisp)
-		d = round(d);
-	    float err = fabs(d - gt);
-	    if (usemask && mask.at<float>(y, x, 0) != 255) { // don't evaluate pixel
-	    } else {
-		n++;
-		if (valid) {
-		    serr += err;
-		    if (err > badthresh) {
-			bad++;
-		    }
-		} else {// invalid (i.e. hole in sparse disp map)
-		    invalid++;
-		}
-	    }
-	}
-    }
-    float badpercent =  100.0*bad/n;
-    float invalidpercent =  100.0*invalid/n;
-    float totalbadpercent =  100.0*(bad+invalid)/n;
-    float avgErr = serr / (n - invalid); // CHANGED 10/14/2014 -- was: serr / n
-    printf("mask  bad%.1f  invalid  totbad   avgErr\n", badthresh);
-    printf("%4.1f  %6.2f  %6.2f   %6.2f  %6.2f\n",   100.0*n/(width * height), 
-	   badpercent, invalidpercent, totalbadpercent, avgErr);
-}
-
-void ftl::middlebury::test(nlohmann::json &config) {
-	// Load dataset images
-	Mat l = cv::imread((string)config["middlebury"]["dataset"] + "/im0.png");
-	Mat r = cv::imread((string)config["middlebury"]["dataset"] + "/im1.png");
-	
-	// Load ground truth
-	Mat gt;
-	readFilePFM(gt, (string)config["middlebury"]["dataset"] + "/disp0.pfm");
-	
-	if ((float)config["middlebury"]["scale"] != 1.0f) {
-		float scale = (float)config["middlebury"]["scale"];
-		//cv::resize(gt, gt, cv::Size(gt.cols * scale,gt.rows * scale), 0, 0, cv::INTER_LINEAR);
-		cv::resize(l, l, cv::Size(l.cols * scale,l.rows * scale), 0, 0, cv::INTER_LINEAR);
-		cv::resize(r, r, cv::Size(r.cols * scale,r.rows * scale), 0, 0, cv::INTER_LINEAR);
-	}
-	
-	// TODO(Nick) Update to use an RGBD Image source
-	// Run algorithm
-	//auto disparity = ftl::Disparity::create(config["disparity"]);
-    
-    Mat disp;
-   // disparity->compute(l,r,disp);
-	//disp.convertTo(disp, CV_32F);
-	
-	// Display results
-	evaldisp(disp, gt, Mat(), (float)config["middlebury"]["threshold"], (int)config["disparity"]["maximum"], 0);
-	
-	/*if (gt.cols > 1600) {
-		cv::resize(gt, gt, cv::Size(gt.cols * 0.25,gt.rows * 0.25), 0, 0, cv::INTER_LINEAR);
-	}*/
-	if (disp.cols > 1600) {
-		cv::resize(disp, disp, cv::Size(disp.cols * 0.25,disp.rows * 0.25), 0, 0, cv::INTER_LINEAR);
-	}
-    cv::resize(gt, gt, cv::Size(disp.cols,disp.rows), 0, 0, cv::INTER_LINEAR);
-	
-	double mindisp, mindisp_gt;
-	double maxdisp, maxdisp_gt;
-	Mat mask;
-	threshold(disp,mask,255.0, 255, cv::THRESH_BINARY_INV);
-	normalize(mask, mask, 0, 255, cv::NORM_MINMAX, CV_8U);
-	cv::minMaxLoc(disp, &mindisp, &maxdisp, 0, 0, mask);
-    cv::minMaxLoc(gt, &mindisp_gt, &maxdisp_gt, 0, 0);
-
-    //disp = (disp < 256.0f);
-    //disp = disp + (mindisp_gt - mindisp);
-    disp.convertTo(disp, CV_8U, 255.0f / (maxdisp_gt*(float)config["middlebury"]["scale"]));
-    disp = disp & mask;
-
-	gt = gt / maxdisp_gt; // TODO Read from calib.txt
-    gt.convertTo(gt, CV_8U, 255.0f);
-	//disp = disp / maxdisp;
-	imshow("Ground Truth", gt);
-	imshow("Disparity", disp);
-    imshow("Diff", gt - disp);
-	
-	while (cv::waitKey(10) != 27);
-	
-	/*cv::putText(yourImageMat, 
-            "Here is some text",
-            cv::Point(5,5), // Coordinates
-            cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
-            1.0, // Scale. 2.0 = 2x bigger
-            cv::Scalar(255,255,255), // BGR Color
-            1, // Line Thickness (Optional)
-            cv::CV_AA); // Anti-alias (Optional)*/
-}
-
diff --git a/applications/vision/src/streamer.cpp b/applications/vision/src/streamer.cpp
deleted file mode 100644
index 29b84568c..000000000
--- a/applications/vision/src/streamer.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/streamer.hpp>
-#include <vector>
-// #include <zlib.h>
-// #include <lz4.h>
-
-using ftl::Streamer;
-using ftl::net::Universe;
-using cv::Mat;
-using nlohmann::json;
-using std::string;
-using std::vector;
-
-Streamer::Streamer(Universe &net, json &config) : net_(net), config_(config) {
-	uri_ = string("ftl://utu.fi/")+(string)config["name"]+string("/rgb-d");
-	net.createResource(uri_);
-}
-
-Streamer::~Streamer() {
-
-}
-
-void Streamer::send(const Mat &rgb, const Mat &depth) {
-	// Compress the rgb as jpeg.
-	vector<unsigned char> rgb_buf;
-	cv::imencode(".jpg", rgb, rgb_buf);
-	
-	Mat d2;
-    depth.convertTo(d2, CV_16UC1, 16*100);
-	
-	vector<unsigned char> d_buf;
-	/*d_buf.resize(d2.step*d2.rows);
-	z_stream defstream;
-    defstream.zalloc = Z_NULL;
-    defstream.zfree = Z_NULL;
-    defstream.opaque = Z_NULL;
-    defstream.avail_in = d2.step*d2.rows;
-    defstream.next_in = (Bytef *)d2.data; // input char array
-    defstream.avail_out = (uInt)d2.step*d2.rows; // size of output
-    defstream.next_out = (Bytef *)d_buf.data(); // output char array
-    
-    deflateInit(&defstream, Z_DEFAULT_COMPRESSION);
-    deflate(&defstream, Z_FINISH);
-    deflateEnd(&defstream);
-    
-    d2.copyTo(last);
-    
-    d_buf.resize(defstream.total_out);*/
-    
-    // LZ4 Version
-    // d_buf.resize(LZ4_compressBound(depth.step*depth.rows));
-    // int s = LZ4_compress_default((char*)depth.data, (char*)d_buf.data(), depth.step*depth.rows, d_buf.size());
-    // d_buf.resize(s);
-
-    cv::imencode(".png", d2, d_buf);
-    //LOG(INFO) << "Depth Size = " << ((float)d_buf.size() / (1024.0f*1024.0f));
-
-	try {
-    	net_.publish(uri_, rgb_buf, d_buf);
-	} catch (...) {
-		LOG(ERROR) << "Exception on net publish to " << uri_;
-	}
-}
-
diff --git a/applications/vision/src/sync.cpp b/applications/vision/src/sync.cpp
deleted file mode 100644
index 8d1671a3f..000000000
--- a/applications/vision/src/sync.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2019 Nicolas Pope
- */
-
-#include <ftl/synched.hpp>
-
-using ftl::SyncSource;
-using cv::Mat;
-
-SyncSource::SyncSource() {
-	channels_.push_back(Mat());
-	channels_.push_back(Mat());
-}
-
-void SyncSource::addChannel(const std::string &c) {
-}
-
-void SyncSource::feed(int channel, cv::Mat &m, double ts) {
-	if (channel > static_cast<int>(channels_.size())) return;
-	channels_[channel] = m;
-}
-
-bool SyncSource::get(int channel, cv::Mat &m) {
-	if (channel > static_cast<int>(channels_.size())) return false;
-	m = channels_[channel];
-	return true;
-}
-
-double SyncSource::latency() const {
-	return 0.0;
-}
-
diff --git a/cmake/FindPylon.cmake b/cmake/FindPylon.cmake
index a9f69e004..ba194ab1d 100644
--- a/cmake/FindPylon.cmake
+++ b/cmake/FindPylon.cmake
@@ -19,12 +19,22 @@ if (PYLON_DIR)
 
 	mark_as_advanced(PYLON_FOUND)
 
+	if (WIN32)
+	list(APPEND PYLON_LIBRARIES PylonBase_v6_1 PylonUtility_v6_1 GenApi_MD_VC141_v3_1_Basler_pylon GCBase_MD_VC141_v3_1_Basler_pylon)
+	else()
 	list(APPEND PYLON_LIBRARIES pylonbase pylonutility GenApi_gcc_v3_1_Basler_pylon GCBase_gcc_v3_1_Basler_pylon)
+	endif()
 
 	add_library(Pylon INTERFACE)
 	set_property(TARGET Pylon PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${PYLON_DIR}/include)
 	#set_property(TARGET Pylon PROPERTY INTERFACE_LINK_DIRECTORIES ${PYLON_DIR}/lib)
+
+	if (WIN32)
+	link_directories(${PYLON_DIR}/lib/x64)
+	else()
 	link_directories(${PYLON_DIR}/lib)
+	endif()
+
 	set_property(TARGET Pylon PROPERTY INTERFACE_LINK_LIBRARIES ${PYLON_LIBRARIES})
 else()
 	add_library(Pylon INTERFACE)
diff --git a/cmake/Findglog.cmake b/cmake/Findglog.cmake
index 6b07e3ba4..0648a7bdc 100644
--- a/cmake/Findglog.cmake
+++ b/cmake/Findglog.cmake
@@ -43,5 +43,5 @@ if(GLOG_FOUND)
     add_library(glog::glog UNKNOWN IMPORTED)
     set_property(TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${GLOG_INCLUDE_DIRS})
 	set_property(TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY})
-    message(STATUS "Found glog: ${GLOG_LIBRARY}")
+    message(STATUS "Found glog: ${GLOG_LIBRARY} ${GLOG_INCLUDE_DIRS}")
 endif()
diff --git a/components/audio/CMakeLists.txt b/components/audio/CMakeLists.txt
index bd5548ba6..cae7b4080 100644
--- a/components/audio/CMakeLists.txt
+++ b/components/audio/CMakeLists.txt
@@ -1,6 +1,5 @@
 set(AUDIOSRC
 	src/source.cpp
-	src/frame.cpp
 	src/portaudio.cpp
 	src/speaker.cpp
 	src/software_encoder.cpp
diff --git a/components/audio/include/ftl/audio/audio.hpp b/components/audio/include/ftl/audio/audio.hpp
index 12939b665..5241c28dc 100644
--- a/components/audio/include/ftl/audio/audio.hpp
+++ b/components/audio/include/ftl/audio/audio.hpp
@@ -22,4 +22,14 @@ class Audio {
 }
 }
 
+template <>
+inline bool ftl::data::make_type<std::list<ftl::audio::Audio>>() {
+	return false;
+}
+
+template <>
+inline bool ftl::data::decode_type<std::list<ftl::audio::Audio>>(std::any &a, const std::vector<uint8_t> &data) {
+	return false;
+}
+
 #endif  // _FTL_AUDIO_AUDIO_HPP_
diff --git a/components/audio/include/ftl/audio/buffer.hpp b/components/audio/include/ftl/audio/buffer.hpp
index 236762991..353fa55fc 100644
--- a/components/audio/include/ftl/audio/buffer.hpp
+++ b/components/audio/include/ftl/audio/buffer.hpp
@@ -4,8 +4,8 @@
 #include <vector>
 #include <cmath>
 
-#define LOGURU_REPLACE_GLOG 1
-#include <loguru.hpp>
+//#define LOGURU_REPLACE_GLOG 1
+//#include <loguru.hpp>
 
 namespace ftl {
 namespace audio {
@@ -102,7 +102,6 @@ class FixedBuffer : public ftl::audio::Buffer<T> {
 	void reset() override {
 		Buffer<T>::reset();
 		write_position_ = 0; //int(this->cur_delay_);
-		LOG(INFO) << "RESET AUDIO: " << write_position_;
 		read_position_ = 0;
 	}
 
@@ -120,7 +119,7 @@ static T fracIndex(const std::vector<T> &in, float ix, int c) {
 	const auto i1 = static_cast<unsigned int>(ix);
 	const auto i2 = static_cast<unsigned int>(ix+1.0f);
 	const float alpha = ix - static_cast<float>(i1);
-	return (i2*CHAN+CHAN >= in.size()) ? in[i1*CHAN+c] : in[i1*CHAN+c]*(1.0f-alpha) + in[i2*CHAN+c]*alpha;
+	return static_cast<T>((i2*CHAN+CHAN >= in.size()) ? in[i1*CHAN+c] : in[i1*CHAN+c]*(1.0f-alpha) + in[i2*CHAN+c]*alpha);
 }
 
 inline float clamp(float v, float c) { return (v < -c) ? -c : (v > c) ? c : v; }
diff --git a/components/audio/include/ftl/audio/frame.hpp b/components/audio/include/ftl/audio/frame.hpp
index c30fb66e5..720a02d1f 100644
--- a/components/audio/include/ftl/audio/frame.hpp
+++ b/components/audio/include/ftl/audio/frame.hpp
@@ -2,47 +2,25 @@
 #ifndef _FTL_AUDIO_FRAME_HPP_
 #define _FTL_AUDIO_FRAME_HPP_
 
-#include <ftl/data/framestate.hpp>
-#include <ftl/data/frame.hpp>
+#include <ftl/data/new_frame.hpp>
 #include <ftl/audio/audio.hpp>
 
 namespace ftl {
 namespace audio {
 
+static constexpr int kFrameSize = 960;
+static constexpr int kSampleRate = 48000;
+
+typedef ftl::data::Frame Frame;
+typedef ftl::audio::Audio AudioFrame;
+
 struct AudioSettings {
 	int sample_rate;
 	int frame_size;
 	int channels;
 };
 
-struct AudioData {
-	template <typename T>
-	const T &as() const {
-		throw FTL_Error("Type not valid for audio channel");
-	}
-
-	template <typename T>
-	T &as() {
-		throw FTL_Error("Type not valid for audio channel");
-	}
-
-	template <typename T>
-	T &make() {
-		throw FTL_Error("Type not valid for audio channel");
-	}
-
-	inline void reset() {}
-
-	Audio data;
-};
-
-// Specialisations for getting Audio data.
-template <> Audio &AudioData::as<Audio>(); 
-template <> const Audio &AudioData::as<Audio>() const;
-template <> Audio &AudioData::make<Audio>();
 
-typedef ftl::data::FrameState<AudioSettings,2> FrameState;
-typedef ftl::data::Frame<32,2,FrameState,AudioData> Frame;
 
 }
 }
diff --git a/components/audio/include/ftl/audio/frameset.hpp b/components/audio/include/ftl/audio/frameset.hpp
index 02027e88e..ba18d2fe6 100644
--- a/components/audio/include/ftl/audio/frameset.hpp
+++ b/components/audio/include/ftl/audio/frameset.hpp
@@ -2,12 +2,12 @@
 #define _FTL_AUDIO_FRAMESET_HPP_
 
 #include <ftl/audio/frame.hpp>
-#include <ftl/data/frameset.hpp>
+#include <ftl/data/new_frameset.hpp>
 
 namespace ftl {
 namespace audio {
 
-typedef ftl::data::FrameSet<ftl::audio::Frame> FrameSet;
+typedef ftl::data::FrameSet FrameSet;
 
 }
 }
diff --git a/components/audio/include/ftl/audio/source.hpp b/components/audio/include/ftl/audio/source.hpp
index ff0663ddc..1ece75db1 100644
--- a/components/audio/include/ftl/audio/source.hpp
+++ b/components/audio/include/ftl/audio/source.hpp
@@ -3,6 +3,7 @@
 
 #include <ftl/audio/buffer.hpp>
 #include <ftl/audio/frameset.hpp>
+#include <ftl/data/creators.hpp>
 #include <ftl/configurable.hpp>
 #include <ftl/config.h>
 
@@ -13,40 +14,23 @@
 namespace ftl {
 namespace audio {
 
-static constexpr int kFrameSize = 960;
-
-typedef ftl::data::Generator<ftl::audio::FrameSet> Generator;
-
-class Source : public ftl::Configurable, public ftl::audio::Generator {
+class Source : public ftl::Configurable, public ftl::data::DiscreteSource {
     public:
     explicit Source(nlohmann::json &config);
     ~Source();
 
-    /** Number of frames in last frameset. This can change over time. */
-	size_t size() override;
+	bool capture(int64_t ts) override;
 
-	/**
-	 * Get the persistent state object for a frame. An exception is thrown
-	 * for a bad index.
-	 */
-	ftl::audio::FrameState &state(size_t ix) override;
-
-	/** Register a callback to receive new frame sets. */
-	void onFrameSet(const ftl::audio::FrameSet::Callback &) override;
+	bool retrieve(ftl::data::Frame &) override;
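+
+	/* Note (inferred from this patch): capture(ts) is called on the timer
+	 * tick to latch how much buffered audio is available, and retrieve()
+	 * then copies that audio into the frame's Audio list channel. */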
 
     private:
-    ftl::audio::FrameState state_;
     bool active_;
-    ftl::timer::TimerHandle timer_hp_;
-	ftl::timer::TimerHandle timer_main_;
-	ftl::audio::FrameSet::Callback cb_;
+	ftl::audio::AudioSettings settings_;
 
 	ftl::audio::Buffer<short> *buffer_;
 	int to_read_;
 	int64_t latency_;
 
-	ftl::audio::FrameSet frameset_;
-
 	#ifdef HAVE_PORTAUDIO
 	PaStream *stream_;
 	#endif
diff --git a/components/audio/include/ftl/audio/speaker.hpp b/components/audio/include/ftl/audio/speaker.hpp
index f27795bf4..885a6e475 100644
--- a/components/audio/include/ftl/audio/speaker.hpp
+++ b/components/audio/include/ftl/audio/speaker.hpp
@@ -21,11 +21,14 @@ class Speaker : public ftl::Configurable {
 	void queue(int64_t ts, ftl::audio::Frame &fs);
 
 	void setDelay(int64_t ms);
+	void setVolume(float value);
+	float volume();
 
 	private:
 	ftl::audio::Buffer<short> *buffer_;
 	bool active_;
 	float extra_delay_;
+	float volume_;
 	int64_t latency_;
 
 	#ifdef HAVE_PORTAUDIO
diff --git a/components/audio/src/portaudio.cpp b/components/audio/src/portaudio.cpp
index 7395877c2..74e209386 100644
--- a/components/audio/src/portaudio.cpp
+++ b/components/audio/src/portaudio.cpp
@@ -1,17 +1,19 @@
 #include <ftl/audio/portaudio.hpp>
 #include <ftl/config.h>
+#include <ftl/threads.hpp>
 #include <loguru.hpp>
 
 #include <atomic>
 
 static std::atomic<int> counter = 0;
+static MUTEX pa_mutex;
 
 #ifdef HAVE_PORTAUDIO
 
 #include <portaudio.h>
 
 void ftl::audio::pa_init() {
-    // TODO: Mutex lock?
+    UNIQUE_LOCK(pa_mutex, lk);
     if (counter == 0) {
         auto err = Pa_Initialize();
         if (err != paNoError) {
@@ -34,7 +36,7 @@ void ftl::audio::pa_init() {
 }
 
 void ftl::audio::pa_final() {
-    // TODO: Mutex lock?
+    UNIQUE_LOCK(pa_mutex, lk);
     --counter;
     if (counter == 0) {
         auto err = Pa_Terminate();
diff --git a/components/audio/src/software_decoder.cpp b/components/audio/src/software_decoder.cpp
index c5516fa26..62888eed9 100644
--- a/components/audio/src/software_decoder.cpp
+++ b/components/audio/src/software_decoder.cpp
@@ -26,9 +26,8 @@ SoftwareDecoder::~SoftwareDecoder() {
 bool SoftwareDecoder::_createOpus(const ftl::codecs::Packet &pkt) {
 	#ifdef HAVE_OPUS
 	bool stereo = pkt.flags & ftl::codecs::kFlagStereo;
-	if (pkt.definition == cur_definition_ && stereo == cur_stereo_ && opus_decoder_) return true;
+	if (opus_decoder_ && stereo == cur_stereo_) return true;
 
-	cur_definition_ = pkt.definition;
 	cur_stereo_ = stereo;
 
 	if (opus_decoder_) {
@@ -36,12 +35,7 @@ bool SoftwareDecoder::_createOpus(const ftl::codecs::Packet &pkt) {
 		opus_decoder_ = nullptr;
 	}
 
-	int sample_rate;
-	switch (pkt.definition) {
-	case ftl::codecs::definition_t::hz48000		: sample_rate = 48000; break;
-	case ftl::codecs::definition_t::hz44100		: sample_rate = 44100; break;
-	default: return false;
-	}
+	int sample_rate = 48000;  // TODO: Allow it to be different
 
 	int errcode = 0;
 	int channels = (stereo) ? 2 : 1;
@@ -78,6 +72,9 @@ bool SoftwareDecoder::_decodeOpus(const ftl::codecs::Packet &pkt, std::vector<sh
 
 	for (size_t i=0; i<pkt.data.size(); ) {
 		const short *len = (const short*)inptr;
+		if (*len == 0) break;
+		if (frames == 10) break;
+
 		inptr += 2;
 		i += (*len)+2;
 		int samples = opus_multistream_decode(opus_decoder_, inptr, *len, outptr, FRAME_SIZE, 0);
diff --git a/components/audio/src/software_encoder.cpp b/components/audio/src/software_encoder.cpp
index 9a5da23a0..adc618c5e 100644
--- a/components/audio/src/software_encoder.cpp
+++ b/components/audio/src/software_encoder.cpp
@@ -16,7 +16,7 @@ using ftl::codecs::codec_t;
 #define FRAME_SIZE 960
 #define MAX_PACKET_SIZE (3*2*FRAME_SIZE)
 
-SoftwareEncoder::SoftwareEncoder() : ftl::audio::Encoder(), opus_encoder_(nullptr), cur_bitrate_(0) {
+SoftwareEncoder::SoftwareEncoder() : ftl::audio::Encoder(), opus_encoder_(nullptr), cur_stereo_(false), cur_bitrate_(0) {
 
 }
 
@@ -44,9 +44,8 @@ bool SoftwareEncoder::encode(const std::vector<short> &in, ftl::codecs::Packet &
 bool SoftwareEncoder::_createOpus(ftl::codecs::Packet &pkt) {
 	#ifdef HAVE_OPUS
 	bool stereo = pkt.flags & ftl::codecs::kFlagStereo;
-	if (pkt.definition == cur_definition_ && stereo == cur_stereo_ && opus_encoder_) return true;
+	if (opus_encoder_ && stereo == cur_stereo_) return true;
 
-	cur_definition_ = pkt.definition;
 	cur_stereo_ = stereo;
 
 	if (opus_encoder_) {
@@ -54,12 +53,7 @@ bool SoftwareEncoder::_createOpus(ftl::codecs::Packet &pkt) {
 		opus_encoder_ = nullptr;
 	}
 
-	int sample_rate;
-	switch (pkt.definition) {
-	case ftl::codecs::definition_t::hz48000		: sample_rate = 48000; break;
-	case ftl::codecs::definition_t::hz44100		: sample_rate = 44100; break;
-	default: return false;
-	}
+	int sample_rate = 48000;  // TODO: Allow it to be different
 
 	int errcode = 0;
 	int channels = (stereo) ? 2 : 1;
@@ -92,11 +86,12 @@ bool SoftwareEncoder::_encodeOpus(const std::vector<short> &in, ftl::codecs::Pac
 	int channels = (cur_stereo_) ? 2 : 1;
 
 	int frame_est = (in.size() / (channels*FRAME_SIZE))+1;
-	pkt.data.resize(MAX_PACKET_SIZE*frame_est);
+	size_t insize = pkt.data.size();
+	pkt.data.resize(insize+MAX_PACKET_SIZE*frame_est);
 	int count = 0;
 	int frames = 0;
 
-	unsigned char *outptr = pkt.data.data();
+	unsigned char *outptr = pkt.data.data()+insize;
 
 	//LOG(INFO) << "Encode " << (in.size() / (channels*FRAME_SIZE)) << " audio frames";
 
@@ -104,7 +99,6 @@ bool SoftwareEncoder::_encodeOpus(const std::vector<short> &in, ftl::codecs::Pac
 		short *len = (short*)outptr;
 		outptr += 2;
 		int nbBytes = opus_multistream_encode(opus_encoder_, &in.data()[i], FRAME_SIZE, outptr, MAX_PACKET_SIZE);
-		//LOG(INFO) << "Opus encode: " << nbBytes << ", " << (in.size()-i);
 		if (nbBytes <= 0) return false;
 
 		//if (nbBytes > 32000) LOG(WARNING) << "Packet exceeds size limit";
@@ -116,7 +110,7 @@ bool SoftwareEncoder::_encodeOpus(const std::vector<short> &in, ftl::codecs::Pac
 		++frames;
 	}
 
-	pkt.data.resize(count);
+	pkt.data.resize(insize+count);
 	//LOG(INFO) << "Opus Encode = " << pkt.data.size() << ", " << frames;
 	return true;
 
diff --git a/components/audio/src/source.cpp b/components/audio/src/source.cpp
index aa3258b9d..c6c49757e 100644
--- a/components/audio/src/source.cpp
+++ b/components/audio/src/source.cpp
@@ -7,8 +7,6 @@
 
 using ftl::audio::Source;
 using ftl::audio::Frame;
-using ftl::audio::FrameSet;
-using ftl::audio::FrameState;
 using ftl::audio::Audio;
 using ftl::codecs::Channel;
 
@@ -135,51 +133,10 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(null
 
 	to_read_ = 0;
 
-	ftl::audio::AudioSettings settings;
-	settings.channels = channels;
-	settings.sample_rate = 48000;
-	settings.frame_size = 960;
-	state_.setLeft(settings);
-
-    timer_hp_ = ftl::timer::add(ftl::timer::kTimerHighPrecision, [this](int64_t ts) {
-        if (buffer_) to_read_ = buffer_->size();
-        return true;
-    });
-
-	timer_main_ = ftl::timer::add(ftl::timer::kTimerMain, [this](int64_t ts) {
-
-        // Remove one interval since the audio starts from the last frame
-		frameset_.timestamp = ts - ftl::timer::getInterval() + latency_;
-
-		frameset_.id = 0;
-		frameset_.count = 1;
-		//frameset_.stale = false;
-		frameset_.clear(ftl::data::FSFlag::STALE);
-
-        if (to_read_ < 1 || !buffer_) return true;
-
-		if (frameset_.frames.size() < 1) frameset_.frames.emplace_back();
-
-		auto &frame = frameset_.frames[0];
-		frame.reset();
-		frame.setOrigin(&state_);
-        std::vector<short> &data = frame.create<Audio>((buffer_->channels() == 2) ? Channel::AudioStereo : Channel::AudioMono).data();
-
-		/*data.resize(ftl::audio::kFrameSize*to_read_*channels_);  // For stereo * 2
-		short *ptr = data.data();
-		for (int i=0; i<to_read_; ++i) {
-			if (channels_ == 1) mono_buffer_.readFrame(ptr);
-			else stereo_buffer_.readFrame(ptr);
-			ptr += ftl::audio::kFrameSize*channels_;  // For stereo * 2
-		}*/
-		buffer_->read(data, to_read_);
-
-		// Then do something with the data!
-		//LOG(INFO) << "Audio Frames Sent: " << to_read_ << " - " << ltime;
-		if (cb_) cb_(frameset_);
-
-        return true;
-    }); 
+	settings_.channels = channels;
+	settings_.sample_rate = 48000;
+	settings_.frame_size = 960;
+	//state_.setLeft(settings);
 
 	LOG(INFO) << "Microphone ready.";
 
@@ -196,7 +153,7 @@ Source::~Source() {
         active_ = false;
 
 		#ifdef HAVE_PORTAUDIO
-        auto err = Pa_StopStream(stream_);
+        auto err = Pa_AbortStream(stream_);
 
         if (err != paNoError) {
             LOG(ERROR) << "Portaudio stop stream error: " << Pa_GetErrorText(err);
@@ -216,15 +173,20 @@ Source::~Source() {
 	#endif
 }
 
-size_t Source::size() {
-    return 1;
-}
-
-ftl::audio::FrameState &Source::state(size_t ix) {
-    if (ix >= 1) throw FTL_Error("State index out-of-bounds");
-    return state_;
+bool Source::capture(int64_t ts) {
+	if (buffer_) to_read_ = buffer_->size();
+	return true;
 }
 
-void Source::onFrameSet(const ftl::audio::FrameSet::Callback &cb) {
-	cb_ = cb;
+bool Source::retrieve(ftl::data::Frame &frame) {
+	// Remove one interval since the audio starts from the last frame
+	//frameset_.timestamp = ts - ftl::timer::getInterval() + latency_;
+
+	if (to_read_ < 1 || !buffer_) return true;
+	auto alist = frame.create<std::list<Audio>>((buffer_->channels() == 2) ? Channel::AudioStereo : Channel::AudioMono);
+	Audio aframe;
+	std::vector<short> &data = aframe.data();
+	buffer_->read(data, to_read_);
+	alist = std::move(aframe);
+	return true;
+}
diff --git a/components/audio/src/speaker.cpp b/components/audio/src/speaker.cpp
index 48fcde2db..9474b4500 100644
--- a/components/audio/src/speaker.cpp
+++ b/components/audio/src/speaker.cpp
@@ -8,7 +8,6 @@
 using ftl::audio::Speaker;
 using ftl::audio::Frame;
 using ftl::audio::FrameSet;
-using ftl::audio::FrameState;
 using ftl::audio::Audio;
 using ftl::codecs::Channel;
 
@@ -36,9 +35,10 @@ Speaker::Speaker(nlohmann::json &config) : ftl::Configurable(config), buffer_(nu
 	#else  // No portaudio
 	LOG(ERROR) << "No audio support";
 	#endif
+	volume_ = 1.0f;
 	active_ = false;
 	extra_delay_ = value("delay",0.0f);
-	on("delay", [this](const ftl::config::Event &e) {
+	on("delay", [this]() {
 		extra_delay_ = value("delay",0.0f);
 	});
 }
@@ -48,7 +48,7 @@ Speaker::~Speaker() {
 		active_ = false;
 
 		#ifdef HAVE_PORTAUDIO
-		auto err = Pa_StopStream(stream_);
+		auto err = Pa_AbortStream(stream_);
 
 		if (err != paNoError) {
 			LOG(ERROR) << "Portaudio stop stream error: " << Pa_GetErrorText(err);
@@ -83,12 +83,12 @@ void Speaker::_open(int fsize, int sample, int channels) {
 	}
 
 	PaStreamParameters outputParameters;
-    //bzero( &inputParameters, sizeof( inputParameters ) );
-    outputParameters.channelCount = channels;
-    outputParameters.device = Pa_GetDefaultOutputDevice();
-    outputParameters.sampleFormat = paInt16;
-    outputParameters.suggestedLatency = Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency;
-    outputParameters.hostApiSpecificStreamInfo = NULL;
+	//bzero( &inputParameters, sizeof( inputParameters ) );
+	outputParameters.channelCount = channels;
+	outputParameters.device = Pa_GetDefaultOutputDevice();
+	outputParameters.sampleFormat = paInt16;
+	outputParameters.suggestedLatency = Pa_GetDeviceInfo(outputParameters.device)->defaultLowOutputLatency;
+	outputParameters.hostApiSpecificStreamInfo = NULL;
 
 	//LOG(INFO) << "OUTPUT LATENCY: " << outputParameters.suggestedLatency;
 	latency_ = int64_t(outputParameters.suggestedLatency * 1000.0);
@@ -97,8 +97,8 @@ void Speaker::_open(int fsize, int sample, int channels) {
 		&stream_,
 		NULL,
 		&outputParameters,
-		sample,  // Sample rate
-		960,    // Size of single frame
+		sample,	// Sample rate
+		960,	// Size of single frame
 		paNoFlag,
 		(channels == 1) ? pa_speaker_callback<ftl::audio::MonoBuffer16<2000>> : pa_speaker_callback<ftl::audio::StereoBuffer16<2000>>,
 		this->buffer_
@@ -127,15 +127,27 @@ void Speaker::_open(int fsize, int sample, int channels) {
 }
 
 void Speaker::queue(int64_t ts, ftl::audio::Frame &frame) {
-	auto &audio = frame.get<ftl::audio::Audio>((frame.hasChannel(Channel::AudioStereo)) ? Channel::AudioStereo : Channel::AudioMono);
+	const auto &audio = frame.get<std::list<ftl::audio::Audio>>
+		((frame.hasChannel(Channel::AudioStereo)) ? Channel::AudioStereo : Channel::AudioMono);
 
 	if (!buffer_) {
-		_open(960, frame.getSettings().sample_rate, frame.getSettings().channels);
+		_open(960, 48000, (frame.hasChannel(Channel::AudioStereo)) ? 2 : 1);
 	}
 	if (!buffer_) return;
 
 	//LOG(INFO) << "Buffer Fullness (" << ts << "): " << buffer_->size() << " - " << audio.size();
-	buffer_->write(audio.data());
+	for (const auto &d : audio) {
+		if (volume_ != 1.0) {
+			auto data = d.data();
+			for (auto &v : data) {
+				v = v * volume_;
+			}
+			buffer_->write(data);
+		} else {
+			buffer_->write(d.data());
+		}
+	}
 	//LOG(INFO) << "Audio delay: " << buffer_.delay() << "s";
 }
 
@@ -148,3 +160,12 @@ void Speaker::setDelay(int64_t ms) {
 		//LOG(INFO) << "Audio delay: " << buffer_->delay();
 	}
 }
+
+void Speaker::setVolume(float value) {
+	// TODO: adjust volume using system mixer
+	volume_ = std::max(0.0f, std::min(1.0f, value));
+}
+
+float Speaker::volume() {
+	return volume_;
+}
diff --git a/components/calibration/CMakeLists.txt b/components/calibration/CMakeLists.txt
index 00faed0b6..68d658842 100644
--- a/components/calibration/CMakeLists.txt
+++ b/components/calibration/CMakeLists.txt
@@ -1,15 +1,20 @@
 set(CALIBSRC
 	src/parameters.cpp
+	src/extrinsic.cpp
+	src/structures.cpp
+	src/visibility.cpp
+	src/object.cpp
 )
 
 if (WITH_CERES)
 	list(APPEND CALIBSRC src/optimize.cpp)
+	set_source_files_properties(src/optimize.cpp PROPERTIES COMPILE_FLAGS -O3)
 endif()
 
 add_library(ftlcalibration ${CALIBSRC})
 
 target_include_directories(ftlcalibration
-	PUBLIC 
+	PUBLIC
 	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
 	$<INSTALL_INTERFACE:include>
 	PRIVATE
@@ -17,7 +22,8 @@ target_include_directories(ftlcalibration
 	${OpenCV_INCLUDE_DIRS}
 )
 
-target_link_libraries(ftlcalibration ftlcommon Threads::Threads ${OpenCV_LIBS} Eigen3::Eigen ceres)
+# ftlcodecs required for ftl::data::Channel
+target_link_libraries(ftlcalibration ftlcommon ftlcodecs Threads::Threads ${OpenCV_LIBS} Eigen3::Eigen ceres)
 
 if (BUILD_TESTS)
 	ADD_SUBDIRECTORY(test)
diff --git a/components/calibration/include/ftl/calibration.hpp b/components/calibration/include/ftl/calibration.hpp
index be7533631..0a6d98ca0 100644
--- a/components/calibration/include/ftl/calibration.hpp
+++ b/components/calibration/include/ftl/calibration.hpp
@@ -1,2 +1,6 @@
+
 #include "calibration/parameters.hpp"
 #include "calibration/optimize.hpp"
+#include "calibration/extrinsic.hpp"
+#include "calibration/structures.hpp"
+#include "calibration/object.hpp"
diff --git a/components/calibration/include/ftl/calibration/extrinsic.hpp b/components/calibration/include/ftl/calibration/extrinsic.hpp
new file mode 100644
index 000000000..5bded2bc3
--- /dev/null
+++ b/components/calibration/include/ftl/calibration/extrinsic.hpp
@@ -0,0 +1,244 @@
+#pragma once
+
+#include <ftl/utility/msgpack.hpp>
+
+#include <ftl/calibration/visibility.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <ftl/calibration/optimize.hpp>
+#include <opencv2/core.hpp>
+
+#include <ftl/utility/msgpack.hpp>
+
+#include <set>
+
+namespace ftl {
+namespace calibration {
+
+/**
+ * Helper for collecting image points from multiple cameras as sets. Each set
+ * of images doesn't have to be complete (not all cameras need to be included).
+ *
+ * Implementation limit: the maximum number of cameras is 64; valid camera
+ * indices are between 0 and 63, and other values are undefined behaviour.
+ *
+ * Template parameter is float or double.
+ */
+template<typename T>
+class CalibrationPoints {
+public:
+	struct Points {
+		// camera index
+		uint64_t cameras;
+		 // object index
+		unsigned int object;
+		// points in image coordinates, camera index as key
+		std::map<unsigned int, std::vector<cv::Point_<T>>> points;
+		// triangulated points, camera pair as map key
+		std::map<std::pair<unsigned int, unsigned int>,
+				 std::vector<cv::Point3_<T>>> triangulated;
+
+		bool has(unsigned int c) const { return (cameras & (uint64_t(1) << c)); }
+
+		MSGPACK_DEFINE(cameras, object, points, triangulated);
+	};
+
+	CalibrationPoints() : count_(0), visibility_(64), current_{0, ~(unsigned int)(0), {}, {}} {};
+
+	/** Set calibration target. Can be changed after calling next() and before
+	 * adding any points. */
+
+	/* 2d (planar) target. Returns object ID */
+	unsigned int setObject(const std::vector<cv::Point_<T>> &target);
+	/* 3d target. Returns object ID */
+	unsigned int setObject(const std::vector<cv::Point3_<T>> &target);
+
+	/* Add points for current set. Points can only be set once for each set. */
+	void addPoints(unsigned int c, const std::vector<cv::Point_<T>>& points);
+
+	/** Continue to the next set of images. A target must be set. If no points
+	 * were added, next() is a no-op. */
+	void next();
+
+	/** Set triangulated points. Note: flat input.
+	 * @param	c_base	base camera (origin to point coordinates)
+	 * @param	c_match	match camera
+	 * @param	points	points
+	 * @param	idx		index offset, if more image points are added, adjust idx
+	 * 					accordingly (value of getPointsCount() before adding new
+	 * 					points).
+	 */
+	void setTriangulatedPoints(unsigned int c_base, unsigned int c_match, const std::vector<cv::Point3_<T>>& points, int idx=0);
+	void resetTriangulatedPoints();
+	/** TODO: same as above but non-flat input
+	void setTriangulatedPoints(unsigned int c_base, unsigned int c_match, const std::vector<std::vector<cv::Point3_<T>>>& points, int idx=0);
+	*/
+
+	/** Clear current set of points (clears queue for next()) */
+	void clear();
+
+	/** Get count (how many sets) for camera(s). */
+	int getCount(unsigned int c);
+	int getCount(std::vector<unsigned int> cs);
+
+	/** total number of points */
+	int getPointsCount();
+
+	/** Get the intersection of points for the given cameras and object.
+	 * Returns vectors of image points, with cameras in the same order as the
+	 * input parameter. */
+	std::vector<std::vector<cv::Point_<T>>> getPoints(const std::vector<unsigned int> &cameras, unsigned int object);
+
+	std::vector<cv::Point3_<T>> getObject(unsigned int);
+
+	const Visibility& visibility();
+
+	/** Get all points. See Points struct. */
+	const std::vector<Points>& all() { return points_; }
+
+protected:
+	bool hasCamera(unsigned int c);
+	void setCamera(unsigned int c);
+
+private:
+	int count_;
+	Visibility visibility_;
+	Points current_;
+	std::vector<Points> points_;
+	std::vector<std::vector<cv::Point3_<T>>> objects_;
+
+public:
+	MSGPACK_DEFINE(count_, visibility_, current_, points_, objects_);
+};
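+
+/* Illustrative usage sketch (not part of the API; the variable names and
+ * point data below are hypothetical):
+ *
+ *   ftl::calibration::CalibrationPoints<double> points;
+ *   unsigned int obj = points.setObject(target_points_2d); // planar target
+ *   points.addPoints(0, image_points_cam0);                // camera 0
+ *   points.addPoints(1, image_points_cam1);                // camera 1
+ *   points.next();                                         // close this image set
+ *   auto common = points.getPoints({0, 1}, obj);           // sets seen by both cameras
+ */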
+
+/**
+ * Same as OpenCV's recoverPose(), but does not assume the same intrinsic
+ * parameters for both cameras.
+ *
+ * @todo Write unit tests to check that intrinsic parameters work as expected.
+ */
+int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
+	const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
+	const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t,
+	double distanceThresh, cv::Mat &triangulatedPoints);
+
+
+/** @brief Calibrate camera pair.
+ *
+ * Alternative to cv::StereoCalibrate.
+ *
+ * The essential matrix is estimated using all point correspondences, and pose is
+ * calculated with OpenCV's recoverPose() (modification to allow different
+ * intrinsic parameters for each camera).
+ *
+ * Non-linear optimization is used to determine the scale from the object
+ * points, and bundle adjustment is applied to the points and extrinsic
+ * parameters. The calibration target shape is also included.
+ *
+ * @param	K1		intrinsic matrix for first camera
+ * @param	D1		distortion coefficients for first camera
+ * @param	K2		intrinsic matrix for second camera
+ * @param	D2		distortion coefficients for second camera
+ * @param	points1	image points observed in first camera
+ * @param	points2	image points observed in second camera
+ * @param	object	calibration target points (once)
+ * @param	R		(out) rotation matrix (camera 1 to 2)
+ * @param	t		(out) translation vector (camera 1 to 2)
+ * @param	points_out	triangulated points
+ * @param	optimize	optimize points
+ *
+ * @returns	RMS reprojection error
+ *
+ * Following conditions must hold for input parameters: (points1.size() ==
+ * points2.size()) and (points1.size() % object_points.size() == 0).
+ */
+double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
+	const cv::Mat &K2, const cv::Mat &D2, const std::vector<cv::Point2d> &points1,
+	const std::vector<cv::Point2d> &points2, const std::vector<cv::Point3d> &object_points,
+	cv::Mat &R, cv::Mat &t, std::vector<cv::Point3d> &points_out, bool optimize=true);
+
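+/* Illustrative sketch of a pair calibration call (the inputs are hypothetical
+ * and must satisfy the size conditions documented above):
+ *
+ *   cv::Mat R, t;
+ *   std::vector<cv::Point3d> triangulated;
+ *   double rmse = ftl::calibration::calibratePair(K1, D1, K2, D2,
+ *       points1, points2, object_points, R, t, triangulated);
+ */
+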
+class ExtrinsicCalibration {
+public:
+
+	/** add a single camera. Returns index of camera. */
+	unsigned int addCamera(const CalibrationData::Intrinsic &);
+	/** Add a stereo camera pair. Pairs always use other cameras to estimate
+	 * initial pose. Returns index of first camera. */
+	unsigned int addStereoCamera(const CalibrationData::Intrinsic &, const CalibrationData::Intrinsic &);
+	/** Add stereo camera pair with initial pose. Returns index of first camera. */
+	unsigned int addStereoCamera(const CalibrationData::Intrinsic &, const CalibrationData::Intrinsic &, cv::Vec3d rvec, cv::Vec3d tvec);
+
+	const CalibrationData::Intrinsic& intrinsic(unsigned int c);
+	const CalibrationData::Extrinsic& extrinsic(unsigned int c);
+	const CalibrationData::Calibration& calibration(unsigned int c);
+	const CalibrationData::Calibration& calibrationOptimized(unsigned int c);
+
+	/** Add points/targets; Only one calibration target supported!
+	 *
+	 * TODO: Support multiple calibration targets: calibrate pair without
+	 * optimization or support multiple calibration objects there. The input at
+	 * the moment is a flat vector; it needs to be grouped by calibration target
+	 * size (similar to cv::stereoCalibrate/cv::calibrateCamera).
+	 */
+	CalibrationPoints<double>& points() { return points_; }
+
+	/* set bundle adjustment options */
+	void setOptions(ftl::calibration::BundleAdjustment::Options options) { options_ = options; }
+	ftl::calibration::BundleAdjustment::Options options() { return options_; }
+
+	/** Number of cameras added */
+	unsigned int camerasCount() { return calib_.size(); }
+
+	/** status message */
+	std::string status();
+
+	/** run calibration, returns reprojection error */
+	double run();
+
+	double reprojectionError(unsigned int c); // reprojection error rmse
+	double reprojectionError(); // total reprojection error rmse
+
+	/** debug methods */
+	bool fromFile(const std::string& fname);
+	bool toFile(const std::string& fname); // should return new instance...
+
+	MSGPACK_DEFINE(points_, mask_, pairs_, calib_);
+
+protected:
+	/** Initial pairwise calibration and triangulation. */
+	void calculatePairPoses();
+
+	/** Calculate initial poses from pairs */
+	void calculateInitialPoses();
+
+	/** Bundle adjustment on initial poses and triangulations. */
+	double optimize();
+
+private:
+	void updateStatus_(std::string);
+	std::vector<CalibrationData::Calibration> calib_;
+	std::vector<CalibrationData::Calibration> calib_optimized_;
+	ftl::calibration::BundleAdjustment::Options options_;
+
+	CalibrationPoints<double> points_;
+	std::set<std::pair<unsigned int, unsigned int>> mask_;
+	std::map<std::pair<unsigned int, unsigned int>, std::tuple<cv::Mat, cv::Mat, double>> pairs_;
+	int min_points_ = 64; // minimum number of points required for pair calibration
+	// TODO: add map {c1,c2} for existing calibration which is used if available.
+	//
+	std::shared_ptr<std::string> status_;
+
+	std::vector<double> rmse_;
+	double rmse_total_;
+
+	// Threshold for a point to be skipped (m): the absdiff between minimum and
+	// maximum values of each coordinate axis over all triangulated points is
+	// calculated and its L2 norm is compared against the threshold value.
+	// Optimization uses median coordinate values; the threshold can be fairly big.
+	static constexpr float threshold_bad_ = 0.67;
+	// threshold for warning message (% of points discarded)
+	static constexpr float threhsold_warning_ = 0.1;
+};
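+
+/* Illustrative workflow sketch (intrinsics, targets and image points are
+ * hypothetical; exact behaviour depends on the implementation):
+ *
+ *   ftl::calibration::ExtrinsicCalibration ecalib;
+ *   unsigned int c0 = ecalib.addStereoCamera(intrinsic_left, intrinsic_right);
+ *   ecalib.points().setObject(target);          // one calibration target
+ *   ecalib.points().addPoints(c0, obs_left);
+ *   ecalib.points().addPoints(c0 + 1, obs_right);
+ *   ecalib.points().next();
+ *   double rmse = ecalib.run();                 // returns reprojection error
+ *   auto calib = ecalib.calibrationOptimized(c0);
+ */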
+
+
+} // namespace calibration
+}
diff --git a/components/calibration/include/ftl/calibration/object.hpp b/components/calibration/include/ftl/calibration/object.hpp
new file mode 100644
index 000000000..40e42b9ff
--- /dev/null
+++ b/components/calibration/include/ftl/calibration/object.hpp
@@ -0,0 +1,52 @@
+#pragma once
+
+#include <opencv2/core/mat.hpp>
+#include <opencv2/aruco.hpp>
+
+/** Calibration objects */
+
+namespace ftl {
+namespace calibration {
+
+class CalibrationObject {
+public:
+	virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat()) = 0;
+	virtual std::vector<cv::Point3d> object() = 0;
+};
+
+class ChessboardObject : public CalibrationObject {
+public:
+	ChessboardObject(int rows=18, int cols=25, double square_size=0.015);
+	virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat());
+	std::vector<cv::Point3d> object() override;
+
+	cv::Size chessboardSize();
+	double squareSize();
+
+private:
+	void init();
+	cv::Size chessboard_size_;
+	double square_size_;
+	int flags_;
+	std::vector<cv::Point3d> object_points_;
+};
+
+class ArUCoObject : public CalibrationObject {
+public:
+	ArUCoObject(cv::aruco::PREDEFINED_DICTIONARY_NAME dictionary = cv::aruco::DICT_6X6_100, float baseline = 0.25f, float tag_size = 0.15f, int id1=0, int id2=1);
+	virtual int detect(cv::InputArray, std::vector<cv::Point2d>&, const cv::Mat& K=cv::Mat(), const cv::Mat& D=cv::Mat());
+	std::vector<cv::Point3d> object() override;
+
+private:
+	cv::Ptr<cv::aruco::Dictionary> dict_;
+	cv::Ptr<cv::aruco::DetectorParameters> params_;
+	float baseline_;
+	float tag_size_;
+	int id1_;
+	int id2_;
+};
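+
+/* Illustrative detection sketch (the input image is hypothetical):
+ *
+ *   ftl::calibration::ChessboardObject board;       // default 18x25, 15 mm squares
+ *   std::vector<cv::Point2d> image_points;
+ *   if (board.detect(image, image_points) > 0) {
+ *       auto object_points = board.object();        // corresponding 3D target points
+ *   }
+ */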
+
+} // namespace calibration
+} // namespace ftl
diff --git a/components/calibration/include/ftl/calibration/optimize.hpp b/components/calibration/include/ftl/calibration/optimize.hpp
index ac122f517..eeaf6d9de 100644
--- a/components/calibration/include/ftl/calibration/optimize.hpp
+++ b/components/calibration/include/ftl/calibration/optimize.hpp
@@ -9,7 +9,10 @@
 
 #include <ftl/config.h>
 
+#ifdef HAVE_CERES
 #include <ceres/ceres.h>
+#endif
+
 #include <opencv2/core/core.hpp>
 
 // BundleAdjustment uses Point3d instances via double*
@@ -21,8 +24,78 @@ static_assert(std::is_standard_layout<cv::Point3d>());
 namespace ftl {
 namespace calibration {
 
+/**
+ * Camera parameters (Ceres)
+ */
+struct Camera {
+	Camera() {}
+	Camera(const cv::Mat& K, const cv::Mat& D, const cv::Mat& R, const cv::Mat& tvec, cv::Size size);
+	Camera(const CalibrationData::Calibration& calib);
+
+	CalibrationData::Intrinsic intrinsic() const;
+	CalibrationData::Extrinsic extrinsic() const;
+
+	void setRotation(const cv::Mat& R);
+	void setTranslation(const cv::Mat& tvec);
+	void setExtrinsic(const cv::Mat& R, const cv::Mat& t) {
+		setRotation(R);
+		setTranslation(t);
+	}
+
+	void setIntrinsic(const cv::Mat& K, cv::Size sz);
+	void setDistortion(const cv::Mat &D);
+	void setIntrinsic(const cv::Mat& K, const cv::Mat& D, cv::Size sz) {
+		setIntrinsic(K, sz);
+		setDistortion(D);
+	}
+
+	cv::Mat intrinsicMatrix() const;
+	cv::Mat distortionCoefficients() const;
+
+	cv::Mat rvec() const;
+	cv::Mat tvec() const;
+	cv::Mat rmat() const;
+
+	cv::Mat extrinsicMatrix() const;
+	cv::Mat extrinsicMatrixInverse() const;
+
+	cv::Size size;
+
+	const static int n_parameters = 18;
+	const static int n_distortion_parameters = 8;
+
+	double data[n_parameters] = {0.0};
+
+	enum Parameter {
+		ROTATION = 0,
+		Q1 = 0,
+		Q2 = 1,
+		Q3 = 2,
+		Q4 = 3,
+		TRANSLATION = 4,
+		TX = 4,
+		TY = 5,
+		TZ = 6,
+		F = 7,
+		CX = 8,
+		CY = 9,
+		DISTORTION = 10,
+		K1 = 10,
+		K2 = 11,
+		P1 = 12,
+		P2 = 13,
+		K3 = 14,
+		K4 = 15,
+		K5 = 16,
+		K6 = 17
+	};
+};
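+
+/* Illustrative sketch of the packed parameter block (K, D, R, tvec are
+ * hypothetical calibration inputs):
+ *
+ *   ftl::calibration::Camera cam(K, D, R, tvec, cv::Size(1280, 720));
+ *   double f  = cam.data[Camera::F];   // focal length
+ *   double tx = cam.data[Camera::TX];  // translation x
+ */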
+
 #ifdef HAVE_CERES
 
+/** Project point using camera model implemented for Ceres */
+cv::Point2d projectPoint(const Camera& camera, const cv::Point3d &p);
+
 /**
  * @brief Optimize scale.
  * @param object Reference object points
@@ -41,9 +114,6 @@ double optimizeScale(const std::vector<cv::Point3d>& object, std::vector<cv::Poi
  * - rotation and translation rx, ry, rz, tx, ty, tz,
  * - focal legth and principal point: f, cx, cy
  * - radial distortion (first three cofficients): k1, k2, k3
- *
- * @note: Distortion paramters are used in reprojection error, but they are
- *        not not optimized.
  */
 class BundleAdjustment {
 public:
@@ -74,11 +144,14 @@ public:
 
 		/**
 		 * @todo Radial distortion must be monotonic. This constraint is not
-		 *       included in the model, thus distortion parameters are always
-		 *       fixed.
+		 *       included in the model.
 		 */
-		// distortion coefficient optimization is not supported
+		/// fix all distortion coefficients to constant (initial values)
 		bool fix_distortion = true;
+		/// use distortion coefficients k4, k5, and k6; if false, set to zero
+		bool rational_model = true;
+		/// assume zero distortion during optimization
+		bool zero_distortion = false;
 
 		bool optimize_intrinsic = true;
 		bool optimize_motion = true;
@@ -90,7 +163,7 @@ public:
 	};
 
 	/**
-	 * Add camera(s)
+	 * Add camera(s). Stored as pointers. TODO: copy instead
 	 */
 	void addCamera(Camera &K);
 	void addCameras(std::vector<Camera> &K);
@@ -99,15 +172,19 @@ public:
 	 * @brief Add points
 	 */
 	void addPoint(const std::vector<bool>& visibility, const std::vector<cv::Point2d>& observations, cv::Point3d& point);
+	/**
+	 * @brief Add points; one vector per camera. TODO: verify this works
+	 */
 	void addPoints(const std::vector<std::vector<bool>>& visibility, const std::vector<std::vector<cv::Point2d>>& observations,
 		std::vector<cv::Point3d>& points);
 
 	/**
-	 * @brief Add points, all assumed visible
+	 * @brief Add points, all assumed visible. Values copied.
 	 */
 	void addPoint(const std::vector<cv::Point2d>& observations, cv::Point3d& point);
 	void addPoints(const std::vector<std::vector<cv::Point2d>>& observations, std::vector<cv::Point3d>& points);
 
+	/** TODO: estimate a pose for each view to be optimized */
 	void addObject(const std::vector<cv::Point3d>& object_points);
 
 	/** @brief Perform bundle adjustment with custom options.
@@ -118,20 +195,20 @@ public:
 	 */
 	void run();
 
-	/** @brief Calculate MSE error (for one camera)
+	/** @brief Calculate RMSE error (for one camera)
 	 */
 	double reprojectionError(const int camera) const;
 
-	/** @brief Calculate MSE error for all cameras
+	/** @brief Calculate RMSE error for all cameras
 	 */
 	double reprojectionError() const;
 
 protected:
-	double* getCameraPtr(int i) { return cameras_[i]->data; }
+	double* getCameraPtr(int i) { return cameras_.at(i)->data; }
 
-	/** @brief Calculate MSE error
+	/** @brief Calculate squared error
 	 */
-	void _reprojectionErrorMSE(const int camera, double &error, double &npoints) const;
+	void _reprojectionErrorSE(const int camera, double &error, double &npoints) const;
 
 	/** @brief Set camera parametrization (fixed parameters/cameras)
 	 */
@@ -150,8 +227,8 @@ private:
 		// pixel coordinates: x, y
 		std::vector<cv::Point2d> observations;
 
-		// world coordinates: x, y, z
-		double* point;
+		// point in world coordinates
+		cv::Point3d point;
 	};
 
 	// group of points with known structure; from idx_start to idx_end
diff --git a/components/calibration/include/ftl/calibration/parameters.hpp b/components/calibration/include/ftl/calibration/parameters.hpp
index f26150657..d0ed5ec4a 100644
--- a/components/calibration/include/ftl/calibration/parameters.hpp
+++ b/components/calibration/include/ftl/calibration/parameters.hpp
@@ -2,66 +2,13 @@
 #ifndef _FTL_CALIBRATION_PARAMETERS_HPP_
 #define _FTL_CALIBRATION_PARAMETERS_HPP_
 
+#include <ftl/calibration/structures.hpp>
+
 #include <opencv2/core/core.hpp>
 
 namespace ftl {
 namespace calibration {
 
-/**
- * Camera paramters
- */
-struct Camera {
-	Camera() {}
-	Camera(const cv::Mat& K, const cv::Mat& D, const cv::Mat& R, const cv::Mat& tvec);
-
-	void setRotation(const cv::Mat& R);
-	void setTranslation(const cv::Mat& tvec);
-	void setExtrinsic(const cv::Mat& R, const cv::Mat& t) {
-		setRotation(R);
-		setTranslation(t);
-	}
-
-	void setIntrinsic(const cv::Mat& K);
-	void setDistortion(const cv::Mat &D);
-	void setIntrinsic(const cv::Mat& K, const cv::Mat& D) {
-		setIntrinsic(K);
-		setDistortion(D);
-	}
-
-	cv::Mat intrinsicMatrix() const;
-	cv::Mat distortionCoefficients() const;
-
-	cv::Mat rvec() const;
-	cv::Mat tvec() const;
-	cv::Mat rmat() const;
-
-	cv::Mat extrinsicMatrix() const;
-	cv::Mat extrinsicMatrixInverse() const;
-
-	const static int n_parameters = 12;
-	const static int n_distortion_parameters = 3;
-
-	double data[n_parameters] = {0.0};
-
-	enum Parameter {
-		ROTATION = 0,
-		RX = 0,
-		RY = 1,
-		RZ = 2,
-		TRANSLATION = 3,
-		TX = 3,
-		TY = 4,
-		TZ = 5,
-		F = 6,
-		CX = 7,
-		CY = 8,
-		DISTORTION = 9,
-		K1 = 9,
-		K2 = 10,
-		K3 = 11
-	};
-};
-
 namespace validate {
 
 /**
@@ -80,11 +27,11 @@ bool cameraMatrix(const cv::Mat &M);
  * @param D    distortion coefficients
  * @param size resolution
  * @note Tangential and prism distortion coefficients are not validated.
- * 
+ *
  * Radial distortion is always monotonic for real lenses and distortion
  * function has to be bijective. This is verified by evaluating the distortion
  * function for integer values from 0 to sqrt(width^2+height^2).
- * 
+ *
  * Camera model documented in
  * https://docs.opencv.org/master/d9/d0c/group__calib3d.html#details
  */
@@ -92,11 +39,9 @@ bool distortionCoefficients(const cv::Mat &D, cv::Size size);
 
 }
 
-
 namespace transform {
 
-// TODO: Some of the methods can be directly replace with OpenCV
-//       (opencv2/calib3d.hpp)
+// TODO: Some of the methods can be directly replace with OpenCV (opencv2/calib3d.hpp)
 
 /**
  * @brief Get rotation matrix and translation vector from transformation matrix.
@@ -126,19 +71,20 @@ inline void inverse(cv::Mat &R, cv::Mat &t) {
 }
 
 /**
- * @brief Inverse transform inplace
+ * @brief Inverse transform
  * @param T   transformation matrix (4x4)
  */
-inline void inverse(cv::Mat &T) {
+[[nodiscard]] inline cv::Mat inverse(const cv::Mat &T) {
 	cv::Mat rmat;
 	cv::Mat tvec;
 	getRotationAndTranslation(T, rmat, tvec);
-	T = cv::Mat::eye(4, 4, CV_64FC1);
+	cv::Mat T_ = cv::Mat::eye(4, 4, CV_64FC1);
 
-	T(cv::Rect(3, 0, 1, 1)) = -rmat.col(0).dot(tvec);
-	T(cv::Rect(3, 1, 1, 1)) = -rmat.col(1).dot(tvec);
-	T(cv::Rect(3, 2, 1, 1)) = -rmat.col(2).dot(tvec);
-	T(cv::Rect(0, 0, 3, 3)) = rmat.t();
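+	// the inverse of [R | t] is [R^T | -R^T t]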
+	T_(cv::Rect(3, 0, 1, 1)) = -rmat.col(0).dot(tvec);
+	T_(cv::Rect(3, 1, 1, 1)) = -rmat.col(1).dot(tvec);
+	T_(cv::Rect(3, 2, 1, 1)) = -rmat.col(2).dot(tvec);
+	T_(cv::Rect(0, 0, 3, 3)) = rmat.t();
+	return T_;
 }
 
 inline cv::Point3d apply(const cv::Point3d& point, const cv::InputArray& R, const cv::InputArray& t) {
@@ -160,10 +106,10 @@ inline cv::Point3d apply(const cv::Point3d& point, const cv::InputArray& T) {
 
 /**
  * @brief Scale camera intrinsic matrix
- * @param size_new	New resolution
  * @param size_old	Original (camera matrix) resolution
+ * @param size_new	New resolution
  */
-cv::Mat scaleCameraMatrix(const cv::Mat &K, const cv::Size &size_new, const cv::Size &size_old);
+[[nodiscard]] cv::Mat scaleCameraMatrix(const cv::Mat &K, const cv::Size &size_old, const cv::Size &size_new);
 
 /**
  * @brief Calculate MSE reprojection error
diff --git a/components/calibration/include/ftl/calibration/structures.hpp b/components/calibration/include/ftl/calibration/structures.hpp
new file mode 100644
index 000000000..e0eeb2fce
--- /dev/null
+++ b/components/calibration/include/ftl/calibration/structures.hpp
@@ -0,0 +1,119 @@
+#ifndef _FTL_CALIBRATION_STRUCTURES_HPP_
+#define _FTL_CALIBRATION_STRUCTURES_HPP_
+
+#include <ftl/utility/msgpack.hpp>
+#include <ftl/codecs/channels.hpp>
+
+namespace ftl {
+namespace calibration {
+
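+/**
+ * Per-source calibration for each channel. Illustrative sketch (file name and
+ * channel below are hypothetical):
+ *
+ * @code
+ * auto data = CalibrationData::readFile("calib.msgpack");
+ * if (data.hasCalibration(ftl::codecs::Channel::Left)) {
+ * 	auto& calib = data.get(ftl::codecs::Channel::Left);
+ * 	cv::Mat K = calib.intrinsic.matrix();
+ * 	cv::Mat T = calib.extrinsic.matrix();
+ * }
+ * @endcode
+ */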
+struct CalibrationData {
+
+	struct Intrinsic {
+		friend CalibrationData;
+
+		struct DistortionCoefficients {
+			friend CalibrationData;
+
+			DistortionCoefficients();
+
+			/**
+			 * Access distortion coefficients, stored in OpenCV order. Out of
+			 * bounds access is undefined.
+			 *
+			 * 0,1			k1, k2 radial distortion
+			 * 2,3			p1, p2 tangential distortion
+			 * 4,5,6,7		k3-k6 radial distortion
+			 * 8,9,10,11	s1-s4 thin prism distortion
+			 */
+			double& operator[](unsigned i);
+			double operator[](unsigned i) const;
+
+			/**
+			 * Return distortion parameters as cv::Mat. The returned Mat
+			 * shares the same memory (no copy is made).
+			 */
+			const cv::Mat Mat(int nparams = 12) const;
+			cv::Mat Mat(int nparams = 12);
+
+		private:
+			std::vector<double> data_;
+
+		public:
+			MSGPACK_DEFINE(data_);
+		};
+		Intrinsic();
+		Intrinsic(const cv::Mat &K, cv::Size sz);
+		Intrinsic(const cv::Mat &K, const cv::Mat &D, cv::Size sz);
+
+		/** New instance with scaled values for new resolution */
+		Intrinsic(const Intrinsic& other, cv::Size sz);
+
+		/** Replace current values with new ones */
+		void set(const cv::Mat &K, cv::Size sz);
+		void set(const cv::Mat &K, const cv::Mat &D, cv::Size sz);
+
+		/** Camera matrix */
+		cv::Mat matrix() const;
+		/** Camera matrix (scaled) */
+		cv::Mat matrix(cv::Size) const;
+
+		cv::Size resolution;
+		double fx;
+		double fy;
+		double cx;
+		double cy;
+		DistortionCoefficients distCoeffs;
+
+		/** (optional) sensor size */
+		cv::Size2d sensorSize;
+
+		MSGPACK_DEFINE(resolution, fx, fy, cx, cy, distCoeffs, sensorSize);
+	};
+	struct Extrinsic {
+		Extrinsic();
+		Extrinsic(const cv::Mat &T);
+		Extrinsic(cv::InputArray R, cv::InputArray t);
+
+		void set(const cv::Mat &T);
+		void set(cv::InputArray R, cv::InputArray t);
+
+		Extrinsic inverse() const;
+
+		/** get as a 4x4 matrix */
+		cv::Mat matrix() const;
+		/** get 3x3 rotation matrix */
+		cv::Mat rmat() const;
+
+		cv::Vec3d rvec;
+		cv::Vec3d tvec;
+		MSGPACK_DEFINE(rvec, tvec);
+	};
+
+	struct Calibration {
+		Intrinsic intrinsic;
+		Extrinsic extrinsic;
+		MSGPACK_DEFINE(intrinsic, extrinsic);
+	};
+
+	CalibrationData() : enabled(false) {}
+	bool enabled;
+
+	[[nodiscard]]
+	static CalibrationData readFile(const std::string &path);
+	void writeFile(const std::string &path) const;
+
+	/** Get reference for channel. Create if doesn't exist. */
+	Calibration& get(ftl::codecs::Channel channel);
+	bool hasCalibration(ftl::codecs::Channel channel) const;
+
+private:
+	std::map<ftl::codecs::Channel, Calibration> data_;
+
+public:
+	MSGPACK_DEFINE(enabled, data_);
+};
+
+}
+}
+
+#endif
diff --git a/components/calibration/include/ftl/calibration/visibility.hpp b/components/calibration/include/ftl/calibration/visibility.hpp
new file mode 100644
index 000000000..ae1c2991b
--- /dev/null
+++ b/components/calibration/include/ftl/calibration/visibility.hpp
@@ -0,0 +1,109 @@
+#pragma once
+#ifndef _FTL_VISIBILITY_HPP_
+#define _FTL_VISIBILITY_HPP_
+
+#include <vector>
+#include <string>
+
+#include <ftl/utility/msgpack.hpp>
+
+namespace ftl {
+namespace calibration {
+
+/**
+ * @brief Result from Dijkstra's algorithm.
+ */
+template<typename T>
+class Paths {
+public:
+	Paths(int id, const std::vector<int> &previous, const std::vector<T> &distances);
+
+	/**
+	 * @brief Shortest path from node i.
+	 */
+	std::vector<int> from(int i) const;
+
+	/**
+	 * @brief Shortest to node i.
+	 * @brief Shortest path to node i.
+	std::vector<int> to(int i) const;
+
+	/**
+	 * @brief Is graph connected, i.e. all nodes are reachable.
+	 */
+	bool connected() const;
+
+	/**
+	 * @brief Distance to node i
+	 */
+	T distance(int i) const;
+
+	std::string to_string() const;
+
+private:
+	int id_; // node id
+	std::vector<int> previous_;
+	std::vector<T> distances_;
+};
+
+/**
+ * @brief Dijkstra's algorithm: shortest paths from node i.
+ * @param i		node index
+ * @param graph	adjacency matrix
+ *
+ * dijstra<int>(), dijstra<float>() and dijstra<double>() defined.
+ */
+template<typename T>
+Paths<T> dijstra(const int i, const std::vector<std::vector<T>> &graph_);
+
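+/**
+ * Tracks how many calibration points each camera pair has observed together
+ * and exposes the resulting co-visibility graph. Illustrative sketch (the
+ * update masks are hypothetical):
+ *
+ * @code
+ * Visibility vis(3);
+ * vis.update(uint64_t(0b011)); // cameras 0 and 1 saw the current points
+ * vis.update(uint64_t(0b110)); // cameras 1 and 2 saw the next points
+ * int c_ref = vis.argmax();    // camera with most shared observations
+ * auto path = vis.shortestPath(c_ref).to(2);
+ * @endcode
+ */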
+class Visibility {
+public:
+	Visibility() {};
+	explicit Visibility(int n_cameras);
+	explicit Visibility(const std::vector<std::vector<int>> &graph);
+
+	void init(int n_cameras);
+
+	template<typename T>
+	void update(const std::vector<T> &add);
+
+	void update(uint64_t add);
+
+	void mask(int a, int b);
+	void unmask(int a, int b);
+
+	int count(int camera) const;
+	int count(int camera1, int camera2) const;
+
+	// minimum counts and cameras
+	int min() const;
+	int max() const;
+	int argmax() const;
+	int argmin() const;
+
+	/**
+	 * @brief Find the shortest path to camera i, preferring links with the
+	 *        most shared observations (co-visibility).
+	 */
+	Paths<float> shortestPath(int i) const;
+
+protected:
+	std::vector<int> neighbors(int i) const;
+	float distance(int a, int b) const;
+
+private:
+
+	int n_cameras_;	// number of cameras
+	int n_max_;		// highest index used
+	// adjacency matrix
+	std::vector<std::vector<int>> graph_;
+	// masked values (mask_[i][j]) are not used
+	std::vector<std::vector<bool>> mask_;
+
+public:
+	MSGPACK_DEFINE(n_cameras_, n_max_, graph_, mask_);
+};
+
+}
+}
+
+#endif
diff --git a/components/calibration/src/extrinsic.cpp b/components/calibration/src/extrinsic.cpp
new file mode 100644
index 000000000..615b13034
--- /dev/null
+++ b/components/calibration/src/extrinsic.cpp
@@ -0,0 +1,719 @@
+#include <loguru.hpp>
+
+#include <ftl/exception.hpp>
+#include <ftl/calibration/optimize.hpp>
+#include <ftl/calibration/extrinsic.hpp>
+
+#include <fstream>
+#include <sstream>
+
+#include <opencv2/calib3d.hpp>
+
+////////////////////////////////////////////////////////////////////////////////
+
+/** check bit i in a */
+inline bool hasOne(uint64_t a, unsigned int i) {
+	return a & (uint64_t(1) << i);
+}
+
+/** all bits set in b are also set in a */
+inline bool hasAll(uint64_t a, uint64_t b) {
+	return (b & a) == b;
+}
+
+/** set bit i in a */
+inline void setOne(uint64_t &a, unsigned int i) {
+	a |= (uint64_t(1) << i);
+}
+
+/** get highest bit*/
+inline int hbit(uint64_t a) {
+#ifdef __GNUC__
+	return 64 - __builtin_clzll(a);
+#endif
+	int v = 1;
+	while (a >>= 1) { v++; }
+	return v;
+}
+
+inline int popcount(uint64_t bits) {
+	#if defined(_MSC_VER)
+		return __popcnt64(bits);
+	#elif defined(__GNUC__)
+		return __builtin_popcountll(bits);
+	#else
+		int count = 0;
+		while (bits != 0) {
+			count += int(bits & uint64_t(1));
+			bits >>= 1;
+		}
+		return count;
+	#endif
+}
+
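+/* Illustrative sketch (not part of the interface): the helpers above treat a
+ * uint64_t as a set of camera indices, e.g. for a point seen by cameras 0 and 3:
+ *
+ *   uint64_t cameras = 0;
+ *   setOne(cameras, 0);
+ *   setOne(cameras, 3);
+ *   hasAll(cameras, (uint64_t(1) << 0) | (uint64_t(1) << 3)); // true
+ *   popcount(cameras);                                        // == 2
+ */
+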
+// ==== CalibrationPoints ================================================
+
+namespace ftl {
+namespace calibration {
+
+template<typename T>
+void CalibrationPoints<T>::addPoints(unsigned int c, const std::vector<cv::Point_<T>>& points) {
+	if (hasCamera(c)) {
+		throw ftl::exception("Points already set for camera. "
+							 "Forgot to call next()?");
+	}
+	if (current_.object == ~(unsigned int)(0)) {
+		throw ftl::exception("Target has to be set before adding points.");
+	}
+
+	if (objects_[current_.object].size() != points.size()) {
+		throw ftl::exception("Number of points must match object points");
+	}
+
+	std::vector<cv::Point_<T>> p(points.begin(), points.end());
+	current_.points[c] = p;
+	setCamera(c);
+};
+
+template<typename T>
+unsigned int CalibrationPoints<T>::setObject(const std::vector<cv::Point3_<T>> &object) {
+	if (!current_.points.empty()) {
+		throw ftl::exception("Points already set, object can not be changed. "
+							 "Forgot to call next()?");
+	}
+
+	// check if object already exists
+	for (unsigned int i = 0; i < objects_.size(); i++) {
+		if (objects_[i].size() != object.size()) { continue; }
+
+		bool eq = true;
+		for (unsigned int j = 0; j < object.size(); j++) {
+			eq &= (objects_[i][j] == object[j]);
+		}
+		if (eq) {
+			current_.object = i;
+			return i;
+		}
+	}
+
+	// not found
+	current_.object = objects_.size();
+	objects_.push_back(object);
+	return current_.object;
+}
+
+template<typename T>
+unsigned int CalibrationPoints<T>::setObject(const std::vector<cv::Point_<T>> &object) {
+	if (!current_.points.empty()) {
+		throw ftl::exception("Points already set, object can not be changed. "
+							 "Forgot to call next()?");
+	}
+	std::vector<cv::Point3_<T>> object3d;
+	object3d.reserve(object.size());
+
+	for (const auto& p : object) {
+		object3d.push_back({p.x, p.y, T(0.0)});
+	}
+	return setObject(object3d);
+}
+
+template<typename T>
+void CalibrationPoints<T>::next() {
+	if (objects_.empty()) {
+		throw ftl::exception("object must be set before calling next()");
+	}
+	if (current_.cameras == uint64_t(0)) {
+		return;
+	}
+
+	count_ += objects_[current_.object].size();
+	points_.push_back(current_);
+	visibility_.update(current_.cameras);
+	clear();
+}
+
+template<typename T>
+void CalibrationPoints<T>::clear() {
+	current_ = {uint64_t(0), (unsigned int)(objects_.size()) - 1u, {}, {}};
+}
+
+template<typename T>
+bool CalibrationPoints<T>::hasCamera(unsigned int c) {
+	return hasOne(current_.cameras, c);
+}
+
+template<typename T>
+void CalibrationPoints<T>::setCamera(unsigned int c) {
+	setOne(current_.cameras, c);
+}
+
+template<typename T>
+int CalibrationPoints<T>::getCount(unsigned int c) {
+	return visibility_.count(c);
+}
+
+template<typename T>
+int CalibrationPoints<T>::getPointsCount() {
+	return count_;
+}
+
+template<typename T>
+const Visibility& CalibrationPoints<T>::visibility() {
+	return visibility_;
+}
+
+template<typename T>
+void CalibrationPoints<T>::setTriangulatedPoints(unsigned int c_base, unsigned int c_match,
+	const std::vector<cv::Point3_<T>>& points, int idx) {
+
+	uint64_t required = 0;
+	setOne(required, c_base);
+	setOne(required, c_match);
+
+	auto itr = points.begin();
+	for (unsigned int i = idx; i < points_.size(); i++) {
+		if (hasAll(points_[i].cameras, required)) {
+			auto obj_sz = objects_[points_[i].object].size();
+			std::vector<cv::Point3_<T>> pts;
+			pts.reserve(obj_sz);
+			for (unsigned int i_obj = 0; i_obj < obj_sz; i_obj++) {
+				pts.push_back(*itr);
+				itr++;
+			}
+			points_[i].triangulated[{c_base, c_match}] = pts;
+			if (itr == points.end()) { break; }
+		}
+	}
+}
+
+template<typename T>
+void CalibrationPoints<T>::resetTriangulatedPoints() {
+	for (unsigned int i = 0; i < points_.size(); i++) {
+		points_[i].triangulated.clear();
+	}
+}
+
+template<typename T>
+std::vector<std::vector<cv::Point_<T>>> CalibrationPoints<T>::getPoints(const std::vector<unsigned int>& cameras, unsigned int object) {
+
+	std::vector<std::vector<cv::Point_<T>>> points;
+	points.resize(cameras.size());
+	std::vector<unsigned int> lookup;
+
+	uint64_t required = 0;
+	for (unsigned i = 0; i < cameras.size(); i++) {
+		setOne(required, cameras[i]);
+
+		if ((cameras[i] + 1) > lookup.size()) {
+			lookup.resize(cameras[i] + 1, ~(unsigned int)(0));
+		}
+		lookup[cameras[i]] = i;
+	}
+
+	for (const auto& set : points_) {
+		if (!hasAll(set.cameras, required))	{ continue; }
+		if (set.object != object)			{ continue; }
+
+		for (auto &[i, data] : set.points) {
+			if (!hasOne(required, i)) { continue; }
+
+			points[lookup[i]].insert
+				(points[lookup[i]].end(), data.begin(), data.end());
+		}
+	}
+
+	return points;
+}
+
+
+template<typename T>
+std::vector<cv::Point3_<T>> CalibrationPoints<T>::getObject(unsigned int object) {
+	return objects_[object];
+}
+
+template class CalibrationPoints<float>;
+template class CalibrationPoints<double>;
+
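+/* Illustrative sketch of collecting points one frame at a time (the target
+ * object and detection results below are hypothetical):
+ *
+ *   CalibrationPoints<double> points;
+ *   points.setObject(target_object_points); // e.g. chessboard corners in 3D
+ *   points.addPoints(0, detected_camera_0); // 2D detections in camera 0
+ *   points.addPoints(1, detected_camera_1); // 2D detections in camera 1
+ *   points.next();                          // finalize this frame
+ */
+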
+////////////////////////////////////////////////////////////////////////////////
+
+int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
+	const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
+	const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t, double distanceThresh,
+	cv::Mat &triangulatedPoints) {
+
+	cv::Mat cameraMatrix1;
+	cv::Mat cameraMatrix2;
+	cv::Mat cameraMatrix;
+
+	cv::Mat points1(_points1.size(), 2, CV_64FC1);
+	cv::Mat points2(_points2.size(), 2, CV_64FC1);
+
+	CHECK(points1.size() == points2.size());
+
+	for (size_t i = 0; i < _points1.size(); i++) {
+		auto p1 = points1.ptr<double>(i);
+		p1[0] = _points1[i].x;
+		p1[1] = _points1[i].y;
+
+		auto p2 = points2.ptr<double>(i);
+		p2[0] = _points2[i].x;
+		p2[1] = _points2[i].y;
+	}
+
+	_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
+	_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
+	cameraMatrix = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
+
+	double fx1 = cameraMatrix1.at<double>(0,0);
+	double fy1 = cameraMatrix1.at<double>(1,1);
+	double cx1 = cameraMatrix1.at<double>(0,2);
+	double cy1 = cameraMatrix1.at<double>(1,2);
+
+	double fx2 = cameraMatrix2.at<double>(0,0);
+	double fy2 = cameraMatrix2.at<double>(1,1);
+	double cx2 = cameraMatrix2.at<double>(0,2);
+	double cy2 = cameraMatrix2.at<double>(1,2);
+
+	points1.col(0) = (points1.col(0) - cx1) / fx1;
+	points1.col(1) = (points1.col(1) - cy1) / fy1;
+
+	points2.col(0) = (points2.col(0) - cx2) / fx2;
+	points2.col(1) = (points2.col(1) - cy2) / fy2;
+
+	// TODO mask
+	// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
+	// modules/calib3d/src/five-point.cpp (461)
+	//
+	// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
+
+	return cv::recoverPose( E, points1, points2, cameraMatrix, _R, _t,
+							distanceThresh, cv::noArray(), triangulatedPoints);
+}
+
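+/* Estimate the relative pose between two cameras from 2D point correspondences:
+ * fundamental matrix (8-point) -> essential matrix -> recoverPose(), then scale
+ * the translation so that the triangulated points match the known object
+ * dimensions, and finally refine both views with bundle adjustment while
+ * keeping the first camera fixed at the origin. Returns the RMS reprojection
+ * error. */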
+double calibratePair(const cv::Mat &K1, const cv::Mat &D1,
+		const cv::Mat &K2, const cv::Mat &D2,
+		const std::vector<cv::Point2d> &points1,
+		const std::vector<cv::Point2d> &points2,
+		const std::vector<cv::Point3d> &object_points, cv::Mat &R,
+		cv::Mat &t, std::vector<cv::Point3d> &points_out, bool optimize) {
+
+	cv::Mat F = cv::findFundamentalMat(points1, points2, cv::noArray(), cv::FM_8POINT);
+	cv::Mat E = K2.t() * F * K1;
+
+	cv::Mat points3dh;
+	// distanceThresh unit?
+	recoverPose(E, points1, points2, K1, K2, R, t, 1000.0, points3dh);
+
+	points_out.clear();
+	points_out.reserve(points3dh.cols);
+
+	// convert from homogeneous coordinates
+	for (int col = 0; col < points3dh.cols; col++) {
+		CHECK(points3dh.at<double>(3, col) != 0);
+		cv::Point3d p = cv::Point3d(points3dh.at<double>(0, col),
+							points3dh.at<double>(1, col),
+							points3dh.at<double>(2, col))
+							/ points3dh.at<double>(3, col);
+		points_out.push_back(p);
+	}
+
+	double s = ftl::calibration::optimizeScale(object_points, points_out);
+	t = t * s;
+
+	auto params1 = Camera(K1, D1, cv::Mat::eye(3, 3, CV_64FC1), cv::Mat::zeros(3, 1, CV_64FC1), {0, 0});
+	auto params2 = Camera(K2, D2, R, t, {0, 0});
+
+	auto ba = BundleAdjustment();
+	ba.addCamera(params1);
+	ba.addCamera(params2);
+
+	for (size_t i = 0; i < points_out.size(); i++) {
+		ba.addPoint({points1[i], points2[i]}, points_out[i]);
+	}
+
+	// needs to be implemented correctly: optimize each pose of the target
+	ba.addObject(object_points);
+
+	double error = ba.reprojectionError();
+
+	if (optimize) {
+		BundleAdjustment::Options options;
+		options.optimize_intrinsic = false;
+		// any difference if both transformations are multiplied with (T_1)^-1
+		// (inverse of the first camera's transform) after optimization instead?
+		options.fix_camera_extrinsic = {0};
+		ba.run(options);
+		error = ba.reprojectionError();
+	}
+	CHECK((cv::countNonZero(params1.rvec()) == 0) &&
+		  (cv::countNonZero(params1.tvec()) == 0));
+
+	return error; // reprojectionError() already returns RMSE
+}
+
+// ==== Extrinsic Calibration ==================================================
+
+unsigned int ExtrinsicCalibration::addCamera(const CalibrationData::Intrinsic &c) {
+	unsigned int idx = calib_.size();
+	calib_.push_back({c, {}});
+	return idx;
+}
+
+unsigned int ExtrinsicCalibration::addStereoCamera(const CalibrationData::Intrinsic &c1, const CalibrationData::Intrinsic &c2) {
+	unsigned int idx = calib_.size();
+	calib_.push_back({c1, {}});
+	calib_.push_back({c2, {}});
+	mask_.insert({idx, idx + 1});
+	return idx;
+}
+
+std::string ExtrinsicCalibration::status() {
+	auto str = std::atomic_load(&status_);
+	if (str) { return *str; }
+	else { return ""; }
+}
+
+void ExtrinsicCalibration::updateStatus_(std::string str) {
+	std::atomic_store(&status_, std::make_shared<std::string>(str));
+}
+
+void ExtrinsicCalibration::calculatePairPoses() {
+
+	const auto& visibility =  points_.visibility();
+	// Calibrate all pairs. TODO: might be expensive if the number of cameras is
+	// high. If not performed for all pairs, the remaining non-triangulated
+	// points have to be triangulated separately later.
+
+	int i = 1;
+	int i_max = (camerasCount() * camerasCount()) / 2 + 1;
+
+	for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
+	for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {
+
+		updateStatus_(	"Calculating pose for pair " +
+						std::to_string(i++) + " of " + std::to_string(i_max));
+
+		if (c1 == c2) {
+			pairs_[{c1, c2}] = { cv::Mat::eye(cv::Size(3, 3), CV_64FC1),
+								 cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0)),
+								 0.0};
+
+			continue;
+		}
+
+		if (mask_.count({c1, c2}) > 0 ) { continue; }
+		if (pairs_.find({c1, c2}) != pairs_.end()) { continue; }
+
+		// require minimum number of visible points
+		if (visibility.count(c1, c2) < min_points_) {
+			LOG(WARNING) << "skipped pair (" << c1 << ", " << c2 << "), not enough points";
+			continue;
+		}
+
+		// calculate parameters and update triangulation
+
+		cv::Mat K1 = calib_[c1].intrinsic.matrix();
+		cv::Mat distCoeffs1 = calib_[c1].intrinsic.distCoeffs.Mat();
+		cv::Mat K2 = calib_[c2].intrinsic.matrix();
+		cv::Mat distCoeffs2 = calib_[c2].intrinsic.distCoeffs.Mat();
+		auto pts = points().getPoints({c1, c2}, 0);
+		auto object = points().getObject(0);
+		cv::Mat R, t;
+		std::vector<cv::Point3d> points3d;
+		auto rmse = calibratePair(K1, distCoeffs1, K2, distCoeffs2,
+			pts[0], pts[1], object, R, t, points3d, true);
+
+		// debug info
+		LOG(INFO) << "RMSE (cameras " << c1 << " & " << c2 << "): " << rmse;
+
+		points().setTriangulatedPoints(c1, c2, points3d);
+
+		pairs_[{c1, c2}] = {R, t, rmse};
+
+		cv::Mat R_i, t_i;
+		R.copyTo(R_i);
+		t.copyTo(t_i);
+		transform::inverse(R_i, t_i);
+		pairs_[{c2, c1}] = {R_i, t_i, rmse};
+	}}
+}
+
+void ExtrinsicCalibration::calculateInitialPoses() {
+	updateStatus_("Initial poses from chained transformations");
+
+	// mask stereo cameras (do not pairwise calibrate a stereo pair; unreliable)
+	auto visibility =  points_.visibility();
+	for (const auto& m: mask_) {
+		visibility.mask(m.first, m.second);
+	}
+
+	// mask camera pairs which did not have enough points. TODO: triangulation
+	// would still be useful later (calculate initial poses, then triangulate)
+	for (unsigned int c1 = 0; c1 < camerasCount(); c1++) {
+	for (unsigned int c2 = c1; c2 < camerasCount(); c2++) {
+		if (pairs_.count({c1, c2}) == 0) {
+			visibility.mask(c1, c2);
+		}
+
+		// mask bad pairs (high rmse)
+		/*if (std::get<2>(pairs_.at({c1, c2})) > 16.0) {
+			visibility.mask(c1, c2);
+		}*/
+	}}
+
+	// pick optimal camera: most views of calibration pattern
+	unsigned int c_ref = visibility.argmax();
+
+	auto paths = visibility.shortestPath(c_ref);
+
+	for (unsigned int c = 0; c < camerasCount(); c++) {
+		if (c == c_ref) { continue; }
+
+		cv::Mat R_chain = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
+		cv::Mat t_chain = cv::Mat(cv::Size(1, 3), CV_64FC1, cv::Scalar(0.0));
+
+		auto path = paths.to(c);
+		do {
+			// iterate in reverse order
+			auto prev = path.back();
+			path.pop_back();
+			auto next = path.back();
+
+			cv::Mat R = std::get<0>(pairs_.at({prev, next}));
+			cv::Mat t = std::get<1>(pairs_.at({prev, next}));
+
+			CHECK_EQ(R.size(), cv::Size(3, 3));
+			CHECK_EQ(t.total(), 3);
+
+			R_chain = R * R_chain;
+			t_chain = t + R * t_chain;
+		}
+		while(path.size() > 1);
+
+		// note: direction of chain in the loop, hence inverse()
+		calib_[c].extrinsic =
+			CalibrationData::Extrinsic(R_chain, t_chain).inverse();
+	}
+}
+
+static std::vector<bool> visibility(unsigned int ncameras, uint64_t visible) {
+	std::vector<bool> res(ncameras, false);
+	for (unsigned int i = 0; i < ncameras; i++) {
+		res[i] = visible & (uint64_t(1) << i);
+	}
+	return res;
+}
+
+/* absolute difference between min and max for each set of coordinates */
+static cv::Point3d absdiff(const std::vector<double> &xs, const std::vector<double> &ys, const std::vector<double> &zs) {
+	double minx = INFINITY;
+	double maxx = -INFINITY;
+	for (auto x : xs) {
+		minx = std::min(minx, x);
+		maxx = std::max(maxx, x);
+	}
+	double miny = INFINITY;
+	double maxy = -INFINITY;
+	for (auto y : ys) {
+		miny = std::min(miny, y);
+		maxy = std::max(maxy, y);
+	}
+	double minz = INFINITY;
+	double maxz = -INFINITY;
+	for (auto z : zs) {
+		minz = std::min(minz, z);
+		maxz = std::max(maxz, z);
+	}
+	return {std::abs(minx - maxx), std::abs(miny - maxy), std::abs(minz - maxz)};
+}
+
+double ExtrinsicCalibration::optimize() {
+
+	BundleAdjustment ba;
+	std::vector<Camera> cameras;
+	std::vector<cv::Mat> T; // camera to world
+
+	cameras.reserve(calib_.size());
+	unsigned int ncameras = calib_.size();
+
+	for (const auto& c : calib_) {
+		auto camera = c;
+		T.push_back(c.extrinsic.inverse().matrix());
+		cameras.push_back(Camera(camera));
+	}
+	for (auto &c : cameras) {
+		// BundleAdjustment stores pointers; do not resize cameras vector
+		ba.addCamera(c);
+	}
+
+	// Transform triangulated points into the same coordinate system. For points
+	// triangulated multiple times, use the median values. Note that T[] contains
+	// the inverse transformations, as points are transformed from camera to
+	// world (instead of the other way around, as by the parameters in cameras[]).
+
+	updateStatus_("Calculating points in world coordinates");
+
+	// NOTE: the CalibrationPoints data structure above is not optimal for how
+	//		 points are actually used here; the BundleAdjustment interface also
+	//		 expects flat arrays. Overall, cv::Mats would probably be better
+	//		 suited as they can easily be interpreted as flat or
+	//		 multi-dimensional arrays.
+
+	int n_points_bad = 0;
+	int n_points_missing = 0;
+	int n_points = 0;
+
+	for (const auto& itm : points_.all()) {
+		auto sz = points_.getObject(itm.object).size();
+		auto vis = visibility(ncameras, itm.cameras);
+
+		for (unsigned int i = 0; i < sz; i++) {
+			n_points++;
+
+			// observation and triangulated coordinates; Use {NAN, NAN} for
+			// non-visible points (if those are used by mistake, Ceres will
+			// fail with error message).
+			std::vector<cv::Point2d> obs(ncameras, {NAN, NAN});
+			std::vector<double> px;
+			std::vector<double> py;
+			std::vector<double> pz;
+
+			for (const auto& [c, o] : itm.points) {
+				obs[c] = o[i];
+			}
+
+			for (const auto [c, pts] : itm.triangulated) {
+				auto p = transform::apply(pts[i], T[c.first]);
+				px.push_back(p.x);
+				py.push_back(p.y);
+				pz.push_back(p.z);
+			}
+
+			// median coordinate for each axis
+			std::sort(px.begin(), px.end());
+			std::sort(py.begin(), py.end());
+			std::sort(pz.begin(), pz.end());
+			cv::Point3d p;
+
+			unsigned int n = px.size();
+			unsigned int m = n / 2;
+			if (m == 0) {
+				n_points_missing++;
+				break;
+				// not triangulated (see earlier steps)
+				// TODO: triangulate here
+			}
+			if (n % 2 == 0 && n > 1) {
+				// mean of two points if number of points even
+				cv::Point3d p1 = {px[m - 1], py[m - 1], pz[m - 1]};
+				cv::Point3d p2 = {px[m], py[m], pz[m]};
+				p = (p1 + p2)/2.0;
+			}
+			else {
+				p = {px[m], py[m], pz[m]};
+			}
+
+			// TODO: design a more meaningful check
+			if (cv::norm(absdiff(px, py, pz)) > threshold_bad_) {
+				n_points_bad++;
+				continue;
+			}
+
+			ba.addPoint(vis, obs, p);
+		}
+	}
+
+	if (float(n_points_bad)/float(n_points - n_points_missing) > threhsold_warning_) {
+		// print warning message; calibration will likely fail if triangulation
+		// quality was very low (more than the threshold fraction of bad points)
+		LOG(ERROR) << "Large variation in "<< n_points_bad << " "
+					  "triangulated points. Are initial intrinsic parameters "
+					  "good?";
+	}
+
+	if (float(n_points_missing)/float(n_points - n_points_bad) > threhsold_warning_) {
+		// low number of points; most points only visible in pairs?
+		LOG(WARNING) << "Large number of points skipped. Are there enough "
+						"visible points between stereo camera pairs? (TODO: "
+						"implement necessary triangulation after pair "
+						"calibration)";
+	}
+
+	updateStatus_("Bundle adjustment");
+	options_.verbose = true;
+	LOG(INFO) << "fix intrinsics: " << (options_.optimize_intrinsic ? "no" : "yes");
+	LOG(INFO) << "fix focal: " << (options_.fix_focal ? "yes" : "no");
+	LOG(INFO) << "fix principal point: " << (options_.fix_principal_point ? "yes" : "no");
+	LOG(INFO) << "fix distortion: " << (options_.fix_distortion ? "yes" : "no");
+
+	ba.run(options_);
+
+	calib_optimized_.resize(calib_.size());
+	rmse_.resize(calib_.size());
+
+	for (unsigned int i = 0; i < cameras.size(); i++) {
+		rmse_[i] = ba.reprojectionError(i);
+		auto intr = cameras[i].intrinsic();
+		auto extr = cameras[i].extrinsic();
+		calib_optimized_[i] = calib_[i];
+		calib_optimized_[i].intrinsic.set(intr.matrix(), intr.distCoeffs.Mat(), intr.resolution);
+		calib_optimized_[i].extrinsic.set(extr.rvec, extr.tvec);
+	}
+
+	rmse_total_ = ba.reprojectionError();
+	return rmse_total_;
+}
+
+double ExtrinsicCalibration::run() {
+	updateStatus_("Starting");
+	points_.resetTriangulatedPoints();
+	pairs_.clear();
+	calculatePairPoses();
+	calculateInitialPoses();
+	return optimize();
+}
+
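+/* Illustrative sketch of the overall workflow (how calibration points are fed
+ * in depends on the interface declared in extrinsic.hpp and is assumed here):
+ *
+ *   ExtrinsicCalibration ecalib;
+ *   ecalib.addStereoCamera(intrinsic_left, intrinsic_right);
+ *   // ... add remaining cameras and collect calibration points ...
+ *   double rmse = ecalib.run();
+ *   auto result = ecalib.calibrationOptimized(0);
+ */
+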
+const CalibrationData::Calibration& ExtrinsicCalibration::calibration(unsigned int c) {
+	return calib_.at(c);
+}
+
+const CalibrationData::Calibration& ExtrinsicCalibration::calibrationOptimized(unsigned int c) {
+	return calib_optimized_.at(c);
+}
+
+double ExtrinsicCalibration::reprojectionError(unsigned int c) {
+	return rmse_.at(c);
+}
+
+double ExtrinsicCalibration::reprojectionError() {
+	return rmse_total_;
+}
+
+bool ExtrinsicCalibration::toFile(const std::string& fname) {
+	points_.clear();
+	std::ofstream ofs(fname, std::ios_base::trunc);
+	msgpack::pack(ofs, *this);
+	ofs.close();
+	return true;
+}
+
+bool ExtrinsicCalibration::fromFile(const std::string& fname) {
+
+	points_ = CalibrationPoints<double>();
+	mask_ = {};
+	calib_ = {};
+
+	std::ifstream ifs(fname);
+	std::stringstream buf;
+	msgpack::object_handle oh;
+
+	buf << ifs.rdbuf();
+	msgpack::unpack(oh, buf.str().data(), buf.str().size());
+	oh.get().convert(*this);
+
+	return true;
+}
+
+
+}
+}
diff --git a/components/calibration/src/object.cpp b/components/calibration/src/object.cpp
new file mode 100644
index 000000000..83f70a3b6
--- /dev/null
+++ b/components/calibration/src/object.cpp
@@ -0,0 +1,159 @@
+#include <loguru.hpp>
+
+#include <ftl/exception.hpp>
+#include <ftl/calibration/object.hpp>
+
+#include <opencv2/core/cuda.hpp>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/imgproc.hpp>
+
+using ftl::calibration::ArUCoObject;
+using ftl::calibration::ChessboardObject;
+
+
+ArUCoObject::ArUCoObject(cv::aruco::PREDEFINED_DICTIONARY_NAME dictionary,
+	float baseline, float tag_size, int id1, int id2) :
+	baseline_(baseline), tag_size_(tag_size), id1_(id1), id2_(id2) {
+
+	dict_ = cv::aruco::getPredefinedDictionary(dictionary);
+	params_ = cv::aruco::DetectorParameters::create();
+	params_->cornerRefinementMinAccuracy = 0.01;
+	// cv::aruco::CORNER_REFINE_CONTOUR memory issues? intrinsic quality?
+	params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG;
+}
+
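+/* The calibration target consists of two ArUco tags (id1, id2) assumed to lie
+ * in the same plane, the second tag offset by baseline_ along the x-axis. The
+ * eight 3D corners below are returned in the same order in which detect()
+ * appends the corresponding 2D corners (tag id1 first, then tag id2). */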
+std::vector<cv::Point3d> ArUCoObject::object() {
+	return {
+		cv::Point3d(0.0, 0.0, 0.0),
+		cv::Point3d(tag_size_, 0.0, 0.0),
+		cv::Point3d(tag_size_, tag_size_, 0.0),
+		cv::Point3d(0.0, tag_size_, 0.0),
+
+		cv::Point3d(baseline_, 0.0, 0.0),
+		cv::Point3d(baseline_ + tag_size_, 0.0, 0.0),
+		cv::Point3d(baseline_ + tag_size_, tag_size_, 0.0),
+		cv::Point3d(baseline_, tag_size_, 0.0)
+	};
+}
+
+int ArUCoObject::detect(cv::InputArray im, std::vector<cv::Point2d>& result, const cv::Mat& K, const cv::Mat& distCoeffs) {
+
+	// note: cv::aruco requires floats
+	std::vector<std::vector<cv::Point2f>> corners;
+	std::vector<int> ids;
+	cv::Mat im_gray;
+
+	if (im.size() == cv::Size(0, 0)) {
+		return -1;
+	}
+	if (im.isGpuMat()) {
+		cv::Mat dl;
+		im.getGpuMat().download(dl);
+		cv::cvtColor(dl, im_gray, cv::COLOR_BGRA2GRAY);
+	}
+	else if (im.isMat()) {
+		cv::cvtColor(im.getMat(), im_gray, cv::COLOR_BGRA2GRAY);
+	}
+	else {
+		throw ftl::exception("Bad input (not cv::Mat/cv::GpuMat)");
+	}
+
+	cv::aruco::detectMarkers(im_gray, dict_, corners, ids, params_,
+								cv::noArray(), K, distCoeffs);
+
+
+	if (ids.size() < 2) { return 0; }
+
+	const size_t n_corners = 4;
+	const size_t n_tags = 2;
+
+	std::vector<cv::Point2d> marker1; marker1.reserve(n_corners);
+	std::vector<cv::Point2d> marker2; marker2.reserve(n_corners);
+
+	int n = 0;
+	// find the right markers
+	for (unsigned int i = 0; i < ids.size(); i++) {
+		if (ids[i] == id1_) {
+			n++;
+			for (auto& p : corners[i]) {
+				marker1.push_back({p.x, p.y});
+			}
+			CHECK(corners[i].size() == n_corners);
+		}
+		if (ids[i] == id2_) {
+			n++;
+			for (auto& p : corners[i]) {
+				marker2.push_back({p.x, p.y});
+			}
+			CHECK(corners[i].size() == n_corners);
+		}
+	}
+
+	if (marker1.empty() || marker2.empty()) {
+		return 0;
+	}
+
+	if (n != 2) {
+		LOG(WARNING) << "Found more than one marker with same ID";
+		return 0;
+	}
+
+	// add the points to output
+	result.reserve(n_tags*n_corners + result.size());
+	for (size_t i_corner = 0; i_corner < n_corners; i_corner++) {
+		result.push_back(marker1[i_corner]);
+	}
+	for (size_t i_corner = 0; i_corner < n_corners; i_corner++) {
+		result.push_back(marker2[i_corner]);
+	}
+
+	return n_tags*n_corners;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+cv::Size ChessboardObject::chessboardSize() {
+	return {chessboard_size_.width + 1, chessboard_size_.height + 1};
+}
+
+double ChessboardObject::squareSize() {
+	return square_size_;
+}
+
+ChessboardObject::ChessboardObject(int rows, int cols, double square_size) :
+		chessboard_size_(cols - 1, rows - 1), square_size_(square_size),
+		flags_(cv::CALIB_CB_NORMALIZE_IMAGE|cv::CALIB_CB_ACCURACY) {
+
+	init();
+}
+
+void ChessboardObject::init() {
+	object_points_.reserve(chessboard_size_.width * chessboard_size_.height);
+	for (int row = 0; row < chessboard_size_.height; ++row) {
+	for (int col = 0; col < chessboard_size_.width; ++col) {
+		object_points_.push_back({col * square_size_, row * square_size_, 0});
+	}}
+}
+
+int ChessboardObject::detect(cv::InputArray im, std::vector<cv::Point2d>& points, const cv::Mat& K, const cv::Mat& D) {
+	cv::Mat tmp;
+
+	if (im.isMat()) {
+		tmp = im.getMat();
+	}
+	else if (im.isGpuMat()) {
+		im.getGpuMat().download(tmp);
+	}
+	else {
+		throw ftl::exception("Image not cv::Mat or cv::GpuMat");
+	}
+
+	if (cv::findChessboardCornersSB(tmp, chessboard_size_, points, flags_)) {
+		return 1;
+	}
+	return 0;
+}
+
+std::vector<cv::Point3d> ChessboardObject::object() {
+	return object_points_;
+}
diff --git a/components/calibration/src/optimize.cpp b/components/calibration/src/optimize.cpp
index 6c8f31064..04722774d 100644
--- a/components/calibration/src/optimize.cpp
+++ b/components/calibration/src/optimize.cpp
@@ -1,7 +1,9 @@
 #include "ftl/calibration/optimize.hpp"
 #include "ftl/calibration/parameters.hpp"
 
-#include "loguru.hpp"
+#include <loguru.hpp>
+
+#include <ftl/exception.hpp>
 
 #include <algorithm>
 #include <unordered_set>
@@ -15,7 +17,8 @@ using cv::Mat;
 
 using cv::Point3d;
 using cv::Point2d;
-
+using cv::Vec3d;
+using cv::Size;
 using cv::Rect;
 
 using ftl::calibration::BundleAdjustment;
@@ -23,19 +26,176 @@ using ftl::calibration::Camera;
 
 ////////////////////////////////////////////////////////////////////////////////
 
+void Camera::setRotation(const Mat& R) {
+	if (((R.size() != Size(3, 3)) &&
+		(R.size() != Size(3, 1)) &&
+		(R.size() != Size(1, 3))) ||
+		(R.type() != CV_64FC1)) {
+
+		throw ftl::exception("bad rotation matrix size/type");
+	}
+
+	Mat rvec;
+	if (R.size() == cv::Size(3, 3)) { cv::Rodrigues(R, rvec); }
+	else { rvec = R; }
+
+	ceres::AngleAxisToQuaternion<double>((double*)(rvec.data), data + Parameter::ROTATION);
+}
+
+void Camera::setTranslation(const Mat& t) {
+	if ((t.type() != CV_64FC1) ||
+		(t.size() != cv::Size(1, 3))) {
+
+		throw ftl::exception("bad translation vector");
+	}
+
+	data[Parameter::TX] = t.at<double>(0);
+	data[Parameter::TY] = t.at<double>(1);
+	data[Parameter::TZ] = t.at<double>(2);
+}
+
+
+void Camera::setIntrinsic(const Mat& K, cv::Size sz) {
+	size = sz;
+	if ((K.type() != CV_64FC1) || (K.size() != cv::Size(3, 3))) {
+		throw ftl::exception("bad intrinsic matrix");
+	}
+
+	data[Parameter::F] = K.at<double>(0, 0);
+	data[Parameter::CX] = K.at<double>(0, 2);
+	data[Parameter::CY] = K.at<double>(1, 2);
+}
+
+void Camera::setDistortion(const Mat& D) {
+	if ((D.type() != CV_64FC1)) {
+		throw ftl::exception("distortion coefficients must be CV_64FC1");
+	}
+	switch(D.total()) {
+		case 12:
+			/*
+			data[Parameter::S1] = D.at<double>(8);
+			data[Parameter::S2] = D.at<double>(9);
+			data[Parameter::S3] = D.at<double>(10);
+			data[Parameter::S4] = D.at<double>(11);
+			*/
+			[[fallthrough]];
+
+		case 8:
+			data[Parameter::K4] = D.at<double>(5);
+			data[Parameter::K5] = D.at<double>(6);
+			data[Parameter::K6] = D.at<double>(7);
+			[[fallthrough]];
+
+		case 5:
+			data[Parameter::K3] = D.at<double>(4);
+			[[fallthrough]];
+
+		default:
+			data[Parameter::K1] = D.at<double>(0);
+			data[Parameter::K2] = D.at<double>(1);
+			data[Parameter::P1] = D.at<double>(2);
+			data[Parameter::P2] = D.at<double>(3);
+	}
+}
+
+Camera::Camera(const Mat &K, const Mat &D, const Mat &R, const Mat &tvec, cv::Size sz) {
+	setIntrinsic(K, D, sz);
+	if (!R.empty()) { setRotation(R); }
+	if (!tvec.empty()) { setTranslation(tvec); }
+}
+
+Camera::Camera(const ftl::calibration::CalibrationData::Calibration& calib) {
+	setIntrinsic(calib.intrinsic.matrix(), calib.intrinsic.distCoeffs.Mat(), calib.intrinsic.resolution);
+	setExtrinsic(calib.extrinsic.matrix()(cv::Rect(0, 0, 3, 3)), cv::Mat(calib.extrinsic.tvec));
+}
+
+ftl::calibration::CalibrationData::Intrinsic Camera::intrinsic() const {
+	return ftl::calibration::CalibrationData::Intrinsic(intrinsicMatrix(), distortionCoefficients(), size);
+}
+ftl::calibration::CalibrationData::Extrinsic Camera::extrinsic() const {
+	return ftl::calibration::CalibrationData::Extrinsic(rvec(), tvec());
+}
+
+Mat Camera::intrinsicMatrix() const {
+	Mat K = Mat::eye(3, 3, CV_64FC1);
+	K.at<double>(0, 0) = data[Parameter::F];
+	K.at<double>(1, 1) = data[Parameter::F];
+	K.at<double>(0, 2) = data[Parameter::CX];
+	K.at<double>(1, 2) = data[Parameter::CY];
+	return K;
+}
+
+Mat Camera::distortionCoefficients() const {
+	Mat D;
+	if      (Camera::n_distortion_parameters <= 4)  { D = Mat::zeros(1, 4, CV_64FC1); }
+	else if (Camera::n_distortion_parameters <= 5)  { D = Mat::zeros(1, 5, CV_64FC1); }
+	else if (Camera::n_distortion_parameters <= 8)  { D = Mat::zeros(1, 8, CV_64FC1); }
+	else if (Camera::n_distortion_parameters <= 12) { D = Mat::zeros(1, 12, CV_64FC1); }
+	else if (Camera::n_distortion_parameters <= 14) { D = Mat::zeros(1, 14, CV_64FC1); }
+
+	switch(Camera::n_distortion_parameters) {
+		case 14: // not used in OpenCV?
+		case 12:
+		case 8:
+			D.at<double>(5) = data[Parameter::K4];
+			D.at<double>(6) = data[Parameter::K5];
+			D.at<double>(7) = data[Parameter::K6];
+		case 5:
+			D.at<double>(4) = data[Parameter::K3];
+		case 4:
+			D.at<double>(0) = data[Parameter::K1];
+			D.at<double>(1) = data[Parameter::K2];
+			D.at<double>(2) = data[Parameter::P1];
+			D.at<double>(3) = data[Parameter::P2];
+	}
+
+	return D;
+}
+
+Mat Camera::rvec() const {
+	cv::Mat rvec(cv::Size(3, 1), CV_64FC1);
+	ceres::QuaternionToAngleAxis(data + Parameter::ROTATION,
+		(double*)(rvec.data));
+	return rvec;
+}
+
+Mat Camera::tvec() const {
+	return Mat(Vec3d(data[Parameter::TX], data[Parameter::TY], data[Parameter::TZ]));
+}
+
+Mat Camera::rmat() const {
+	Mat R;
+	cv::Rodrigues(rvec(), R);
+	return R;
+}
+
+Mat Camera::extrinsicMatrix() const {
+	Mat T = Mat::eye(4, 4, CV_64FC1);
+	rmat().copyTo(T(Rect(0, 0, 3, 3)));
+	tvec().copyTo(T(Rect(3, 0, 1, 3)));
+	return T;
+}
+
+Mat Camera::extrinsicMatrixInverse() const {
+	return transform::inverse(extrinsicMatrix());
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 struct ReprojectionError {
 	/**
 	 * Reprojection error.
 	 *
 	 * Camera model has _CAMERA_PARAMETERS parameters:
 	 *
-	 * - rotation and translation: rx, ry, rz, tx, ty, tx
+	 * - rotation and translation: q1, q2, q3, q4, tx, ty, tz
 	 * - focal length: f (fx == fy assumed)
 	 * - principal point: cx, cy
-	 * - first three radial distortion coefficients: k1, k2, k3
+	 * - distortion coefficients: k1, k2, k3, k4, k5, k6, p1, p2
 	 *
 	 * Camera model documented in
 	 * https://docs.opencv.org/master/d9/d0c/group__calib3d.html
+	 * https://github.com/opencv/opencv/blob/b698d0a6ee12342a87b8ad739d908fd8d7ff1fca/modules/calib3d/src/calibration.cpp#L774
 	 */
 	explicit ReprojectionError(double observed_x, double observed_y)
 		: observed_x(observed_x), observed_y(observed_y) {}
@@ -46,9 +206,8 @@ struct ReprojectionError {
 					T* residuals) const {
 
 		T p[3];
+		ceres::QuaternionRotatePoint(camera + Camera::Parameter::ROTATION, point, p);
 
-		// Apply rotation and translation
-		ceres::AngleAxisRotatePoint(camera + Camera::Parameter::ROTATION, point, p);
 
 		p[0] += camera[Camera::Parameter::TX];
 		p[1] += camera[Camera::Parameter::TY];
@@ -58,24 +217,37 @@ struct ReprojectionError {
 		T y = T(p[1]) / p[2];
 
 		// Intrinsic parameters
-		const T& focal = camera[Camera::Parameter::F];
+		const T& f = camera[Camera::Parameter::F];
 		const T& cx = camera[Camera::Parameter::CX];
 		const T& cy = camera[Camera::Parameter::CY];
 
-		// Distortion parameters k1, k2, k3
+		// Distortion parameters
 		const T& k1 = camera[Camera::Parameter::K1];
 		const T& k2 = camera[Camera::Parameter::K2];
 		const T& k3 = camera[Camera::Parameter::K3];
+		const T& k4 = camera[Camera::Parameter::K4];
+		const T& k5 = camera[Camera::Parameter::K5];
+		const T& k6 = camera[Camera::Parameter::K6];
+		const T& p1 = camera[Camera::Parameter::P1];
+		const T& p2 = camera[Camera::Parameter::P2];
 
 		const T r2 = x*x + y*y;
 		const T r4 = r2*r2;
 		const T r6 = r4*r2;
 
-		T distortion = T(1.0) + k1*r2 + k2*r4 + k3*r6;
-
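+		// Distortion model (OpenCV rational model, thin prism terms omitted):
+		//   x_d = x*(1 + k1*r^2 + k2*r^4 + k3*r^6)/(1 + k4*r^2 + k5*r^4 + k6*r^6)
+		//         + 2*p1*x*y + p2*(r^2 + 2*x^2)
+		// (and symmetrically for y_d), then u = f*x_d + cx, v = f*y_d + cy.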
+		// Radial distortion
+		const T cdist = T(1.0) + k1*r2 + k2*r4 + k3*r6;
+		// Radial distortion: rational model
+		const T icdist = T(1.0)/(T(1.0) + k4*r2 + k5*r4 + k6*r6);
+		// Tangential distortion
+		const T pdistx =      (T(2.0)*x*y)*p1 + (r2 + T(2.0)*x*x)*p2;
+		const T pdisty = (r2 + T(2.0)*y*y)*p1 +      (T(2.0)*x*y)*p2;
+		// Apply distortion
+		const T xd = x*cdist*icdist + pdistx;
+		const T yd = y*cdist*icdist + pdisty;
 		// Projected point position
-		T predicted_x = focal*x*distortion + cx;
-		T predicted_y = focal*y*distortion + cy;
+		T predicted_x = f*xd + cx;
+		T predicted_y = f*yd + cy;
 
 		// Error: the difference between the predicted and observed position
 		residuals[0] = predicted_x - T(observed_x);
@@ -99,25 +271,15 @@ struct ReprojectionError {
 	double observed_y;
 };
 
-struct LengthError {
-	explicit LengthError(const double d) : d(d) {}
-
-	template <typename T>
-	bool operator()(const T* const p1, const T* const p2, T* residual) const {
-		auto x = p1[0] - p2[0];
-		auto y = p1[1] - p2[1];
-		auto z = p1[2] - p2[2];
-		residual[0] = T(d) - sqrt(x*x + y*y + z*z);
-
-		return true;
-	}
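+// Evaluating ReprojectionError with observed = (0, 0) makes the "residuals"
+// equal to the projected pixel coordinates, so the same functor can be reused
+// as a plain projection function in projectPoint() below.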
+static ReprojectionError reproject_ = ReprojectionError(0.0, 0.0);
 
-	static ceres::CostFunction* Create(const double d) {
-		return (new ceres::AutoDiffCostFunction<LengthError, 1, 3, 3>(new LengthError(d)));
-	}
+cv::Point2d ftl::calibration::projectPoint(const Camera& camera, const cv::Point3d& point) {
+	cv::Point2d out;
+	reproject_(static_cast<const double* const>(camera.data), reinterpret_cast<const double* const>(&(point.x)), reinterpret_cast<double*>(&(out.x)));
+	return out;
+}
 
-	double d;
-};
+////////////////////////////////////////////////////////////////////////////////
 
 struct ScaleError {
 	ScaleError(const double d, const Point3d& p) : d(d), p(p) {}
@@ -200,8 +362,7 @@ double ftl::calibration::optimizeScale(const vector<Point3d> &object_points, vec
 ////////////////////////////////////////////////////////////////////////////////
 
 void BundleAdjustment::addCamera(Camera& camera) {
-	// cameras can't be added after points
-	if (points_.size() != 0) { throw std::exception(); }
+	if (points_.size() != 0) { throw ftl::exception("cameras can't be added after points"); }
 
 	cameras_.push_back(&camera);
 }
@@ -212,17 +373,34 @@ void BundleAdjustment::addCameras(vector<Camera>& cameras) {
 
 void BundleAdjustment::addPoint(const vector<bool>& visibility, const vector<Point2d>& observations, Point3d& point) {
 	if ((observations.size() != visibility.size()) ||
-		(visibility.size() != cameras_.size())) { throw std::exception(); }
+		(visibility.size() != cameras_.size())) { throw ftl::exception("observation and visibility vector sizes do not match"); }
 
-	points_.push_back(BundleAdjustment::Point{visibility, observations, reinterpret_cast<double*>(&point)});
+	points_.push_back(BundleAdjustment::Point{visibility, observations, point});
 }
 
 void BundleAdjustment::addPoints(const vector<vector<bool>>& visibility, const vector<vector<Point2d>>& observations, vector<Point3d>& points) {
-	if ((observations.size() != points.size()) ||
-		(observations.size() != visibility.size())) { throw std::exception(); }
+	if (observations.size() != visibility.size()) { throw ftl::exception("observation and visibility vector sizes do not match"); }
 
-	for (size_t i = 0; i < points.size(); i++) {
-		addPoint(visibility[i], observations[i], points[i]);
+	auto npoints = points.size();
+	auto ncameras = observations.size();
+
+	for (unsigned c = 0; c < ncameras; c++) {
+		if ((npoints != observations[c].size()) ||
+			(npoints != visibility[c].size())) {
+				throw ftl::exception("wrong number of points");
+			}
+	}
+
+	vector<bool> add_vis;
+	vector<Point2d> add_obs;
+	for (size_t i = 0; i < npoints; i++) {
+		add_obs.clear();
+		add_vis.clear();
+		for (size_t c = 0; c < ncameras; c++) {
+			add_vis.push_back(visibility[c][i]);
+			add_obs.push_back(observations[c][i]);
+		}
+		addPoint(add_vis, add_obs, points[i]);
 	}
 }
 
@@ -230,36 +408,54 @@ void BundleAdjustment::addPoint(const vector<Point2d>& observations, Point3d& po
 	vector<bool> visibility(observations.size(), true);
 	addPoint(visibility, observations, point);
 }
+
 void BundleAdjustment::addPoints(const vector<vector<Point2d>>& observations, std::vector<Point3d>& points) {
-	if (observations.size() != points.size()) { throw std::exception(); }
+	if (observations.size() != points.size()) { throw ftl::exception("observation and point vector sizes do not match"); }
 	for (size_t i = 0; i < points.size(); i++) {
 		addPoint(observations[i], points[i]);
 	}
 }
 
 void BundleAdjustment::addObject(const vector<Point3d> &object_points) {
-	if (points_.size() % object_points.size() != 0) { throw std::exception(); }
+	if (points_.size() % object_points.size() != 0) { throw ftl::exception("object size does not match point count"); }
 	objects_.push_back(BundleAdjustment::Object {0, (int) points_.size(), object_points});
 }
 
 void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const BundleAdjustment::Options &options) {
-	
+
 	vector<int> constant_camera_parameters;
 
-	// extrinsic paramters
-	if (!options.optimize_motion) {
-		for (int i = 0; i < 3; i++) {
-			constant_camera_parameters.push_back(Camera::Parameter::ROTATION + i);
-			constant_camera_parameters.push_back(Camera::Parameter::TRANSLATION + i);
+	// apply options
+	for (size_t i = 0; i < cameras_.size(); i++) {
+		if (!options.rational_model) {
+			cameras_[i]->data[Camera::Parameter::K4] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K5] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K6] = 0.0;
+		}
+		if (options.zero_distortion) {
+			cameras_[i]->data[Camera::Parameter::K1] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K2] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K3] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K4] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K5] = 0.0;
+			cameras_[i]->data[Camera::Parameter::K6] = 0.0;
+			cameras_[i]->data[Camera::Parameter::P1] = 0.0;
+			cameras_[i]->data[Camera::Parameter::P2] = 0.0;
 		}
 	}
 
-	if (!options.fix_distortion) {
-		LOG(WARNING) << "Optimization of distortion parameters is not supported"
-					 << "and results may contain invalid values.";
+	// set extrinsic parameters constant for all cameras
+	if (!options.optimize_motion) {
+		constant_camera_parameters.push_back(Camera::Parameter::Q1);
+		constant_camera_parameters.push_back(Camera::Parameter::Q2);
+		constant_camera_parameters.push_back(Camera::Parameter::Q3);
+		constant_camera_parameters.push_back(Camera::Parameter::Q4);
+		constant_camera_parameters.push_back(Camera::Parameter::TX);
+		constant_camera_parameters.push_back(Camera::Parameter::TY);
+		constant_camera_parameters.push_back(Camera::Parameter::TZ);
 	}
 
-	// intrinsic parameters
+	// set intrinsic parameters constant for all cameras
 	if (!options.optimize_intrinsic || options.fix_focal) {
 		constant_camera_parameters.push_back(Camera::Parameter::F);
 	}
@@ -272,6 +468,11 @@ void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const
 		constant_camera_parameters.push_back(Camera::Parameter::K1);
 		constant_camera_parameters.push_back(Camera::Parameter::K2);
 		constant_camera_parameters.push_back(Camera::Parameter::K3);
+		constant_camera_parameters.push_back(Camera::Parameter::K4);
+		constant_camera_parameters.push_back(Camera::Parameter::K5);
+		constant_camera_parameters.push_back(Camera::Parameter::K6);
+		constant_camera_parameters.push_back(Camera::Parameter::P1);
+		constant_camera_parameters.push_back(Camera::Parameter::P2);
 	}
 
 	if (!options.optimize_motion && !options.optimize_intrinsic) {
@@ -288,36 +489,61 @@ void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const
 			options.fix_camera_extrinsic.begin(), options.fix_camera_extrinsic.end());
 
 		for (size_t i = 0; i < cameras_.size(); i++) {
-			std::unordered_set<int> fixed_parameters(
+			std::unordered_set<int> constant_parameters(
 				constant_camera_parameters.begin(),
 				constant_camera_parameters.end());
 
 			if (fix_extrinsic.find(i) != fix_extrinsic.end()) {
-				fixed_parameters.insert({
-					Camera::Parameter::RX, Camera::Parameter::RY,
-					Camera::Parameter::RZ, Camera::Parameter::TX,
-					Camera::Parameter::TY, Camera::Parameter::TZ
+				constant_parameters.insert({
+					Camera::Parameter::Q1, Camera::Parameter::Q2,
+					Camera::Parameter::Q3, Camera::Parameter::Q4,
+					Camera::Parameter::TX, Camera::Parameter::TY,
+					Camera::Parameter::TZ
 				});
 			}
 
 			if (fix_intrinsic.find(i) != fix_intrinsic.end()) {
-				fixed_parameters.insert({
+				constant_parameters.insert({
 					Camera::Parameter::F, Camera::Parameter::CX,
 					Camera::Parameter::CY
 				});
 			}
 
-			vector<int> params(fixed_parameters.begin(), fixed_parameters.end());
+			vector<int> params(constant_parameters.begin(), constant_parameters.end());
 
 			if (params.size() == Camera::n_parameters) {
-				// Ceres crashes if all parameters are set constant using
+				// Ceres crashes if all parameters are set constant with
 				// SubsetParameterization()
 				// https://github.com/ceres-solver/ceres-solver/issues/347
+				// https://github.com/ceres-solver/ceres-solver/commit/27183d661ecae246dbce6d03cacf84f39fba1f1e
 				problem.SetParameterBlockConstant(getCameraPtr(i));
 			}
 			else if (params.size() > 0) {
-				problem.SetParameterization(getCameraPtr(i),
-					new ceres::SubsetParameterization(Camera::n_parameters, params));
+				ceres::LocalParameterization* camera_parameterization = nullptr;
+
+				if (constant_parameters.count(Camera::Parameter::ROTATION) == 0) {
+					// quaternion parametrization
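+					// ProductParameterization treats the camera block as
+					// [quaternion | remaining parameters]; the subset indices
+					// below are relative to the second part, hence the -4.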
+					for (auto& v : params) { v -= 4; }
+					camera_parameterization =
+						new ceres::ProductParameterization(
+							new ceres::QuaternionParameterization(),
+							new ceres::SubsetParameterization(Camera::n_parameters - 4, params));
+				}
+				else {
+					// extrinsic parameters constant
+					CHECK(constant_parameters.count(Camera::Parameter::Q1));
+					CHECK(constant_parameters.count(Camera::Parameter::Q2));
+					CHECK(constant_parameters.count(Camera::Parameter::Q3));
+					CHECK(constant_parameters.count(Camera::Parameter::Q4));
+					CHECK(constant_parameters.count(Camera::Parameter::TX));
+					CHECK(constant_parameters.count(Camera::Parameter::TY));
+					CHECK(constant_parameters.count(Camera::Parameter::TZ));
+
+					camera_parameterization =
+						new ceres::SubsetParameterization(Camera::n_parameters, params);
+				}
+
+				problem.SetParameterization(getCameraPtr(i), camera_parameterization);
 			}
 		}
 	}
@@ -325,7 +551,7 @@ void BundleAdjustment::_setCameraParametrization(ceres::Problem &problem, const
 
 void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
 
-	ceres::LossFunction *loss_function = nullptr;
+	ceres::LossFunction *loss_function = nullptr; // nullptr: plain squared loss
 
 	if (options.loss == Options::Loss::HUBER) {
 		loss_function = new ceres::HuberLoss(1.0);
@@ -334,7 +560,7 @@ void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, co
 		loss_function = new ceres::CauchyLoss(1.0);
 	}
 
-	for (auto point : points_) {
+	for (auto& point : points_) {
 		for (size_t i = 0; i < point.observations.size(); i++) {
 			if (!point.visibility[i]) { continue; }
 			ceres::CostFunction* cost_function =
@@ -343,59 +569,17 @@ void BundleAdjustment::_buildBundleAdjustmentProblem(ceres::Problem &problem, co
 			problem.AddResidualBlock(cost_function,
 						loss_function,
 						getCameraPtr(i),
-						point.point);
+						&(point.point.x)
+			);
 		}
 	}
 
-	// apply options
-
 	_setCameraParametrization(problem, options);
 
 	if (!options.optmize_structure) {
 		// do not optimize points
-		for (auto &point : points_) { problem.SetParameterBlockConstant(point.point); }
-	}
-}
-
-void BundleAdjustment::_buildLengthProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
-
-	// same idea as in scale optimization
-
-	ceres::LossFunction *loss_function = nullptr;
-
-	// should use separate configuration option
-	/*
-	if (options.loss == Options::Loss::HUBER) {
-		loss_function = new ceres::HuberLoss(1.0);
-	}
-	else if (options.loss == Options::Loss::CAUCHY) {
-		loss_function = new ceres::CauchyLoss(1.0);
-	}
-	*/
-
-	for (auto &object : objects_) {
-		int npoints = object.object_points.size();
-		auto &object_points = object.object_points;
-
-		vector<double> d;
-		for (int i = 0; i < npoints; i++) {
-			for (int j = i + 1; j < npoints; j++) {
-				d.push_back(norm(object_points[i]-object_points[j]));
-			}
-		}
-
-		for (int p = object.idx_start; p < object.idx_end; p += npoints) {
-			size_t i_d = 0;
-			for (size_t i = 0; i < object_points.size(); i++) {
-				for (size_t j = i + 1; j < object_points.size(); j++) {
-					double* p1 = points_[p+i].point;
-					double* p2 = points_[p+j].point;
-
-					auto cost_function = LengthError::Create(d[i_d++]);
-
-					problem.AddResidualBlock(cost_function, loss_function, p1, p2);
-				}
-			}
+		for (auto &point : points_) {
+			problem.SetParameterBlockConstant(&(point.point.x));
 		}
 	}
 }
@@ -403,7 +587,6 @@ void BundleAdjustment::_buildLengthProblem(ceres::Problem &problem, const Bundle
 void BundleAdjustment::_buildProblem(ceres::Problem &problem, const BundleAdjustment::Options &options) {
 
 	_buildBundleAdjustmentProblem(problem, options);
-	_buildLengthProblem(problem, options);
 }
 
 void BundleAdjustment::run(const BundleAdjustment::Options &bundle_adjustment_options) {
@@ -411,13 +594,13 @@ void BundleAdjustment::run(const BundleAdjustment::Options &bundle_adjustment_op
 	_buildProblem(problem, bundle_adjustment_options);
 
 	ceres::Solver::Options options;
-	options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
+	options.linear_solver_type = ceres::DENSE_SCHUR;
 	options.minimizer_progress_to_stdout = bundle_adjustment_options.verbose;
-	
+
 	if (bundle_adjustment_options.max_iter > 0) {
 		options.max_num_iterations = bundle_adjustment_options.max_iter;
 	}
-	
+
 	if (bundle_adjustment_options.num_threads > 0) {
 		options.num_threads = bundle_adjustment_options.num_threads;
 	}
@@ -438,31 +621,24 @@ void BundleAdjustment::run() {
 	run(options);
 }
 
-void BundleAdjustment::_reprojectionErrorMSE(const int camera, double &error, double &npoints) const {
-	vector<Point2d>	observations;
-	vector<Point3d>	points;
-
-	observations.reserve(points_.size());
-	points.reserve(points_.size());
+void BundleAdjustment::_reprojectionErrorSE(const int camera, double &error, double &npoints) const {
+	error = 0.0;
+	npoints = 0.0;
 
 	for (const auto& point : points_) {
 		if (!point.visibility[camera]) { continue; }
-		observations.push_back(point.observations[camera]);
-		points.push_back(Point3d(point.point[0], point.point[1], point.point[2]));
+		const auto& obs = point.observations[camera];
+		const auto& proj = projectPoint(*(cameras_[camera]), point.point);
+		error += pow(proj.x - obs.x, 2);
+		error += pow(proj.y - obs.y, 2);
+		npoints += 1.0;
 	}
-
-	auto K = cameras_[camera]->intrinsicMatrix();
-	auto rvec = cameras_[camera]->rvec();
-	auto tvec = cameras_[camera]->tvec();
-
-	error = ftl::calibration::reprojectionError(observations, points, K, Mat::zeros(1, 5, CV_64FC1), rvec, tvec);
-	npoints = points.size();
 }
 
 double BundleAdjustment::reprojectionError(const int camera) const {
-	double error, ncameras;
-	_reprojectionErrorMSE(camera, ncameras, error);
-	return error / ncameras;
+	double error, npoints;
+	_reprojectionErrorSE(camera, error, npoints);
+	return sqrt(error / npoints);
 }
 
 double BundleAdjustment::reprojectionError() const {
@@ -470,9 +646,9 @@ double BundleAdjustment::reprojectionError() const {
 	double npoints = 0.0;
 	for (size_t i = 0; i < cameras_.size(); i++) {
 		double e, n;
-		_reprojectionErrorMSE(i, e, n);
-		error += e * n;
+		_reprojectionErrorSE(i, e, n);
+		error += e;
 		npoints += n;
 	}
-	return error / npoints;
+	return sqrt(error / npoints);
 }
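+
+// Both public reprojectionError() overloads now return the RMS reprojection
+// error, sqrt(sum of squared pixel residuals / number of observations), with
+// the per-camera squared sums and counts accumulated by _reprojectionErrorSE().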
diff --git a/components/calibration/src/parameters.cpp b/components/calibration/src/parameters.cpp
index d0e20aa63..fae57d07d 100644
--- a/components/calibration/src/parameters.cpp
+++ b/components/calibration/src/parameters.cpp
@@ -1,7 +1,13 @@
-#include "ftl/calibration/parameters.hpp"
+
+#include <loguru.hpp>
+
+#include <ftl/calibration/parameters.hpp>
+#include <ftl/exception.hpp>
 
 #include <opencv2/calib3d/calib3d.hpp>
 
+#include <ceres/rotation.h>
+
 using cv::Mat;
 using cv::Size;
 using cv::Point2d;
@@ -13,141 +19,6 @@ using std::vector;
 
 using namespace ftl::calibration;
 
-using ftl::calibration::Camera;
-
-////////////////////////////////////////////////////////////////////////////////
-
-void Camera::setRotation(const Mat& R) {
-	if (((R.size() != Size(3, 3)) &&
-		(R.size() != Size(3, 1)) &&
-		(R.size() != Size(1, 3))) ||
-		(R.type() != CV_64FC1)) { throw std::exception(); }
-
-	Mat rvec;
-	if (R.size() == cv::Size(3, 3)) { cv::Rodrigues(R, rvec); }
-	else { rvec = R; }
-
-	data[Parameter::RX] = rvec.at<double>(0);
-	data[Parameter::RY] = rvec.at<double>(1);
-	data[Parameter::RZ] = rvec.at<double>(2);
-}
-
-void Camera::setTranslation(const Mat& t) {
-	if ((t.type() != CV_64FC1) ||
-		(t.size() != cv::Size(1, 3))) { throw std::exception(); }
-
-	data[Parameter::TX] = t.at<double>(0);
-	data[Parameter::TY] = t.at<double>(1);
-	data[Parameter::TZ] = t.at<double>(2);
-}
-
-
-void Camera::setIntrinsic(const Mat& K) {
-	if ((K.type() != CV_64FC1) || (K.size() != cv::Size(3, 3))) {
-		throw std::exception();
-	}
-
-	data[Parameter::F] = K.at<double>(0, 0);
-	data[Parameter::CX] = K.at<double>(0, 2);
-	data[Parameter::CY] = K.at<double>(1, 2);
-}
-
-void Camera::setDistortion(const Mat& D) {
-	if ((D.type() != CV_64FC1)) { throw std::exception(); }
-	switch(D.total()) {
-		case 12:
-			/*
-			data[Parameter::S1] = D.at<double>(8);
-			data[Parameter::S2] = D.at<double>(9);
-			data[Parameter::S3] = D.at<double>(10);
-			data[Parameter::S4] = D.at<double>(11);
-			*/
-			[[fallthrough]];
-		
-		case 8:
-			/*
-			data[Parameter::K4] = D.at<double>(5);
-			data[Parameter::K5] = D.at<double>(6);
-			data[Parameter::K6] = D.at<double>(7);
-			*/
-			[[fallthrough]];
-
-		case 5:
-			data[Parameter::K3] = D.at<double>(4);
-			[[fallthrough]];
-
-		default:
-			data[Parameter::K1] = D.at<double>(0);
-			data[Parameter::K2] = D.at<double>(1);
-			/*
-			data[Parameter::P1] = D.at<double>(2);
-			data[Parameter::P2] = D.at<double>(3);
-			*/
-	}
-}
-
-Camera::Camera(const Mat &K, const Mat &D, const Mat &R, const Mat &tvec) {
-	setIntrinsic(K, D);
-	if (!R.empty()) { setRotation(R); }
-	if (!tvec.empty()) { setTranslation(tvec); }
-}
-
-Mat Camera::intrinsicMatrix() const {
-	Mat K = Mat::eye(3, 3, CV_64FC1);
-	K.at<double>(0, 0) = data[Parameter::F];
-	K.at<double>(1, 1) = data[Parameter::F];
-	K.at<double>(0, 2) = data[Parameter::CX];
-	K.at<double>(1, 2) = data[Parameter::CY];
-	return K;
-}
-
-Mat Camera::distortionCoefficients() const {
-	Mat D;
-	if      (Camera::n_distortion_parameters <= 4)  { D = Mat::zeros(4, 1, CV_64FC1); }
-	else if (Camera::n_distortion_parameters <= 5)  { D = Mat::zeros(5, 1, CV_64FC1); }
-	else if (Camera::n_distortion_parameters <= 8)  { D = Mat::zeros(8, 1, CV_64FC1); }
-	else if (Camera::n_distortion_parameters <= 12) { D = Mat::zeros(12, 1, CV_64FC1); }
-	else if (Camera::n_distortion_parameters <= 14) { D = Mat::zeros(14, 1, CV_64FC1); }
-
-	switch(Camera::n_distortion_parameters) {
-		case 14:
-		case 12:
-		case 8:
-		case 5:
-			D.at<double>(4) = data[Parameter::K3];
-		case 4:
-			D.at<double>(0) = data[Parameter::K1];
-			D.at<double>(1) = data[Parameter::K2];
-	}
-
-	return D;
-}
-
-Mat Camera::rvec() const {
-	return Mat(Vec3d(data[Parameter::RX], data[Parameter::RY], data[Parameter::RZ]));
-}
-
-Mat Camera::tvec() const {
-	return Mat(Vec3d(data[Parameter::TX], data[Parameter::TY], data[Parameter::TZ]));
-}
-
-Mat Camera::rmat() const {
-	Mat R;
-	cv::Rodrigues(rvec(), R);
-	return R;
-}
-
-Mat Camera::extrinsicMatrix() const {
-	Mat T = Mat::eye(4, 4, CV_64FC1);
-	rmat().copyTo(T(Rect(0, 0, 3, 3)));
-	tvec().copyTo(T(Rect(3, 0, 1, 3)));
-	return T;
-}
-
-Mat Camera::extrinsicMatrixInverse() const {
-	return extrinsicMatrix().inv();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 bool validate::translationStereo(const Mat &t) {
@@ -170,7 +41,7 @@ bool validate::rotationMatrix(const Mat &M) {
 	// rotation matrix is orthogonal: M.T * M == M * M.T == I
 	//if (cv::countNonZero((M.t() * M) != Mat::eye(Size(3, 3), CV_64FC1)) != 0)
 	//									{ return false; }
-	
+
 	return true;
 }
 
@@ -178,9 +49,9 @@ bool validate::pose(const Mat &M) {
 	if (M.size() != Size(4, 4))			{ return false; }
 	if (!validate::rotationMatrix(M(cv::Rect(0 , 0, 3, 3))))
 										{ return false; }
-	if (!(	(M.at<double>(3, 0) == 0.0) && 
-			(M.at<double>(3, 1) == 0.0) && 
-			(M.at<double>(3, 2) == 0.0) && 
+	if (!(	(M.at<double>(3, 0) == 0.0) &&
+			(M.at<double>(3, 1) == 0.0) &&
+			(M.at<double>(3, 2) == 0.0) &&
 			(M.at<double>(3, 3) == 1.0))) { return false; }
 
 	return true;
@@ -190,11 +61,11 @@ bool validate::cameraMatrix(const Mat &M) {
 	if (M.type() != CV_64F)				{ return false; }
 	if (M.channels() != 1)				{ return false; }
 	if (M.size() != Size(3, 3))			{ return false; }
-	
-	if (!(	(M.at<double>(2, 0) == 0.0) && 
-			(M.at<double>(2, 1) == 0.0) && 
+
+	if (!(	(M.at<double>(2, 0) == 0.0) &&
+			(M.at<double>(2, 1) == 0.0) &&
 			(M.at<double>(2, 2) == 1.0))) { return false; }
-	
+
 	return true;
 }
 
@@ -223,7 +94,7 @@ bool ftl::calibration::validate::distortionCoefficients(const Mat &D, Size size)
 			s[3] = D.at<double>(11);
 			*/
 			[[fallthrough]];
-		
+
 		case 8:
 			k[3] = D.at<double>(5);
 			k[4] = D.at<double>(6);
@@ -242,7 +113,7 @@ bool ftl::calibration::validate::distortionCoefficients(const Mat &D, Size size)
 			p[1] = D.at<double>(3);
 			*/
 	}
-	
+
 	int diagonal = sqrt(size.width*size.width+size.height*size.height) + 1.0;
 
 	bool is_n = true;
@@ -274,11 +145,11 @@ bool ftl::calibration::validate::distortionCoefficients(const Mat &D, Size size)
 
 		if (!is_n && !is_p) { return false; }
 	}
-	
+
 	return true;
 }
 
-Mat ftl::calibration::scaleCameraMatrix(const Mat &K, const Size &size_new, const Size &size_old) {
+Mat ftl::calibration::scaleCameraMatrix(const Mat &K, const Size &size_old, const Size &size_new) {
 	Mat S(cv::Size(3, 3), CV_64F, 0.0);
 	double scale_x = ((double) size_new.width) / ((double) size_old.width);
 	double scale_y = ((double) size_new.height) / ((double) size_old.height);
diff --git a/components/calibration/src/structures.cpp b/components/calibration/src/structures.cpp
new file mode 100644
index 000000000..2c44a6b02
--- /dev/null
+++ b/components/calibration/src/structures.cpp
@@ -0,0 +1,234 @@
+#include <opencv2/core.hpp>
+#include <opencv2/calib3d.hpp>
+#include <opencv2/core/utility.hpp>
+
+#include <ftl/exception.hpp>
+#include <ftl/calibration/structures.hpp>
+#include <ftl/calibration/parameters.hpp>
+
+using ftl::calibration::CalibrationData;
+
+CalibrationData::Intrinsic::DistortionCoefficients::DistortionCoefficients() :
+		data_(14, 0.0) {
+}
+
+const cv::Mat CalibrationData::Intrinsic::DistortionCoefficients::Mat(int nparams) const {
+	if (nparams <= 0) {
+		return cv::Mat();
+	}
+	if (nparams > 14) {
+		nparams = 14;
+	}
+	return cv::Mat(cv::Size(nparams, 1), CV_64FC1, const_cast<double*>(data_.data()));
+}
+
+cv::Mat CalibrationData::Intrinsic::DistortionCoefficients::Mat(int nparams) {
+	if (nparams <= 0) {
+		return cv::Mat();
+	}
+	if (nparams > 14) {
+		nparams = 14;
+	}
+	return cv::Mat(cv::Size(nparams, 1), CV_64FC1, data_.data());
+}
+
+double& CalibrationData::Intrinsic::DistortionCoefficients::operator[](unsigned i) { return data_[i]; }
+double CalibrationData::Intrinsic::DistortionCoefficients::operator[](unsigned i) const { return data_[i]; }
+
+CalibrationData::Intrinsic::Intrinsic() :
+	resolution(0, 0), fx(0.0), fy(0.0), cx(0.0), cy(0.0) {}
+
+void CalibrationData::Intrinsic::set(const cv::Mat& K, cv::Size sz) {
+	fx = K.at<double>(0, 0);
+	fy = K.at<double>(1, 1);
+	cx = K.at<double>(0, 2);
+	cy = K.at<double>(1, 2);
+	resolution = sz;
+}
+
+void CalibrationData::Intrinsic::set(const cv::Mat& K, const cv::Mat& D, cv::Size sz) {
+	D.copyTo(distCoeffs.Mat(D.cols));
+	set(K, sz);
+}
+
+CalibrationData::Intrinsic::Intrinsic(const cv::Mat &K, cv::Size size) {
+	set(K, size);
+}
+
+CalibrationData::Intrinsic::Intrinsic(const cv::Mat &K, const cv::Mat &D, cv::Size size) {
+	set(K, D, size);
+}
+
+CalibrationData::Intrinsic::Intrinsic(const CalibrationData::Intrinsic& other, cv::Size size) {
+	distCoeffs = DistortionCoefficients(other.distCoeffs);
+	sensorSize = other.sensorSize;
+	auto K = other.matrix(size);
+	fx = K.at<double>(0, 0);
+	fy = K.at<double>(1, 1);
+	cx = K.at<double>(0, 2);
+	cy = K.at<double>(1, 2);
+	resolution = size;
+}
+
+cv::Mat CalibrationData::Intrinsic::matrix() const {
+	cv::Mat K(cv::Size(3, 3), CV_64FC1, cv::Scalar(0));
+	K.at<double>(0, 0) = fx;
+	K.at<double>(1, 1) = fy;
+	K.at<double>(0, 2) = cx;
+	K.at<double>(1, 2) = cy;
+	K.at<double>(2, 2) = 1.0;
+	return K;
+}
+
+cv::Mat CalibrationData::Intrinsic::matrix(cv::Size size) const {
+	return ftl::calibration::scaleCameraMatrix(matrix(), resolution, size);
+}
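+
+// Note: matrix(size) passes (resolution, size), matching the revised
+// scaleCameraMatrix(K, size_old, size_new) argument order. Intrinsics scale by
+// new/old per axis, so (as a sketch, assuming the usual K' = S*K with
+// S = diag(scale_x, scale_y, 1)) halving both dimensions halves fx, fy, cx and cy.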
+
+////////////////////////////////////////////////////////////////////////////////
+
+
+void CalibrationData::Extrinsic::set(const cv::Mat& T) {
+	if (T.type() != CV_64FC1) {
+		throw ftl::exception("Input must be CV_64FC1");
+	}
+	if (!ftl::calibration::validate::pose(T)) {
+		throw ftl::exception("T is not a valid transform matrix");
+	}
+
+	cv::Rodrigues(T(cv::Rect(0, 0, 3, 3)), rvec);
+	tvec[0] = T.at<double>(0, 3);
+	tvec[1] = T.at<double>(1, 3);
+	tvec[2] = T.at<double>(2, 3);
+}
+
+void CalibrationData::Extrinsic::set(cv::InputArray R, cv::InputArray t) {
+	if ((t.type() != CV_64FC1) || (R.type() != CV_64FC1)) {
+		throw ftl::exception("Type of R and t must be CV_64FC1");
+	}
+
+	if ((t.size() != cv::Size(3, 1)) && (t.size() != cv::Size(1, 3))) {
+		throw ftl::exception("Size of t must be (3, 1) or (1, 3)");
+	}
+
+	if (R.isMat()) {
+		const auto& rmat = R.getMat();
+
+		if (R.size() == cv::Size(3, 3)) {
+			if (!ftl::calibration::validate::rotationMatrix(rmat)) {
+				throw ftl::exception("R is not a rotation matrix");
+			}
+			cv::Rodrigues(rmat, rvec);
+		}
+		else if ((R.size() == cv::Size(3, 1)) || R.size() == cv::Size(1, 3)) {
+			rvec[0] = rmat.at<double>(0);
+			rvec[1] = rmat.at<double>(1);
+			rvec[2] = rmat.at<double>(2);
+		}
+		else {
+			throw ftl::exception("R must be a 3x3 rotation matrix or 3x1/1x3 rotation vector (Rodrigues)");
+		}
+	}
+
+	const auto& tmat = t.getMat();
+	tvec[0] = tmat.at<double>(0);
+	tvec[1] = tmat.at<double>(1);
+	tvec[2] = tmat.at<double>(2);
+}
+
+CalibrationData::Extrinsic::Extrinsic() : rvec(0.0, 0.0, 0.0), tvec(0.0, 0.0, 0.0) {}
+
+CalibrationData::Extrinsic::Extrinsic(const cv::Mat &T) {
+	set(T);
+}
+
+CalibrationData::Extrinsic::Extrinsic(cv::InputArray R, cv::InputArray t) {
+	set(R, t);
+}
+
+cv::Mat CalibrationData::Extrinsic::matrix() const {
+	cv::Mat T(cv::Size(4, 4), CV_64FC1, cv::Scalar(0));
+	cv::Rodrigues(rvec, T(cv::Rect(0, 0, 3, 3)));
+	T.at<double>(0, 3) = tvec[0];
+	T.at<double>(1, 3) = tvec[1];
+	T.at<double>(2, 3) = tvec[2];
+	T.at<double>(3, 3) = 1.0;
+	return T;
+}
+
+CalibrationData::Extrinsic CalibrationData::Extrinsic::inverse() const {
+	return CalibrationData::Extrinsic(ftl::calibration::transform::inverse(matrix()));
+}
+
+cv::Mat CalibrationData::Extrinsic::rmat() const {
+	cv::Mat R(cv::Size(3, 3), CV_64FC1, cv::Scalar(0));
+	cv::Rodrigues(rvec, R);
+	return R;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+CalibrationData CalibrationData::readFile(const std::string &path) {
+
+	cv::FileStorage fs;
+	fs.open(path.c_str(), cv::FileStorage::READ);
+	if (!fs.isOpened()) {
+		throw std::exception();
+	}
+	CalibrationData calibration;
+	fs["enabled"] >> calibration.enabled;
+
+	for (auto it = fs["calibration"].begin(); it != fs["calibration"].end(); it++) {
+		Calibration calib;
+		ftl::codecs::Channel channel;
+
+		(*it)["channel"] >> channel;
+		(*it)["resolution"] >> calib.intrinsic.resolution;
+		(*it)["fx"] >> calib.intrinsic.fx;
+		(*it)["fy"] >> calib.intrinsic.fy;
+		(*it)["cx"] >> calib.intrinsic.cx;
+		(*it)["cy"] >> calib.intrinsic.cy;
+		(*it)["distCoeffs"] >> calib.intrinsic.distCoeffs.data_;
+		(*it)["sensorSize"] >> calib.intrinsic.sensorSize;
+		(*it)["rvec"] >> calib.extrinsic.rvec;
+		(*it)["tvec"] >> calib.extrinsic.tvec;
+
+		calibration.data_[channel] = calib;
+	}
+
+	return calibration;
+}
+
+void CalibrationData::writeFile(const std::string &path) const {
+	cv::FileStorage fs(path, cv::FileStorage::WRITE);
+	if (!fs.isOpened()) {
+		throw std::exception();
+	}
+
+	fs << "enabled" << enabled;
+	fs << "calibration" << "[";
+	for (auto &[channel, data] : data_) {
+		fs	<< "{"
+			<<	"channel" << int(channel)
+			<<	"resolution" << data.intrinsic.resolution
+			<<	"fx" << data.intrinsic.fx
+			<<	"fy" << data.intrinsic.fy
+			<<	"cx" << data.intrinsic.cx
+			<<	"cy" << data.intrinsic.cy
+			<<	"distCoeffs" << data.intrinsic.distCoeffs.data_
+			<< 	"rvec" << data.extrinsic.rvec
+			<< 	"tvec" << data.extrinsic.tvec
+			<< 	"sensorSize" << data.intrinsic.sensorSize
+			<< "}";
+	}
+	fs << "]";
+
+	fs.release();
+}
+
+CalibrationData::Calibration& CalibrationData::get(ftl::codecs::Channel channel) {
+	return data_[channel];
+}
+
+bool CalibrationData::hasCalibration(ftl::codecs::Channel channel) const {
+	return data_.count(channel) != 0;
+}
diff --git a/components/calibration/src/visibility.cpp b/components/calibration/src/visibility.cpp
new file mode 100644
index 000000000..d3878bbdf
--- /dev/null
+++ b/components/calibration/src/visibility.cpp
@@ -0,0 +1,307 @@
+#include <loguru.hpp>
+
+#include <numeric>
+#include <limits>
+#include <algorithm>
+#include <queue>
+
+#include <opencv2/core.hpp>
+
+#include <ftl/exception.hpp>
+#include <ftl/calibration/visibility.hpp>
+
+using cv::Mat;
+using cv::Scalar;
+using cv::Size;
+using std::vector;
+using std::pair;
+using std::make_pair;
+
+using ftl::calibration::Paths;
+using ftl::calibration::Visibility;
+
+/** Get the 1-based index of the highest set bit. */
+inline int hbit(uint64_t a) {
+#ifdef __GNUC__
+	return 64 - __builtin_clzll(a);
+#endif
+	int v = 1;
+	while (a >>= 1) { v++; }
+	return v;
+}
+
+template<typename T>
+Paths<T>::Paths(int id, const vector<int> &previous, const vector<T> &distances) :
+	id_(id), previous_(previous), distances_(distances) {}
+
+template<typename T>
+vector<int> Paths<T>::from(int i) const {
+	vector<int> path;
+	path.push_back(i);
+
+	if (previous_[i] == -1) { return path; }
+	int current = previous_[i];
+
+	while (previous_[current] != -1) {
+		if (distance(i) == std::numeric_limits<T>::max()) { return {}; } // no path
+
+		path.push_back(current);
+		current = previous_[current];
+	}
+
+	path.push_back(id_);
+	return path;
+}
+
+template<typename T>
+vector<int> Paths<T>::to(int i) const {
+	vector<int> path = from(i);
+	std::reverse(path.begin(), path.end());
+	return path;
+}
+
+template<typename T>
+T Paths<T>::distance(int i) const {
+	return distances_[i];
+}
+
+template<typename T>
+bool Paths<T>::connected() const {
+	for (const auto &distance : distances_) {
+		if (distance == std::numeric_limits<T>::max()) { return false; }
+	}
+	return true;
+}
+
+template<typename T>
+std::string Paths<T>::to_string() const {
+	std::stringstream sb;
+
+	int node = -1;
+	for (size_t i = 0; i < distances_.size(); i++) {
+		if (previous_[i] == -1) {
+			node = i;
+			break;
+		}
+	}
+
+	for (size_t i = 0; i < distances_.size(); i++) {
+		sb << node;
+
+		for (const auto &p : to(i)) {
+			sb << " -> " << p;
+		}
+
+		sb << " (" << distances_[i] << ")\n";
+	}
+	return sb.str();
+}
+
+template class Paths<int>;
+template class Paths<float>;
+template class Paths<double>;
+
+////////////////////////////////////////////////////////////////////////////////
+
+template<typename T>
+static bool isValidAdjacencyMatrix(const vector<vector<T>> &graph) {
+	size_t nrows = graph.size();
+	for (const auto &col : graph) {
+		if (nrows != col.size()) { return false; }
+	}
+
+	for (size_t r = 0; r < nrows; r++) {
+		for (size_t c = r; c < nrows; c++) {
+			if (graph[r][c] != graph[c][r]) {
+				return false; }
+		}
+	}
+
+	return true;
+}
+
+template<typename T>
+static vector<int> find_neighbors(int i, const vector<vector<T>> &graph) {
+	vector<int> res;
+
+	for (size_t j = 0; j < graph.size(); j++) {
+		if (graph[i][j] > 0) { res.push_back(j); }
+	}
+
+	return res;
+}
+
+template<typename T>
+static pair<vector<int>, vector<T>> dijstra_impl(const int i, const vector<vector<T>> &graph) {
+	// distance, index
+	std::priority_queue<pair<T, int>, vector<pair<T, int>>, std::greater<pair<T, int>>> pq;
+
+	pq.push(make_pair(0, i));
+
+	vector<T> distance_path(graph.size(), std::numeric_limits<T>::max());
+	vector<int> previous(graph.size(), -1);
+
+	distance_path[i] = 0;
+
+	while(!pq.empty()) {
+		int current = pq.top().second;
+		pq.pop();
+
+		for (int n : find_neighbors(current, graph)) {
+			T cost = graph[current][n] + distance_path[current];
+			if (distance_path[n] > cost) {
+				distance_path[n] = cost;
+				pq.push(make_pair(cost, n));
+				previous[n] = current;
+			}
+		}
+	}
+
+	return make_pair(previous, distance_path);
+}
+
+template<typename T>
+Paths<T> ftl::calibration::dijstra(const int i, const vector<vector<T>> &graph) {
+	auto tmp = dijstra_impl(i, graph);
+	return Paths<T>(i, tmp.first, tmp.second);
+}
+
+template Paths<int> ftl::calibration::dijstra(const int i, const vector<vector<int>> &graph);
+template Paths<float> ftl::calibration::dijstra(const int i, const vector<vector<float>> &graph);
+template Paths<double> ftl::calibration::dijstra(const int i, const vector<vector<double>> &graph);
+
+////////////////////////////////////////////////////////////////////////////////
+
+Visibility::Visibility(int n_cameras) :
+	n_cameras_(n_cameras),
+	n_max_(0),
+	graph_(n_cameras, vector(n_cameras, 0)),
+	mask_(n_cameras, vector(n_cameras, false))
+	{}
+
+Visibility::Visibility(const vector<vector<int>> &graph) :
+	n_cameras_(graph.size()),
+	graph_(graph),
+	mask_(graph.size(), vector(graph.size(), false)) {
+
+		if (!isValidAdjacencyMatrix(graph)) {
+			throw std::exception();
+		}
+}
+
+void Visibility::init(int n_cameras) {
+	n_cameras_ = n_cameras;
+	graph_ = vector(n_cameras, vector(n_cameras, 0));
+	mask_ = vector(n_cameras, vector(n_cameras, false));
+}
+
+template<typename T>
+void Visibility::update(const vector<T> &add) {
+	if ((int) add.size() != n_cameras_) {
+		throw ftl::exception("visibility vector size must match number of cameras");
+	}
+	n_max_ = n_cameras_;
+
+	for (int i = 0; i < n_cameras_; i++) {
+		if (!add[i]) { continue; }
+
+		for (int j = 0; j < n_cameras_; j++) {
+			if (add[j]) { graph_[i][j] += 1; }
+		}
+	}
+}
+
+void Visibility::update(uint64_t add) {
+	if (n_cameras_ > 64) {
+		throw ftl::exception("Bitmask update requires 64 or fewer cameras");
+	}
+	n_max_ = std::max(n_max_, hbit(add));
+
+	for (int i = 0; i < n_max_; i++) {
+		if (!(add & (uint64_t(1) << i))) { continue; }
+
+		for (int j = 0; j < n_max_; j++) {
+			if (add & (uint64_t(1) << j)) {
+				graph_[i][j] += 1;
+			}
+		}
+	}
+}
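+
+// Example: update(0b1011) marks cameras 0, 1 and 3 as co-visible for this
+// frame and increments graph_[i][j] for every pair drawn from {0, 1, 3},
+// including the diagonal entries used by count(camera).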
+
+template void Visibility::update(const std::vector<int> &add);
+template void Visibility::update(const std::vector<bool> &add);
+
+void Visibility::mask(int a, int b) {
+	mask_[a][b] = true;
+	mask_[b][a] = true;
+}
+
+void Visibility::unmask(int a, int b) {
+	mask_[a][b] = false;
+	mask_[b][a] = false;
+}
+
+int Visibility::count(int camera) const {
+	return graph_[camera][camera];
+}
+
+int Visibility::count(int camera1, int camera2) const {
+	return graph_[camera1][camera2];
+}
+
+float Visibility::distance(int a, int b) const {
+	int v = graph_[a][b];
+	if (v == 0) { return 0.0f; }
+	return 1.0f/float(v);
+}
+
+Paths<float> Visibility::shortestPath(int i) const {
+	if ((i < 0) || (i >= n_max_)) { throw ftl::exception("Invalid index"); }
+
+	vector<vector<float>> graph(n_max_, vector<float>(n_max_, 0.0f));
+	for (int r = 0; r < n_max_; r++) {
+		for (int c = 0; c < n_max_; c++) {
+			if (r == c) { continue; }
+
+			if (!mask_[r][c]) {
+				// use inverse of count as distance in graph
+				graph[r][c] = distance(r, c);
+			}
+		}
+	}
+
+	auto res = dijstra_impl(i, graph);
+	for (float &distance : res.second) {
+		distance = 1.0f / distance;
+	}
+
+	return Paths<float>(i, res.first, res.second);
+}
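+
+// Edge weights are the inverse of the co-observation count, so camera pairs
+// sharing many points are "close" (e.g. 10 shared observations give an edge of
+// 0.1); the accumulated path distances are inverted again before returning.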
+
+int Visibility::argmax() const {
+	int a = -1;
+	int v = std::numeric_limits<int>::min();
+	for (int i = 0; i < n_max_; i++) {
+		if (count(i) > v) {
+			v = count(i);
+			a = i;
+		}
+	}
+	return a;
+}
+
+int Visibility::argmin() const {
+	int a = -1;
+	int v = std::numeric_limits<int>::max();
+	for (int i = 0; i < n_max_; i++) {
+		if (count(i) < v) {
+			v = count(i);
+			a = i;
+		}
+	}
+	return a;
+}
diff --git a/components/calibration/test/CMakeLists.txt b/components/calibration/test/CMakeLists.txt
index 0bd1f1f19..42f82defd 100644
--- a/components/calibration/test/CMakeLists.txt
+++ b/components/calibration/test/CMakeLists.txt
@@ -6,5 +6,28 @@ add_executable(calibration_parameters_unit
 )
 
 target_include_directories(calibration_parameters_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
-target_link_libraries(calibration_parameters_unit ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
-add_test(CalibrationValidateTest calibration_parameters_unit)
\ No newline at end of file
+target_link_libraries(calibration_parameters_unit ftlcalibration ftlcommon ftlcodecs Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
+add_test(CalibrationValidateTest calibration_parameters_unit)
+
+### Calibration Helper #########################################################
+add_executable(calibration_helper_unit
+	./tests.cpp
+	./test_helper.cpp
+	../src/extrinsic.cpp
+)
+
+target_include_directories(calibration_helper_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(calibration_helper_unit ftlcalibration ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
+add_test(CalibrationHelperTest calibration_helper_unit)
+
+### Extrinsic calib ############################################################
+
+add_executable(calibration_extrinsic_unit
+	./tests.cpp
+	./test_extrinsic.cpp
+	../src/extrinsic.cpp
+)
+
+target_include_directories(calibration_extrinsic_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(calibration_extrinsic_unit ftlcalibration ftlcommon Threads::Threads ${OS_LIBS} ${OpenCV_LIBS})
+add_test(CalibrationExtrinsicTest calibration_extrinsic_unit)
diff --git a/components/calibration/test/test_extrinsic.cpp b/components/calibration/test/test_extrinsic.cpp
new file mode 100644
index 000000000..a2a303395
--- /dev/null
+++ b/components/calibration/test/test_extrinsic.cpp
@@ -0,0 +1,48 @@
+#include "catch.hpp"
+
+#include <ftl/calibration/visibility.hpp>
+
+using std::vector;
+using ftl::calibration::dijstra;
+
+/// https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
+static const vector<vector<int>> graph1 = {
+		{0, 4, 0, 0, 0, 0, 0, 8, 0},
+		{4, 0, 8, 0, 0, 0, 0,11, 0},
+		{0, 8, 0, 7, 0, 4, 0, 0, 2},
+		{0, 0, 7, 0, 9,14, 0, 0, 0},
+		{0, 0, 0, 9, 0,10, 0, 0, 0},
+		{0, 0, 4,14,10, 0, 2, 0, 0},
+		{0, 0, 0, 0, 0, 2, 0, 1, 6},
+		{8,11, 0, 0, 0, 0, 1, 0, 7},
+		{0, 0, 2, 0, 0, 0, 6, 7, 0}
+};
+
+TEST_CASE("Dijkstra's Algorithm", "") {
+	SECTION("Find shortest paths") {
+		auto path = dijstra(0, graph1);
+
+		REQUIRE(path.distance(1) == 4);
+		REQUIRE(path.distance(2) == 12);
+		REQUIRE(path.distance(3) == 19);
+		REQUIRE(path.distance(4) == 21);
+		REQUIRE(path.distance(5) == 11);
+		REQUIRE(path.distance(6) == 9);
+		REQUIRE(path.distance(7) == 8);
+		REQUIRE(path.distance(8) == 14);
+
+		REQUIRE((path.to(1) == vector {0, 1}));
+		REQUIRE((path.to(2) == vector {0, 1, 2}));
+		REQUIRE((path.to(3) == vector {0, 1, 2, 3}));
+		REQUIRE((path.to(4) == vector {0, 7, 6, 5, 4}));
+		REQUIRE((path.to(5) == vector {0, 7, 6, 5}));
+		REQUIRE((path.to(6) == vector {0, 7, 6}));
+		REQUIRE((path.to(7) == vector {0, 7}));
+		REQUIRE((path.to(8) == vector {0, 1, 2, 8}));
+	}
+
+	SECTION("Check connectivity") {
+		auto path = dijstra(0, graph1);
+		REQUIRE(path.connected());
+	}
+}
diff --git a/components/calibration/test/test_helper.cpp b/components/calibration/test/test_helper.cpp
new file mode 100644
index 000000000..1bad42e99
--- /dev/null
+++ b/components/calibration/test/test_helper.cpp
@@ -0,0 +1,139 @@
+#include "catch.hpp"
+#include <ftl/calibration/extrinsic.hpp>
+
+TEST_CASE("Exceptions") {
+	SECTION("Require target is set before adding points") {
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		REQUIRE_THROWS(helper.addPoints(0, {{0,0}}));
+	}
+
+	SECTION("Do not allow setting points twice before next()") {
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		helper.setObject({{0, 0, 0}});
+		helper.addPoints(0, {{0,0}});
+		REQUIRE_THROWS(helper.addPoints(0, {{0,0}}));
+	}
+
+	SECTION("Adding points must have same number of points as in target") {
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		helper.setObject({{0, 0, 0}});
+		REQUIRE_THROWS(helper.addPoints(0, {{0,0}, {0,0}}));
+	}
+}
+
+TEST_CASE("Add and retrieve points") {
+	SECTION("All same (double)") {
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		int npoints = 2;
+		std::vector<cv::Point2d> points0 = {{0, 0}, {0, 1}};
+		std::vector<cv::Point2d> points1 = {{0, 2}, {0, 3}};
+		std::vector<cv::Point2d> points2 = {{0, 4}, {0, 5}};
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(1, points1);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		auto points = helper.getPoints({0, 1, 2}, 0);
+
+		REQUIRE(points.size() == 3);
+		for (int i = 0; i < npoints; i++) {
+			REQUIRE(points.at(0).at(i) == points0[i]);
+			REQUIRE(points.at(1).at(i) == points1[i]);
+			REQUIRE(points.at(2).at(i) == points2[i]);
+		}
+	}
+
+	SECTION("One missing in first set, all queried (double)") {
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		int npoints = 2;
+		std::vector<cv::Point2d> points0 = {{0, 0}, {0, 1}};
+		std::vector<cv::Point2d> points1 = {{0, 2}, {0, 3}};
+		std::vector<cv::Point2d> points2 = {{0, 4}, {0, 5}};
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(1, points1);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		auto points = helper.getPoints({0, 1, 2}, 0);
+
+		REQUIRE(points.size() == 3); // three cameras
+		REQUIRE(helper.getPointsCount() == 4); // next called twice
+
+		for (int i = 0; i < npoints; i++) {
+			REQUIRE(points.at(0).at(i) == points0[i]);
+			REQUIRE(points.at(1).at(i) == points1[i]);
+			REQUIRE(points.at(2).at(i) == points2[i]);
+		}
+	}
+
+	SECTION("One missing in first set, subset queried (double)") {
+		// same as above, but only cameras {0, 2} are queried
+
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		int npoints = 2;
+		std::vector<cv::Point2d> points0 = {{0, 0}, {0, 1}};
+		std::vector<cv::Point2d> points1 = {{0, 2}, {0, 3}};
+		std::vector<cv::Point2d> points2 = {{0, 4}, {0, 5}};
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(1, points1);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		auto points = helper.getPoints({0, 2}, 0);
+
+		REQUIRE(points.size() == 2);
+		REQUIRE(helper.getPointsCount() == 4);
+
+		for (int i = 0; i < npoints; i++) {
+			REQUIRE(points.at(0).at(i) == points0[i]);
+			REQUIRE(points.at(1).at(i) == points2[i]);
+		}
+	}
+
+	SECTION("One missing in first set, subset queried in reverse order (double)") {
+		// same as above, getPoints({2, 0}) instead of getPoints({0, 2})
+
+		auto helper = ftl::calibration::CalibrationPoints<double>();
+		int npoints = 2;
+		std::vector<cv::Point2d> points0 = {{0, 0}, {0, 1}};
+		std::vector<cv::Point2d> points1 = {{0, 2}, {0, 3}};
+		std::vector<cv::Point2d> points2 = {{0, 4}, {0, 5}};
+
+		helper.setObject({{1,2,3}, {4,5,6}});
+		helper.addPoints(0, points0);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		helper.setObject({{7,8,9}, {10,11,12}});
+		helper.addPoints(0, points0);
+		helper.addPoints(1, points1);
+		helper.addPoints(2, points2);
+		helper.next();
+
+		auto points = helper.getPoints({2, 0}, 0);
+
+		REQUIRE(points.size() == 2);
+		REQUIRE(helper.getPointsCount() == 4);
+
+		for (int i = 0; i < npoints; i++) {
+			REQUIRE(points.at(0).at(i) == points2[i]);
+			REQUIRE(points.at(1).at(i) == points0[i]);
+		}
+	}
+}
diff --git a/components/calibration/test/test_parameters.cpp b/components/calibration/test/test_parameters.cpp
index 7d19b9d75..355be37d3 100644
--- a/components/calibration/test/test_parameters.cpp
+++ b/components/calibration/test/test_parameters.cpp
@@ -1,5 +1,6 @@
 #include "catch.hpp"
-#include "ftl/calibration/parameters.hpp"
+#include <ftl/calibration/parameters.hpp>
+#include <ftl/calibration/structures.hpp>
 
 using cv::Size;
 using cv::Mat;
@@ -17,7 +18,7 @@ TEST_CASE("Calibration values", "") {
 		D.at<double>(0) = 1.0;
 		D.at<double>(1) = 1.0;
 		REQUIRE(ftl::calibration::validate::distortionCoefficients(D, Size(1920, 1080)));
-		
+
 		D.at<double>(0) =  0.01512461889185869;
 		D.at<double>(1) = -0.1207895096066378;
 		D.at<double>(4) =  0.1582571415357494;
@@ -38,3 +39,31 @@ TEST_CASE("Calibration values", "") {
 		REQUIRE(!ftl::calibration::validate::distortionCoefficients(D, Size(1920, 1080)));
 	}
 }
+
+TEST_CASE("Test reading/writing file") {
+	using ftl::calibration::CalibrationData;
+	using ftl::codecs::Channel;
+
+	CalibrationData::Calibration calib;
+	CalibrationData calib_read;
+	CalibrationData data;
+
+	calib.intrinsic.resolution = {1, 1};
+	calib.intrinsic.fx = 1.0;
+	calib.intrinsic.fy = 1.0;
+	calib.intrinsic.cx = 0.5;
+	calib.intrinsic.cy = 0.5;
+	data.get(Channel::Left) = calib;
+
+	data.writeFile("/tmp/calib.yml");
+	calib_read = CalibrationData::readFile("/tmp/calib.yml");
+	REQUIRE(calib_read.hasCalibration(Channel::Left));
+
+	data.writeFile("/tmp/calib.json");
+	calib_read = CalibrationData::readFile("/tmp/calib.json");
+	REQUIRE(calib_read.hasCalibration(Channel::Left));
+
+	data.writeFile("/tmp/calib.xml");
+	calib_read = CalibrationData::readFile("/tmp/calib.xml");
+	REQUIRE(calib_read.hasCalibration(Channel::Left));
+}
diff --git a/components/codecs/CMakeLists.txt b/components/codecs/CMakeLists.txt
index 6e7b42f3c..821a11ed9 100644
--- a/components/codecs/CMakeLists.txt
+++ b/components/codecs/CMakeLists.txt
@@ -14,6 +14,7 @@ target_include_directories(BaseCodec PUBLIC
 	${CMAKE_CURRENT_SOURCE_DIR}/src/Video_Codec_SDK_9.1.23/Samples/NvCodec
 	$<TARGET_PROPERTY:ftlcommon,INTERFACE_INCLUDE_DIRECTORIES>
 )
+set_property(TARGET BaseCodec PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_library(OpenCVCodec OBJECT	
 	src/opencv_encoder.cpp
@@ -24,6 +25,8 @@ target_include_directories(OpenCVCodec PUBLIC
 	$<TARGET_PROPERTY:ftlcommon,INTERFACE_INCLUDE_DIRECTORIES>
 )
 
+set_property(TARGET OpenCVCodec PROPERTY CUDA_ARCHITECTURES OFF)
+
 set(CODECSRC
 $<TARGET_OBJECTS:BaseCodec>
 $<TARGET_OBJECTS:OpenCVCodec>
@@ -44,6 +47,8 @@ target_include_directories(NvidiaCodec PUBLIC
 )
 list(APPEND CODECSRC $<TARGET_OBJECTS:NvidiaCodec>)
 
+set_property(TARGET NvidiaCodec PROPERTY CUDA_ARCHITECTURES OFF)
+
 add_library(ftlcodecs ${CODECSRC})
 
 if (WIN32)
@@ -62,6 +67,10 @@ target_include_directories(ftlcodecs PUBLIC
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
 target_link_libraries(ftlcodecs ftlcommon ${OpenCV_LIBS} ${CUDA_LIBRARIES} Eigen3::Eigen nvcuvid cuda)
 
+target_precompile_headers(ftlcodecs REUSE_FROM ftlcommon)
+
+set_property(TARGET ftlcodecs PROPERTY CUDA_ARCHITECTURES OFF)
+
 if (BUILD_TESTS)
 add_subdirectory(test)
 endif()
diff --git a/components/codecs/include/ftl/codecs/channels.hpp b/components/codecs/include/ftl/codecs/channels.hpp
index 43ef166a2..71c97dc9f 100644
--- a/components/codecs/include/ftl/codecs/channels.hpp
+++ b/components/codecs/include/ftl/codecs/channels.hpp
@@ -2,7 +2,7 @@
 #define _FTL_RGBD_CHANNELS_HPP_
 
 #include <bitset>
-#include <ftl/utility/msgpack.hpp>
+#include <msgpack.hpp>
 
 namespace ftl {
 namespace codecs {
@@ -53,12 +53,17 @@ enum struct Channel : int {
 	Index           = 68,
 	Control			= 69,	// For stream and encoder control
 	Settings3		= 70,
+	MetaData		= 71,	// Map of string pairs (key, value)
+	Capabilities	= 72,	// Unordered set of int capabilities
+	CalibrationData = 73,	// Just for stereo intrinsics/extrinsics etc
+	Thumbnail		= 74,	// Small JPG thumbnail, sometimes updated
 
 	Data			= 2048,	// Custom data, any codec.
 	Faces			= 2049, // Data about detected faces
 	Transforms		= 2050,	// Transformation matrices for framesets
 	Shapes3D		= 2051,	// Labeled 3D shapes
-	Messages		= 2052	// Vector of Strings
+	Messages		= 2052,	// Vector of Strings
+	Touch			= 2053  // List of touch data type (each touch point)
 };
 
 inline bool isVideo(Channel c) { return (int)c < 32; };
@@ -166,7 +171,7 @@ inline bool isFloatChannel(ftl::codecs::Channel chan) {
 
 MSGPACK_ADD_ENUM(ftl::codecs::Channel);
 
-template <int BASE=0>
+/*template <int BASE=0>
 inline ftl::codecs::Channels<BASE> operator|(ftl::codecs::Channel a, ftl::codecs::Channel b) {
 	return ftl::codecs::Channels<BASE>(a) | b;
 }
@@ -174,6 +179,6 @@ inline ftl::codecs::Channels<BASE> operator|(ftl::codecs::Channel a, ftl::codecs
 template <int BASE=0>
 inline ftl::codecs::Channels<BASE> operator+(ftl::codecs::Channel a, ftl::codecs::Channel b) {
 	return ftl::codecs::Channels<BASE>(a) | b;
-}
+}*/
 
 #endif  // _FTL_RGBD_CHANNELS_HPP_
diff --git a/components/codecs/include/ftl/codecs/codecs.hpp b/components/codecs/include/ftl/codecs/codecs.hpp
index 8dd2b18a0..68d4591e6 100644
--- a/components/codecs/include/ftl/codecs/codecs.hpp
+++ b/components/codecs/include/ftl/codecs/codecs.hpp
@@ -2,7 +2,7 @@
 #define _FTL_CODECS_BITRATES_HPP_
 
 #include <cstdint>
-#include <ftl/utility/msgpack.hpp>
+#include <msgpack.hpp>
 
 namespace ftl {
 namespace codecs {
@@ -22,6 +22,9 @@ static constexpr uint8_t kFlagPartial = 0x10;		// This frameset is not complete
 static constexpr uint8_t kFlagStereo = 0x20;		// Left-Right stereo in single channel
 static constexpr uint8_t kFlagMultiple = 0x80;		// Multiple video frames in single packet
 
+static constexpr uint8_t kFlagRequest = 0x01;		// Used for empty data packets to mark a request for data
+static constexpr uint8_t kFlagCompleted = 0x02;		// Last packet for timestamp
+
 /**
  * Compression format used.
  */
diff --git a/components/codecs/include/ftl/codecs/packet.hpp b/components/codecs/include/ftl/codecs/packet.hpp
index 97bda6e82..040f8a1da 100644
--- a/components/codecs/include/ftl/codecs/packet.hpp
+++ b/components/codecs/include/ftl/codecs/packet.hpp
@@ -5,7 +5,7 @@
 #include <vector>
 #include <ftl/codecs/codecs.hpp>
 #include <ftl/codecs/channels.hpp>
-#include <ftl/utility/msgpack.hpp>
+#include <msgpack.hpp>
 
 namespace ftl {
 namespace codecs {
@@ -18,7 +18,7 @@ static constexpr uint8_t kAllFramesets = 255;
  */
 struct Header {
 	const char magic[4] = {'F','T','L','F'};
-	uint8_t version = 4;
+	uint8_t version = 5;
 };
 
 /**
@@ -36,12 +36,7 @@ struct IndexHeader {
  */
 struct Packet {
 	ftl::codecs::codec_t codec;
-
-	union {
-	[[deprecated]] ftl::codecs::definition_t definition;	// Data resolution
-	uint8_t reserved=7;
-	};
-
+	uint8_t reserved=0;
 	uint8_t frame_count;	// v4+ Frames included in this packet
 	uint8_t bitrate=0;	// v4+ For multi-bitrate encoding, 0=highest
 	uint8_t flags;			// Codec dependent flags (eg. I-Frame or P-Frame)
@@ -51,6 +46,28 @@ struct Packet {
 };
 
 static constexpr unsigned int kStreamCap_Static = 0x01;
+static constexpr unsigned int kStreamCap_Recorded = 0x02;
+
+/** V4 packets have no stream flags field */
+struct StreamPacketV4 {
+	int version;			// FTL version, not encoded into stream
+
+	int64_t timestamp;
+	uint8_t streamID;  		// Source number [or v4 frameset id]
+	uint8_t frame_number;	// v4+ First frame number (packet may include multiple frames)
+	ftl::codecs::Channel channel;		// Actual channel of this current set of packets
+
+	inline int frameNumber() const { return (version >= 4) ? frame_number : streamID; }
+	inline size_t frameSetID() const { return (version >= 4) ? streamID : 0; }
+
+	int64_t localTimestamp;  		// Not part of the msgpack message / not saved
+	unsigned int hint_capability;	// Is this a video stream, for example
+	size_t hint_source_total;		// Number of tracks per frame to expect
+
+	MSGPACK_DEFINE(timestamp, streamID, frame_number, channel);
+
+	operator std::string() const;
+};
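+
+// A v4 stream therefore unpacks as the 4-tuple (timestamp, streamID,
+// frame_number, channel); v5 (StreamPacket below) appends the flags byte, and
+// readers presumably select the layout from the stream's Header version.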
 
 /**
  * Add timestamp and channel information to a raw encoded frame packet. This
@@ -62,23 +79,19 @@ struct StreamPacket {
 
 	int64_t timestamp;
 	uint8_t streamID;  		// Source number [or v4 frameset id]
-
-	union {
-		[[deprecated]] uint8_t channel_count;	// v1-3 Number of channels to expect for this frame(set) to complete (usually 1 or 2)
-		uint8_t frame_number;	// v4+ First frame number (packet may include multiple frames)
-	};
-
+	uint8_t frame_number;	// v4+ First frame number (packet may include multiple frames)
 	ftl::codecs::Channel channel;		// Actual channel of this current set of packets
+	uint8_t flags=0;
 
 	inline int frameNumber() const { return (version >= 4) ? frame_number : streamID; }
 	inline size_t frameSetID() const { return (version >= 4) ? streamID : 0; }
-	inline int64_t localTimestamp() const { return timestamp + originClockDelta; }
 
-	int64_t originClockDelta;  		// Not message packet / saved
+	int64_t localTimestamp;  		// Not part of the msgpack message / not saved
 	unsigned int hint_capability;	// Is this a video stream, for example
 	size_t hint_source_total;		// Number of tracks per frame to expect
+	int retry_count = 0;			// Decode retry count
 
-	MSGPACK_DEFINE(timestamp, streamID, frame_number, channel);
+	MSGPACK_DEFINE(timestamp, streamID, frame_number, channel, flags);
 
 	operator std::string() const;
 };
diff --git a/components/codecs/include/ftl/codecs/touch.hpp b/components/codecs/include/ftl/codecs/touch.hpp
new file mode 100644
index 000000000..a48ba834a
--- /dev/null
+++ b/components/codecs/include/ftl/codecs/touch.hpp
@@ -0,0 +1,35 @@
+#ifndef _FTL_CODECS_TOUCH_HPP_
+#define _FTL_CODECS_TOUCH_HPP_
+
+#include <ftl/utility/msgpack.hpp>
+
+namespace ftl {
+namespace codecs {
+
+enum class TouchType {
+	MOUSE_LEFT=0,
+	MOUSE_RIGHT=1,
+	MOUSE_MIDDLE=2,
+	TOUCH_SCREEN=3,
+	COLLISION=16
+};
+
+struct Touch {
+	Touch() {}
+
+	int id;
+	TouchType type;
+	uint8_t strength;
+	int x;
+	int y;
+	float d;
+
+	MSGPACK_DEFINE(id, type, strength, x, y, d);
+};
+
+}
+}
+
+MSGPACK_ADD_ENUM(ftl::codecs::TouchType);
+
+#endif
diff --git a/components/codecs/src/channels.cpp b/components/codecs/src/channels.cpp
index 1e9d8d526..cc30c5168 100644
--- a/components/codecs/src/channels.cpp
+++ b/components/codecs/src/channels.cpp
@@ -1,93 +1,74 @@
 #include <ftl/codecs/channels.hpp>
-
+#include <unordered_map>
 #include <opencv2/opencv.hpp>
 
+using ftl::codecs::Channel;
+
 struct ChannelInfo {
 	const char *name;
 	int type;
 };
 
-static ChannelInfo info[] = {
-    "Colour", CV_8UC4,			// 0
-    "Depth", CV_32F,			// 1
-    "Right", CV_8UC4,			// 2
-    "DepthRight", CV_32F,		// 3
-    "Deviation", CV_32F,		// 4
-    "Normals", CV_16FC4,		// 5
-    "Weights", CV_16SC1,		// 6
-    "Confidence", CV_32F,		// 7
-    "EnergyVector", CV_32FC4,	// 8
-    "Flow", CV_32F,				// 9
-    "Energy", CV_32F,			// 10
-	"Mask", CV_8U,				// 11
-	"Density", CV_32F,			// 12
-    "Support1", CV_8UC4,		// 13
-    "Support2", CV_8UC4,		// 14
-    "Segmentation", CV_32S,		// 15
-
-	"ColourNormals", 0,			// 16
-	"ColourHighRes", CV_8UC4,			// 17
-	"Disparity", CV_32F,				// 18
-	"Smoothing", 0,				// 19
-	"Colour2HighRes", CV_8UC4,		// 20
-	"Overlay", 0,				// 21
-	"GroundTruth", CV_32F,		// 22
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
+static const std::unordered_map<Channel,ChannelInfo> info = {
+    {Channel::Colour, {"Left", CV_8UC4}},
+	{Channel::Depth, {"Depth", CV_32F}},
+	{Channel::Right, {"Right", CV_8UC4}},
+	{Channel::Depth2, {"Depth Right", CV_32F}},
+	{Channel::Deviation, {"Deviation", CV_32F}},
+	{Channel::Normals, {"Normals", CV_32FC4}},
+	{Channel::Weights, {"Weights", CV_32F}},
+	{Channel::Confidence, {"Confidence", CV_32F}},
+	{Channel::EnergyVector, {"Energy Vector", CV_32FC4}},
+	{Channel::Flow, {"Flow", CV_32F}},
+	{Channel::Energy, {"Energy", CV_32F}},
+	{Channel::Mask, {"Mask", CV_8U}},
+	{Channel::Density, {"Density", CV_32F}},
+	{Channel::Support1, {"Support1", CV_8UC4}},
+	{Channel::Support2, {"Support2", CV_8UC4}},
+	{Channel::Segmentation, {"Segmentation", CV_8U}},
+	{Channel::Normals2, {"Normals Right", CV_32FC4}},
+	{Channel::ColourHighRes, {"Left High-res", CV_8UC4}},
+	{Channel::Disparity, {"Disparity", CV_16S}},
+	{Channel::Smoothing, {"Smoothing", CV_32F}},
+	{Channel::Colour2HighRes, {"Right High-res", CV_8UC4}},
+	{Channel::Overlay, {"Overlay", CV_8UC4}},
+	{Channel::GroundTruth, {"Ground Truth", CV_32F}},
 
-	"AudioLeft", 0,
-	"AudioRight", 0,
+	{Channel::AudioMono, {"Audio (Mono)", -1}},
+	{Channel::AudioStereo, {"Audio (Stereo)", -1}},
 
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
-	"NoName", 0,
+	{Channel::Configuration, {"Configuration", -1}},
+	{Channel::Calibration, {"Calibration", -1}},
+	{Channel::Pose, {"Pose", -1}},
+	{Channel::Calibration2, {"Calibration High-res", -1}},
+	{Channel::MetaData, {"Meta Data", -1}},
+	{Channel::Capabilities, {"Capabilities", -1}},
+	{Channel::CalibrationData, {"Calibration Data", -1}},
+	{Channel::Thumbnail, {"Thumbnail", -1}},
 
-	"Configuration", 0,
-	"Calibration", 0,
-	"Pose", 0,
-	"Data", 0
+	{Channel::Data, {"Generic Data", -1}},
+	{Channel::Faces, {"Faces", -1}},
+	{Channel::Shapes3D, {"Shapes 3D", -1}},
+	{Channel::Messages, {"Messages", -1}},
+	{Channel::Touch, {"Touch", -1}}
 };
 
 std::string ftl::codecs::name(Channel c) {
 	if (c == Channel::None) return "None";
-	else return info[(int)c].name;
+	auto i = info.find(c);
+	if (i != info.end()) {
+		return i->second.name;
+	} else {
+		return "Unknown";
+	}
 }
 
 int ftl::codecs::type(Channel c)  {
 	if (c == Channel::None) return 0;
-	else return info[(int)c].type;
+	auto i = info.find(c);
+	if (i != info.end()) {
+		return i->second.type;
+	} else {
+		return -1;
+	}
 }
diff --git a/components/codecs/src/depth_convert.cu b/components/codecs/src/depth_convert.cu
index 81dc7d3bb..d69ef3348 100644
--- a/components/codecs/src/depth_convert.cu
+++ b/components/codecs/src/depth_convert.cu
@@ -301,13 +301,7 @@ void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv:
 
 	if (x >= RADIUS && y >= RADIUS && x < vuya.cols-RADIUS-1 && y < vuya.rows-RADIUS-1) {
         ushort4 in = vuya(y,x);
-        bool isdiscon = false;
-		//int minerr = 65000;
 		ushort best = in.z;
-		//ushort miny = 65000;
-
-		//float sumY = 0.0f;
-		//float weights = 0.0f;
 		float mcost = 1.e10f;
 
 		// 1) In small radius, is there a discontinuity?
@@ -323,7 +317,6 @@ void ftl::cuda::vuya_to_depth(const cv::cuda::PtrStepSz<float> &depth, const cv:
 					ushort4 inn = vuya(y+v,x+u);
 					if (inn.w == 0) {
 						float err = fabsf(float(in.z) - float(inn.z));
-						float dist = v*v + u*u;
 						float cost = err*err; //err*err*dist;
 						if (mcost > cost) {
 							mcost = cost;
diff --git a/components/codecs/src/nvidia_decoder.cpp b/components/codecs/src/nvidia_decoder.cpp
index 0901e5188..b4638ad04 100644
--- a/components/codecs/src/nvidia_decoder.cpp
+++ b/components/codecs/src/nvidia_decoder.cpp
@@ -100,7 +100,7 @@ uint8_t* NvidiaDecoder::_decode(const uint8_t* src, uint64_t srcSize) {
 		// Some cuvid implementations have one frame latency. Refeed frame into pipeline in this case.
 		const uint32_t DECODE_TRIES = 3;
 		for (uint32_t i = 0; (i < DECODE_TRIES) && (numFramesDecoded <= 0); ++i)
-			nv_decoder_->Decode(src, srcSize, &decodedFrames, &numFramesDecoded, CUVID_PKT_ENDOFPICTURE, &timeStamps, n_++, stream_);
+			nv_decoder_->Decode(src, static_cast<int32_t>(srcSize), &decodedFrames, &numFramesDecoded, CUVID_PKT_ENDOFPICTURE, &timeStamps, n_++, stream_);
 	} catch (NVDECException& e) {
 		throw FTL_Error("Decode failed (" << e.getErrorString() << ", error " << std::to_string(e.getErrorCode()) << " = " + DecErrorCodeToString(e.getErrorCode()) << ")");
 	}
@@ -142,7 +142,6 @@ bool NvidiaDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 		return false;
 	}
 
-	int rc = 0;
 	uint8_t *decodedPtr = nullptr;
 
 	if (pkt.flags & ftl::codecs::kFlagMultiple) {
@@ -196,14 +195,14 @@ bool NvidiaDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 
 			ftl::cuda::vuya_to_depth(out, sroi, csroi, 16.0f, cvstream);
 		} else {
-			ftl::cuda::nv12_to_float(decodedPtr, width_, (float*)out.data, out.step1(), width_/2, height_, stream_);
+			ftl::cuda::nv12_to_float(decodedPtr, width_, (float*)out.data, static_cast<uint32_t>(out.step1()), width_/2, height_, stream_);
 		}
 	} else {
 		// Flag 0x1 means frame is in RGB so needs conversion to BGR
 		if (pkt.flags & 0x1) {
-			Nv12ToColor32<BGRA32>(decodedPtr, width_, out.data, out.step1(), width_, height_, 0, stream_);
+			Nv12ToColor32<BGRA32>(decodedPtr, width_, out.data, static_cast<int>(out.step1()), width_, height_, 0, stream_);
 		} else {
-			Nv12ToColor32<RGBA32>(decodedPtr, width_, out.data, out.step1(), width_, height_, 0, stream_);
+			Nv12ToColor32<RGBA32>(decodedPtr, width_, out.data, static_cast<int>(out.step1()), width_, height_, 0, stream_);
 		}
 	}
 
diff --git a/components/codecs/src/nvidia_encoder.cpp b/components/codecs/src/nvidia_encoder.cpp
index 65fcc88cb..9f9b9e8a3 100644
--- a/components/codecs/src/nvidia_encoder.cpp
+++ b/components/codecs/src/nvidia_encoder.cpp
@@ -1,5 +1,5 @@
 #include <ftl/codecs/nvidia_encoder.hpp>
-#include <loguru.hpp>
+//#include <loguru.hpp>
 #include <ftl/timer.hpp>
 #include <ftl/codecs/codecs.hpp>
 #include <ftl/cuda_util.hpp>
@@ -117,7 +117,7 @@ static uint64_t calculateBitrate(int64_t pixels, float ratescale) {
 	default						: bitrate = 16.0f;
 	}*/
 
-	float bitrate = 8.0f * float(pixels);
+	float bitrate = 16.0f * float(pixels);
 
 	//bitrate *= 1000.0f*1000.0f;
 	float minrate = 0.05f * bitrate;
@@ -144,7 +144,7 @@ static bool validate(const cv::cuda::GpuMat &in, ftl::codecs::Packet &pkt) {
 	}
 
 	if (pkt.codec == codec_t::H264 && in.type() == CV_32F) {
-		LOG(ERROR) << "Lossy compression not supported with H264 currently";
+		//LOG(ERROR) << "Lossy compression not supported with H264 currently";
 		return false;
 	}
 
@@ -152,16 +152,13 @@ static bool validate(const cv::cuda::GpuMat &in, ftl::codecs::Packet &pkt) {
 		return false;
 	}
 
-	auto width = in.cols;
-	auto height = in.rows;
-
 	if (in.empty()) {
-		LOG(WARNING) << "No data";
+		//LOG(WARNING) << "No data";
 		return false;
 	}
 
 	if (in.type() != CV_32F && in.type() != CV_8UC4) {
-		LOG(ERROR) << "Input type does not match given format";
+		//LOG(ERROR) << "Input type does not match given format";
 		pkt.flags = 0;
 		return false;
 	}
@@ -188,7 +185,7 @@ bool NvidiaEncoder::encode(const cv::cuda::GpuMat &in, ftl::codecs::Packet &pkt)
 	} else if (params_.isLossy()) {
 		ftl::cuda::depth_to_nv12_10(in, (ushort*)f->inputPtr, (ushort*)(((uchar*)f->inputPtr)+(nvenc_->GetEncodeHeight()*f->pitch)), f->pitch/2, 16.0f, stream_);
 	} else {
-		ftl::cuda::float_to_nv12_16bit((float*)in.data, in.step1(), (uchar*)f->inputPtr, f->pitch, nvenc_->GetEncodeWidth()/2, nvenc_->GetEncodeHeight(), cv::cuda::StreamAccessor::getStream(stream_));
+		ftl::cuda::float_to_nv12_16bit((float*)in.data, static_cast<uint32_t>(in.step1()), (uchar*)f->inputPtr, f->pitch, nvenc_->GetEncodeWidth()/2, nvenc_->GetEncodeHeight(), cv::cuda::StreamAccessor::getStream(stream_));
 	}
 
 	// TODO: Use page locked memory?
@@ -202,7 +199,7 @@ bool NvidiaEncoder::encode(const cv::cuda::GpuMat &in, ftl::codecs::Packet &pkt)
 	was_reset_ = false;
 
 	if (cs == 0 || cs >= ftl::codecs::kVideoBufferSize) {
-		LOG(ERROR) << "Could not encode video frame";
+		//LOG(ERROR) << "Could not encode video frame";
 		return false;
 	} else {
 		return true;
@@ -214,7 +211,7 @@ bool NvidiaEncoder::_createEncoder(const cv::cuda::GpuMat &in, const ftl::codecs
 	if (nvenc_ && (params == params_)) return true;
 
 	uint64_t bitrate = calculateBitrate(in.cols*in.rows, float(pkt.bitrate)/255.0f) * pkt.frame_count;
-	LOG(INFO) << "Calculated bitrate " << ((params.is_float) ? "(float)" : "(rgb)") << ": " << bitrate;
+	//LOG(INFO) << "Calculated bitrate " << ((params.is_float) ? "(float)" : "(rgb)") << ": " << bitrate;
 	
 	params_ = params;
 
@@ -229,7 +226,7 @@ bool NvidiaEncoder::_createEncoder(const cv::cuda::GpuMat &in, const ftl::codecs
 	cuCtxGetCurrent(&cudaContext);    
 
 	if (nvenc_) {
-		LOG(INFO) << "Destroying old NVENC encoder";
+		//LOG(INFO) << "Destroying old NVENC encoder";
 		std::vector<std::vector<uint8_t>> tmp;
 		nvenc_->EndEncode(tmp);
 		nvenc_->DestroyEncoder();
@@ -284,7 +281,7 @@ bool NvidiaEncoder::_createEncoder(const cv::cuda::GpuMat &in, const ftl::codecs
 
 		if (params.isLossy())
 		{
-			encodeConfig.rcParams.averageBitRate = bitrate;
+			encodeConfig.rcParams.averageBitRate = static_cast<uint32_t>(bitrate);
 			encodeConfig.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ;
 			encodeConfig.rcParams.vbvBufferSize = encodeConfig.rcParams.averageBitRate * initializeParams.frameRateDen / initializeParams.frameRateNum; // bitrate / framerate = one frame
 			encodeConfig.rcParams.maxBitRate = encodeConfig.rcParams.averageBitRate;
@@ -302,7 +299,7 @@ bool NvidiaEncoder::_createEncoder(const cv::cuda::GpuMat &in, const ftl::codecs
 		//LOG(ERROR) << "Could not create video encoder";
 		return false;
 	} else {
-		LOG(INFO) << "NVENC encoder created";
+		//LOG(INFO) << "NVENC encoder created";
 
 		//nvenc_->SetIOCudaStreams(cv::cuda::StreamAccessor::getStream(stream_), cv::cuda::StreamAccessor::getStream(stream_));
 
diff --git a/components/codecs/src/opencv_decoder.cpp b/components/codecs/src/opencv_decoder.cpp
index 13f47f19b..f4b3c0a20 100644
--- a/components/codecs/src/opencv_decoder.cpp
+++ b/components/codecs/src/opencv_decoder.cpp
@@ -30,14 +30,12 @@ bool OpenCVDecoder::decode(const ftl::codecs::Packet &pkt, cv::cuda::GpuMat &out
 	cv::Rect roi(cx,cy,chunk_width,chunk_height);
 	cv::cuda::GpuMat chunkHead = out(roi);
 
-	//LOG(INFO) << "DECODE JPEG " << (int)pkt.block_number << "/" << chunk_dim;
-
 	cv::Mat tmp2_, tmp_;
 	// Decode in temporary buffers to prevent long locks
 	cv::imdecode(pkt.data, cv::IMREAD_UNCHANGED, &tmp2_);
 
 	if (tmp2_.type() == CV_8UC3) {
-		cv::cvtColor(tmp2_, tmp_, cv::COLOR_BGR2BGRA);
+		cv::cvtColor(tmp2_, tmp_, cv::COLOR_RGB2BGRA);
 	} else {
 		tmp_ = tmp2_;
 	}
diff --git a/components/codecs/src/reader.cpp b/components/codecs/src/reader.cpp
index 2a2fc4145..26d4f58f4 100644
--- a/components/codecs/src/reader.cpp
+++ b/components/codecs/src/reader.cpp
@@ -80,7 +80,7 @@ bool Reader::read(int64_t ts, const std::function<void(const ftl::codecs::Stream
 			stream_->read(buffer_.buffer(), buffer_.buffer_capacity());
 			//if (stream_->bad()) return false;
 
-			int bytes = stream_->gcount();
+			size_t bytes = static_cast<size_t>(stream_->gcount());
 			if (bytes == 0) break;
 			buffer_.buffer_consumed(bytes);
 			partial = false;
diff --git a/components/codecs/test/CMakeLists.txt b/components/codecs/test/CMakeLists.txt
index 7a64f1b08..34753db97 100644
--- a/components/codecs/test/CMakeLists.txt
+++ b/components/codecs/test/CMakeLists.txt
@@ -39,6 +39,8 @@ ${CMAKE_CURRENT_SOURCE_DIR}/../src/Video_Codec_SDK_9.1.23/Samples/NvCodec)
 target_link_libraries(nvidia_codec_unit
 	Threads::Threads ${OS_LIBS} ${OpenCV_LIBS} ${CUDA_LIBRARIES} ftlcommon nvcuvid cuda)
 
+set_property(TARGET nvidia_codec_unit PROPERTY CUDA_ARCHITECTURES OFF)
+
 
 add_test(NvidiaCodecUnitTest nvidia_codec_unit)
 
@@ -57,12 +59,12 @@ add_test(NvidiaCodecUnitTest nvidia_codec_unit)
 #add_test(RWUnitTest rw_unit)
 
 ### Channel Unit ###############################################################
-add_executable(channel_unit
-$<TARGET_OBJECTS:CatchTest>
-	./channel_unit.cpp
-)
-target_include_directories(channel_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
-target_link_libraries(channel_unit
-	ftlcommon)
+#add_executable(channel_unit
+#$<TARGET_OBJECTS:CatchTest>
+#	./channel_unit.cpp
+#)
+#target_include_directories(channel_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+#target_link_libraries(channel_unit
+#	ftlcommon)
 
-add_test(ChannelUnitTest channel_unit)
+#add_test(ChannelUnitTest channel_unit)
diff --git a/components/codecs/test/nvidia_codec_unit.cpp b/components/codecs/test/nvidia_codec_unit.cpp
index 67928ea76..f72ef049b 100644
--- a/components/codecs/test/nvidia_codec_unit.cpp
+++ b/components/codecs/test/nvidia_codec_unit.cpp
@@ -191,7 +191,7 @@ TEST_CASE( "NvidiaEncoder::encode() - A large tiled lossy float image" ) {
 	SECTION("auto codec and definition, 4x2 frame") {
 		ftl::codecs::Packet pkt;
 		pkt.codec = codec_t::Any;
-		pkt.bitrate = 255;
+		pkt.bitrate = 128;
 		pkt.flags = 0;
 		pkt.frame_count = 7;
 
diff --git a/components/codecs/test/opencv_codec_unit.cpp b/components/codecs/test/opencv_codec_unit.cpp
index cf03f71a2..43c94304f 100644
--- a/components/codecs/test/opencv_codec_unit.cpp
+++ b/components/codecs/test/opencv_codec_unit.cpp
@@ -90,7 +90,6 @@ TEST_CASE( "OpenCVDecoder::decode() - A colour test image no resolution change"
 
 	ftl::codecs::Packet pkt;
 	pkt.codec = codec_t::Any;
-	pkt.definition = definition_t::Any;
 	pkt.bitrate = 255;
 	pkt.flags = 0;
 	pkt.frame_count = 1;
diff --git a/components/common/cpp/CMakeLists.txt b/components/common/cpp/CMakeLists.txt
index fe1d7671b..71715801d 100644
--- a/components/common/cpp/CMakeLists.txt
+++ b/components/common/cpp/CMakeLists.txt
@@ -19,7 +19,9 @@ check_function_exists(uriParseSingleUriA HAVE_URIPARSESINGLE)
 
 add_library(ftlcommon ${COMMONSRC})
 
+if (NOT WIN32)
 target_compile_options(ftlcommon PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-fPIC>)
+endif()
 
 target_include_directories(ftlcommon PUBLIC
 	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
@@ -27,6 +29,13 @@ target_include_directories(ftlcommon PUBLIC
 	PRIVATE src)
 target_link_libraries(ftlcommon Threads::Threads Eigen3::Eigen ${OS_LIBS} ${OpenCV_LIBS} ${URIPARSER_LIBRARIES} ${CUDA_LIBRARIES})
 
+target_precompile_headers(ftlcommon
+	PRIVATE include/ftl/cuda_common.hpp
+	PRIVATE include/loguru.hpp
+)
+
+set_property(TARGET ftlcommon PROPERTY CUDA_ARCHITECTURES OFF)
+
 if (BUILD_TESTS)
 add_subdirectory(test)
 endif()
diff --git a/components/common/cpp/include/ftl/configurable.hpp b/components/common/cpp/include/ftl/configurable.hpp
index 1c157ba60..b2a66943d 100644
--- a/components/common/cpp/include/ftl/configurable.hpp
+++ b/components/common/cpp/include/ftl/configurable.hpp
@@ -12,6 +12,7 @@
 #include <list>
 #include <functional>
 #include <optional>
+#include <unordered_set>
 
 #define REQUIRED(...) required(__func__, __VA_ARGS__)
 
@@ -92,7 +93,23 @@ class Configurable {
 	 * @param prop Name of property to watch
 	 * @param callback A function object that will be called on change.
 	 */
-	void on(const std::string &prop, std::function<void(const config::Event&)>);
+	void on(const std::string &prop, std::function<void()>);
+
+	/**
+	 * Same callback for all properties in set.
+	 */
+	void onAny(const std::unordered_set<std::string> &props, std::function<void()>);
+
+	template <typename T>
+	void on(const std::string &prop, T &v) {
+		on(prop, [&v,this,prop]() { v = *this->get<T>(prop); });
+	}
+
+	template <typename T>
+	void on(const std::string &prop, T &v, const T &def) {
+		v = this->value(prop, def);
+		on(prop, [&v,this,prop]() { v = *this->get<T>(prop); });
+	}
 
 	void patchPtr(nlohmann::json &newcfg) { config_ = &newcfg; }
 
@@ -102,13 +119,40 @@ class Configurable {
 	 */
 	virtual void refresh();
 
+	/**
+	 * Restore configurable properties from session storage using this key.
+	 * The key could be the same as the configurable ID, or use another
+	 * property such as the URI. If restore() is used, it will also trigger a
+	 * save when the configurable is destroyed. The key should ideally be unique.
+	 *
+	 * The allowed parameter specifies the set of properties that can be saved.
+	 */
+	void restore(const std::string &key, const std::unordered_set<std::string> &allowed);
+
+	/**
+	 * Load defaults from the config file. The key represents type information,
+	 * so many configurables can load from the same key. If loadDefaults() has
+	 * been used by the configurable, it is called again whenever the
+	 * configurable is reset.
+	 */
+	void loadDefaults(const std::string &key);
+
+	virtual void reset() {};
+
+	void save();
+
 	protected:
 	nlohmann::json *config_;
 
 	virtual void inject(const std::string &name, nlohmann::json &value) {}
 
 	private:
-	std::map<std::string, std::list<std::function<void(const config::Event&)>>> observers_; 
+	std::string restore_;
+	std::string defaults_;
+	std::unordered_set<std::string> save_allowed_;
+
+	typedef std::list<std::function<void()>> ObserverList;
+	std::unordered_map<std::string,ObserverList> observers_; 
 
 	void _trigger(const std::string &name);
 };
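For reference, a minimal usage sketch of the reworked observer API declared above; cfg, the property names and the bound variable are hypothetical:

	float threshold = 0.0f;

	// Bind a local/member variable directly to a property, with a default value.
	cfg->on("threshold", threshold, 0.5f);

	// Plain callback form; no config::Event object is passed any more.
	cfg->on("enabled", [cfg]() {
		LOG(INFO) << "enabled is now " << cfg->value("enabled", false);
	});

	// Register one callback for several properties at once.
	cfg->onAny({"width", "height"}, []() { /* e.g. reallocate buffers */ });
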
diff --git a/components/common/cpp/include/ftl/configuration.hpp b/components/common/cpp/include/ftl/configuration.hpp
index bdbc4a590..d555d6dc5 100644
--- a/components/common/cpp/include/ftl/configuration.hpp
+++ b/components/common/cpp/include/ftl/configuration.hpp
@@ -9,6 +9,7 @@
 #include <string>
 #include <vector>
 #include <optional>
+#include <unordered_set>
 
 namespace ftl {
 
@@ -24,6 +25,10 @@ bool create_directory(const std::string &path);
 bool is_video(const std::string &file);
 std::vector<std::string> directory_listing(const std::string &path);
 
+nlohmann::json loadJSON(const std::string &path);
+
+bool saveJSON(const std::string &path, nlohmann::json &json);
+
 namespace config {
 
 typedef nlohmann::json json_t;
@@ -34,10 +39,13 @@ std::optional<std::string> locateFile(const std::string &name);
 
 std::map<std::string, std::string> read_options(char ***argv, int *argc);
 
-Configurable *configure(int argc, char **argv, const std::string &root);
+Configurable *configure(int argc, char **argv, const std::string &root, const std::unordered_set<std::string> &restoreable={});
 
 Configurable *configure(json_t &);
 
+nlohmann::json &getRestore(const std::string &key);
+nlohmann::json &getDefault(const std::string &key);
+
 void cleanup();
 
 void removeConfigurable(Configurable *cfg);
@@ -75,6 +83,11 @@ json_t &resolveWait(const std::string &);
  */
 Configurable *find(const std::string &uri);
 
+/**
+ * Add an alternative URI for a configurable.
+ */
+void alias(const std::string &uri, Configurable *cfg);
+
 /**
  * Get all configurables that contain a specified tag. Tags are given under the
  * "tags" property as an array of strings, but only during configurable
@@ -172,11 +185,12 @@ T *ftl::config::create(json_t &link, ARGS ...args) {
 		cfg->patchPtr(link);
 	}
 
-	try {
-		return dynamic_cast<T*>(cfg);
-	} catch(...) {
+	T* ptr = dynamic_cast<T*>(cfg);
+	if (ptr) {
+		return ptr;
+	}
+	else {
 		throw FTL_Error("Configuration URI object is of wrong type: " << id);
-		//return nullptr;
 	}
 }
 
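A sketch of how the extended configure() entry point and the new alias() might be used at start-up; the root name, restoreable property set and alias URI are hypothetical:

	int main(int argc, char **argv) {
		auto *root = ftl::config::configure(argc, argv, "gui2", {"theme", "fullscreen"});

		// Make the root configurable reachable under an additional URI.
		ftl::config::alias("ftl://utu.fi/alias/root", root);

		// ... run the application ...

		ftl::config::cleanup();   // also writes the session (restore) file
		return 0;
	}
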
diff --git a/components/common/cpp/include/ftl/cuda_operators.hpp b/components/common/cpp/include/ftl/cuda_operators.hpp
index 5fc84fbcb..304818b58 100644
--- a/components/common/cpp/include/ftl/cuda_operators.hpp
+++ b/components/common/cpp/include/ftl/cuda_operators.hpp
@@ -240,7 +240,7 @@ inline __host__ __device__ float2 normalize(float2 v)
 // floor
 inline __host__ __device__ float2 floor(const float2 v)
 {
-    return make_float2(floor(v.x), floor(v.y));
+    return make_float2(floorf(v.x), floorf(v.y));
 }
 
 // reflect
@@ -252,7 +252,7 @@ inline __host__ __device__ float2 reflect(float2 i, float2 n)
 // absolute value
 inline __host__ __device__ float2 fabs(float2 v)
 {
-	return make_float2(fabs(v.x), fabs(v.y));
+	return make_float2(fabsf(v.x), fabsf(v.y));
 }
 
 inline __device__ __host__ int2 sign(float2 f) { 
@@ -423,7 +423,7 @@ inline __host__ __device__ float3 normalize(float3 v)
 // floor
 inline __host__ __device__ float3 floor(const float3 v)
 {
-    return make_float3(floor(v.x), floor(v.y), floor(v.z));
+    return make_float3(floorf(v.x), floorf(v.y), floorf(v.z));
 }
 
 // reflect
@@ -435,7 +435,7 @@ inline __host__ __device__ float3 reflect(float3 i, float3 n)
 // absolute value
 inline __host__ __device__ float3 fabs(float3 v)
 {
-	return make_float3(fabs(v.x), fabs(v.y), fabs(v.z));
+	return make_float3(fabsf(v.x), fabsf(v.y), fabsf(v.z));
 }
 
 inline __device__ __host__ int3 sign(float3 f) { 
@@ -567,13 +567,13 @@ inline __host__ __device__ float4 normalize(float4 v)
 // floor
 inline __host__ __device__ float4 floor(const float4 v)
 {
-    return make_float4(floor(v.x), floor(v.y), floor(v.z), floor(v.w));
+    return make_float4(floorf(v.x), floorf(v.y), floorf(v.z), floorf(v.w));
 }
 
 // absolute value
 inline __host__ __device__ float4 fabs(float4 v)
 {
-	return make_float4(fabs(v.x), fabs(v.y), fabs(v.z), fabs(v.w));
+	return make_float4(fabsf(v.x), fabsf(v.y), fabsf(v.z), fabsf(v.w));
 }
 
 // int3 functions
diff --git a/components/common/cpp/include/ftl/exception.hpp b/components/common/cpp/include/ftl/exception.hpp
index 43e303d9b..e78fe3854 100644
--- a/components/common/cpp/include/ftl/exception.hpp
+++ b/components/common/cpp/include/ftl/exception.hpp
@@ -65,6 +65,6 @@ class exception : public std::exception
 
 }
 
-#define FTL_Error(A) (ftl::exception(ftl::Formatter() << __FILE__ << ":" << __LINE__ << ": " << A))
+#define FTL_Error(A) (ftl::exception(ftl::Formatter() << A << " [" << __FILE__ << ":" << __LINE__ << "]"))
 
 #endif  // _FTL_EXCEPTION_HPP_
diff --git a/components/common/cpp/include/ftl/handle.hpp b/components/common/cpp/include/ftl/handle.hpp
index d7e2f8089..9c712c039 100644
--- a/components/common/cpp/include/ftl/handle.hpp
+++ b/components/common/cpp/include/ftl/handle.hpp
@@ -2,6 +2,7 @@
 #define _FTL_HANDLE_HPP_
 
 #include <ftl/threads.hpp>
+#include <ftl/exception.hpp>
 #include <functional>
 #include <unordered_map>
 
@@ -14,7 +15,7 @@ struct BaseHandler {
 	inline Handle make_handle(BaseHandler*, int);
 
 	protected:
-	MUTEX mutex_;
+	std::mutex mutex_;
 	int id_=0;
 };
 
@@ -22,7 +23,7 @@ struct BaseHandler {
  * A `Handle` is used to manage registered callbacks, allowing them to be
  * removed safely whenever the `Handle` instance is destroyed.
  */
-struct Handle {
+struct [[nodiscard]] Handle {
 	friend struct BaseHandler;
 
 	/**
@@ -52,7 +53,11 @@ struct Handle {
 		return *this;
 	}
 
-	inline ~Handle() { if (handler_) handler_->remove(*this); }
+	inline ~Handle() {
+		if (handler_) {
+			handler_->remove(*this);
+		}
+	}
 
 	private:
 	BaseHandler *handler_;
@@ -65,14 +70,18 @@ struct Handle {
  * This class is used to manage callbacks. The template parameters are the
  * arguments to be passed to the callback when triggered. This class is already
  * thread-safe.
+ *
+ * POSSIBLE BUG:	On destruction, any remaining handles will be left with
+ * 					a dangling pointer to the Handler.
  */
 template <typename ...ARGS>
 struct Handler : BaseHandler {
+
 	/**
 	 * Add a new callback function. It returns a `Handle` object that must
 	 * remain in scope, the destructor of the `Handle` will remove the callback.
 	 */
-	[[nodiscard]] Handle on(const std::function<bool(ARGS...)> &f) {
+	Handle on(const std::function<bool(ARGS...)> &f) {
 		std::unique_lock<std::mutex> lk(mutex_);
 		int id = id_++;
 		callbacks_[id] = f;
@@ -111,10 +120,67 @@ struct Handler : BaseHandler {
 	std::unordered_map<int, std::function<bool(ARGS...)>> callbacks_;
 };
 
+/**
+ * This class is used to manage callbacks. The template parameters are the
+ * arguments to be passed to the callback when triggered. This class is already
+ * thread-safe. Note that this version only allows a single callback at a time
+ * and throws an exception if multiple are added without resetting.
+ */
+template <typename ...ARGS>
+struct SingletonHandler : BaseHandler {
+	/**
+	 * Add a new callback function. It returns a `Handle` object that must
+	 * remain in scope, the destructor of the `Handle` will remove the callback.
+	 */
+	[[nodiscard]] Handle on(const std::function<bool(ARGS...)> &f) {
+		std::unique_lock<std::mutex> lk(mutex_);
+		if (callback_) throw FTL_Error("Callback already bound");
+		callback_ = f;
+		return make_handle(this, id_++);
+	}
+
+	/**
+	 * Safely trigger the callback, if one is bound. Note that the handler is
+	 * locked whilst triggering, so the callback cannot modify it or it will
+	 * deadlock. To remove the callback, return false from it; otherwise
+	 * return true.
+	 */
+	bool trigger(ARGS ...args) {
+		std::unique_lock<std::mutex> lk(mutex_);
+		if (callback_) {
+			bool keep = callback_(std::forward<ARGS>(args)...);
+			if (!keep) callback_ = nullptr;
+			return keep;
+		} else {
+			return false;
+		}
+		//} catch (const std::exception &e) {
+		//	LOG(ERROR) << "Exception in callback: " << e.what();
+		//}
+	}
+
+	/**
+	 * Remove a callback using its `Handle`. This is equivalent to allowing the
+	 * `Handle` to be destroyed or cancelled. If the handle does not match the
+	 * currently bound callback then the callback is not removed.
+	 */
+	void remove(const Handle &h) override {
+		std::unique_lock<std::mutex> lk(mutex_);
+		if (h.id() == id_-1) callback_ = nullptr;
+	}
+
+	void reset() { callback_ = nullptr; }
+
+	operator bool() const { return (bool)callback_; }
+
+	private:
+	std::function<bool(ARGS...)> callback_;
+};
+
 }
 
 ftl::Handle ftl::BaseHandler::make_handle(BaseHandler *h, int id) {
 	return ftl::Handle(h, id);
 }
 
-#endif
\ No newline at end of file
+#endif
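A minimal sketch of the Handler / SingletonHandler pattern above; the int argument and callback bodies are illustrative only:

	ftl::Handler<int> multi;            // any number of callbacks
	ftl::SingletonHandler<int> single;  // at most one callback, else it throws

	{
		auto h1 = multi.on([](int v) { LOG(INFO) << "multi got " << v; return true; });
		auto h2 = single.on([](int v) { return v < 10; });

		multi.trigger(5);
		single.trigger(42);   // callback returns false, so it is unbound here
	}
	// h1 and h2 go out of scope: any remaining callbacks are removed safely.
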
diff --git a/components/common/cpp/include/ftl/threads.hpp b/components/common/cpp/include/ftl/threads.hpp
index 83086135a..38021f099 100644
--- a/components/common/cpp/include/ftl/threads.hpp
+++ b/components/common/cpp/include/ftl/threads.hpp
@@ -31,6 +31,8 @@
 #define SHARED_LOCK(M,L) std::shared_lock<std::remove_reference<decltype(M)>::type> L(M);
 #endif  // DEBUG_MUTEX
 
+#define SHARED_LOCK_TYPE(M) std::shared_lock<M>
+
 namespace ftl {
 	extern ctpl::thread_pool pool;
 }
diff --git a/components/common/cpp/include/ftl/timer.hpp b/components/common/cpp/include/ftl/timer.hpp
index bf378425d..9ccb0b31f 100644
--- a/components/common/cpp/include/ftl/timer.hpp
+++ b/components/common/cpp/include/ftl/timer.hpp
@@ -1,6 +1,7 @@
 #ifndef _FTL_COMMON_TIMER_HPP_
 #define _FTL_COMMON_TIMER_HPP_
 
+#include <ftl/handle.hpp>
 #include <functional>
 
 namespace ftl {
@@ -15,6 +16,11 @@ namespace ftl {
  */
 namespace timer {
 
+/**
+ * The timer level determines when, and in what order, a timer callback is
+ * called. This allows some timers to operate at higher precision / lower
+ * latency than others, and also supports idle callbacks.
+ */
 enum timerlevel_t {
 	kTimerHighPrecision = 0,
 	kTimerSwap,
@@ -24,44 +30,6 @@ enum timerlevel_t {
 	kTimerMAXLEVEL
 };
 
-/**
- * Represents a timer job for control purposes. Use to remove timer jobs in
- * a destructor, for example.
- */
-struct TimerHandle {
-	TimerHandle() : id_(-1) {}
-	explicit TimerHandle(int i) : id_(i) {}
-	TimerHandle(const TimerHandle &t) : id_(t.id()) {}
-
-	/**
-	 * Cancel the timer job. If currently executing it will block and wait for
-	 * the job to complete.
-	 */
-	void cancel() const;
-	void pause() const;
-	void unpause() const;
-
-	/**
-	 * Do the timer job every N frames.
-	 */
-	void setMultiplier(unsigned int) const;
-
-	/**
-	 * Give the timer job a name for logging output.
-	 */
-	void setName(const std::string &) const;
-
-	/**
-	 * Allow copy assignment.
-	 */
-	TimerHandle &operator=(const TimerHandle &h) { id_ = h.id(); return *this; }
-
-	inline int id() const { return id_; }
-
-	private:
-	int id_;
-};
-
 int64_t get_time();
 
 /**
@@ -114,7 +82,7 @@ void setClockSlave(bool);
  * If all high precision callbacks together take more than 1ms to complete, a
  * warning is produced.
  */
-const TimerHandle add(timerlevel_t, const std::function<bool(int64_t ts)> &);
+ftl::Handle add(timerlevel_t, const std::function<bool(int64_t ts)> &);
 
 /**
  * Initiate the timer and optionally block the current process.
diff --git a/components/common/cpp/include/ftl/transactional.hpp b/components/common/cpp/include/ftl/transactional.hpp
new file mode 100644
index 000000000..298fa5d2a
--- /dev/null
+++ b/components/common/cpp/include/ftl/transactional.hpp
@@ -0,0 +1,48 @@
+#ifndef _FTL_TRANSACTIONAL_HPP_
+#define _FTL_TRANSACTIONAL_HPP_
+
+#include <ftl/threads.hpp>
+
+namespace ftl {
+
+/**
+ * Use RAII style transactional objects with shared locking. This wraps an
+ * object with a lock and provides a release notification mechanism to allow
+ * completion code.
+ */
+template <typename T>
+class Transactional {
+	static_assert(std::is_pointer<T>::value, "Transactional type must be a pointer");
+
+	public:
+	Transactional(T obj, SHARED_MUTEX &mtx) : ref_(obj), mtx_(mtx), lock_(mtx_) {}
+	Transactional(T obj, SHARED_MUTEX &mtx, const std::function<void(T)> &complete) : ref_(obj), mtx_(mtx), lock_(mtx_), completed_(complete) {}
+	Transactional(const Transactional &)=delete;
+	Transactional()=delete;
+	~Transactional() {
+		lock_.unlock();
+		if (completed_) completed_(ref_);
+	}
+
+	Transactional(Transactional &&t) : ref_(t.ref_), mtx_(t.mtx_), lock_(mtx_), completed_(t.completed_) {
+		t.completed_ = nullptr;
+	}
+
+	Transactional &operator=(const Transactional &)=delete;
+
+	T operator->() { return ref_; }
+	const T operator->() const { return ref_; }
+
+	T operator*() { return ref_; }
+	const T operator*() const { return ref_; }
+
+	private:
+	T ref_;
+	SHARED_MUTEX &mtx_;
+	SHARED_LOCK_TYPE(SHARED_MUTEX) lock_;
+	std::function<void(T)> completed_;
+};
+
+}
+
+#endif
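A usage sketch for Transactional; Item and its completion step are hypothetical stand-ins for a frame object and its flush:

	struct Item { int value = 0; };

	SHARED_MUTEX mtx;
	Item item;

	{
		ftl::Transactional<Item*> t(&item, mtx, [](Item *i) {
			LOG(INFO) << "completed with value " << i->value;   // runs after unlock
		});
		t->value = 42;   // access the wrapped pointer while share-locked
	}
	// The shared lock is released first, then the completion callback fires.
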
diff --git a/components/common/cpp/include/ftl/uri.hpp b/components/common/cpp/include/ftl/uri.hpp
index 71e3bf456..455d4f845 100644
--- a/components/common/cpp/include/ftl/uri.hpp
+++ b/components/common/cpp/include/ftl/uri.hpp
@@ -32,7 +32,8 @@ namespace ftl {
 			SCHEME_IPC,
 			SCHEME_FILE,
 			SCHEME_OTHER,
-			SCHEME_DEVICE
+			SCHEME_DEVICE,
+			SCHEME_GROUP
 		};
 
 		bool isValid() const { return m_valid; };
@@ -51,21 +52,28 @@ namespace ftl {
 		 * Get the URI without query parameters, and limit path to length N.
 		 * If N is negative then it is taken from full path length.
 		 */
-		std::string getBaseURI(int n);
+		std::string getBaseURI(int n) const;
+
+		std::string getBaseURIWithUser() const;
 
 		std::string getPathSegment(int n) const;
 
+		inline size_t getPathLength() const { return m_pathseg.size(); }
+
 		void setAttribute(const std::string &key, const std::string &value);
 		void setAttribute(const std::string &key, int value);
 
 		template <typename T>
-		T getAttribute(const std::string &key) {
-			return T(m_qmap[key]);
+		T getAttribute(const std::string &key) const {
+			auto i = m_qmap.find(key);
+			return (i != m_qmap.end()) ? T(i->second) : T();
 		}
 
+		bool hasAttribute(const std::string &a) const { return m_qmap.count(a) > 0; }
+
 		std::string to_string() const;
 
-		void to_json(nlohmann::json &);
+		void to_json(nlohmann::json &) const;
 
 		private:
 		void _parse(uri_t puri);
@@ -86,13 +94,15 @@ namespace ftl {
 	};
 
 	template <>
-	inline int URI::getAttribute<int>(const std::string &key) {
-		return std::stoi(m_qmap[key]);
+	inline int URI::getAttribute<int>(const std::string &key) const {
+		auto i = m_qmap.find(key);
+		return (i != m_qmap.end()) ? std::stoi(i->second) : 0;
 	}
 
 	template <>
-	inline std::string URI::getAttribute<std::string>(const std::string &key) {
-		return m_qmap[key];
+	inline std::string URI::getAttribute<std::string>(const std::string &key) const {
+		auto i = m_qmap.find(key);
+		return (i != m_qmap.end()) ? i->second : "";
 	}
 }
 
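A sketch of the const-safe attribute accessors and getBaseURIWithUser(); the URI itself is illustrative:

	ftl::URI uri("ftl://nick@utu.fi/stream?bitrate=32&name=test");

	if (uri.hasAttribute("bitrate")) {
		int b = uri.getAttribute<int>("bitrate");                  // 32
		std::string n = uri.getAttribute<std::string>("name");     // "test"
	}

	std::string base = uri.getBaseURIWithUser();   // keeps user info, drops the query
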
diff --git a/components/common/cpp/include/ftl/utility/msgpack.hpp b/components/common/cpp/include/ftl/utility/msgpack.hpp
index 30319d225..3e13df144 100644
--- a/components/common/cpp/include/ftl/utility/msgpack.hpp
+++ b/components/common/cpp/include/ftl/utility/msgpack.hpp
@@ -34,7 +34,7 @@ struct convert<cv::Size_<T>> {
 	msgpack::object const& operator()(msgpack::object const& o, cv::Size_<T>& v) const {
 		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
 		if (o.via.array.size != 2) { throw msgpack::type_error(); }
-		
+
 		T width = o.via.array.ptr[0].as<T>();
 		T height = o.via.array.ptr[1].as<T>();
 		v = cv::Size_<T>(width, height);
@@ -79,7 +79,7 @@ struct convert<cv::Rect_<T>> {
 	msgpack::object const& operator()(msgpack::object const& o, cv::Rect_<T> &v) const {
 		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
 		if (o.via.array.size != 4) { throw msgpack::type_error(); }
-		
+
 		T height = o.via.array.ptr[0].as<T>();
 		T width = o.via.array.ptr[1].as<T>();
 		T x = o.via.array.ptr[2].as<T>();
@@ -126,7 +126,7 @@ struct convert<cv::Vec<T, SIZE>> {
 	msgpack::object const& operator()(msgpack::object const& o, cv::Vec<T, SIZE> &v) const {
 		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
 		if (o.via.array.size != SIZE) { throw msgpack::type_error(); }
-		
+
 		for (int i = 0; i < SIZE; i++) { v[i] = o.via.array.ptr[i].as<T>(); }
 
 		return o;
@@ -148,6 +148,93 @@ struct object_with_zone<cv::Vec<T, SIZE>> {
 	}
 };
 
+////////////////////////////////////////////////////////////////////////////////
+// cv::Point_
+
+template<typename T>
+struct pack<cv::Point_<T>> {
+	template <typename Stream>
+	packer<Stream>& operator()(msgpack::packer<Stream>& o, cv::Point_<T> const& p) const {
+
+		o.pack_array(2);
+		o.pack(p.x);
+		o.pack(p.y);
+		return o;
+	}
+};
+
+template<typename T>
+struct convert<cv::Point_<T>> {
+	msgpack::object const& operator()(msgpack::object const& o, cv::Point_<T> &p) const {
+		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
+		if (o.via.array.size != 2) { throw msgpack::type_error(); }
+
+		p.x = o.via.array.ptr[0].as<T>();
+		p.y = o.via.array.ptr[1].as<T>();
+
+		return o;
+	}
+};
+
+template <typename T>
+struct object_with_zone<cv::Point_<T>> {
+	void operator()(msgpack::object::with_zone& o, cv::Point_<T> const& p) const {
+		o.type = type::ARRAY;
+		o.via.array.size = 2;
+		o.via.array.ptr = static_cast<msgpack::object*>(
+			o.zone.allocate_align(	sizeof(msgpack::object) * o.via.array.size,
+									MSGPACK_ZONE_ALIGNOF(msgpack::object)));
+
+		o.via.array.ptr[0] = msgpack::object(p.x, o.zone);
+		o.via.array.ptr[1] = msgpack::object(p.y, o.zone);
+	}
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// cv::Point3_
+
+template<typename T>
+struct pack<cv::Point3_<T>> {
+	template <typename Stream>
+	packer<Stream>& operator()(msgpack::packer<Stream>& o, cv::Point3_<T> const& p) const {
+
+		o.pack_array(3);
+		o.pack(p.x);
+		o.pack(p.y);
+		o.pack(p.z);
+		return o;
+	}
+};
+
+template<typename T>
+struct convert<cv::Point3_<T>> {
+	msgpack::object const& operator()(msgpack::object const& o, cv::Point3_<T> &p) const {
+		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
+		if (o.via.array.size != 3) { throw msgpack::type_error(); }
+
+		p.x = o.via.array.ptr[0].as<T>();
+		p.y = o.via.array.ptr[1].as<T>();
+		p.z = o.via.array.ptr[2].as<T>();
+
+		return o;
+	}
+};
+
+template <typename T>
+struct object_with_zone<cv::Point3_<T>> {
+	void operator()(msgpack::object::with_zone& o, cv::Point3_<T> const& p) const {
+		o.type = type::ARRAY;
+		o.via.array.size = 3;
+		o.via.array.ptr = static_cast<msgpack::object*>(
+			o.zone.allocate_align(	sizeof(msgpack::object) * o.via.array.size,
+									MSGPACK_ZONE_ALIGNOF(msgpack::object)));
+
+		o.via.array.ptr[0] = msgpack::object(p.x, o.zone);
+		o.via.array.ptr[1] = msgpack::object(p.y, o.zone);
+		o.via.array.ptr[2] = msgpack::object(p.z, o.zone);
+	}
+};
+
 ////////////////////////////////////////////////////////////////////////////////
 // cv::Mat
 
@@ -160,7 +247,7 @@ struct pack<cv::Mat> {
 		o.pack_array(3);
 		o.pack(v.type());
 		o.pack(v.size());
-		
+
 		auto size = v.total() * v.elemSize();
 		o.pack(msgpack::type::raw_ref(reinterpret_cast<char*>(v.data), size));
 
@@ -181,11 +268,11 @@ struct convert<cv::Mat> {
 		if (o.via.array.ptr[2].via.bin.size != (v.total() * v.elemSize())) {
 			throw msgpack::type_error();
 		}
-	
+
 		memcpy(	v.data,
 				reinterpret_cast<const uchar*>(o.via.array.ptr[2].via.bin.ptr),
 				o.via.array.ptr[2].via.bin.size);
-		
+
 		return o;
 	}
 };
@@ -198,7 +285,7 @@ struct object_with_zone<cv::Mat> {
 		o.via.array.ptr = static_cast<msgpack::object*>(
 			o.zone.allocate_align(	sizeof(msgpack::object) * o.via.array.size,
 									MSGPACK_ZONE_ALIGNOF(msgpack::object)));
-		
+
 		auto size = v.total() * v.elemSize();
 		o.via.array.ptr[0] = msgpack::object(v.type(), o.zone);
 		o.via.array.ptr[1] = msgpack::object(v.size(), o.zone);
@@ -206,7 +293,7 @@ struct object_with_zone<cv::Mat> {
 		// https://github.com/msgpack/msgpack-c/wiki/v2_0_cpp_object#conversion
 		// raw_ref not copied to zone (is this a problem?)
 		o.via.array.ptr[2] = msgpack::object(
-			msgpack::type::raw_ref(reinterpret_cast<char*>(v.data), size),
+			msgpack::type::raw_ref(reinterpret_cast<char*>(v.data), static_cast<uint32_t>(size)),
 			o.zone);
 	}
 };
@@ -231,7 +318,7 @@ struct convert<Eigen::Matrix<T, X, Y>> {
 	msgpack::object const& operator()(msgpack::object const& o, Eigen::Matrix<T, X, Y> &v) const {
 		if (o.type != msgpack::type::ARRAY) { throw msgpack::type_error(); }
 		if (o.via.array.size != X*Y) { throw msgpack::type_error(); }
-		
+
 		for (int i = 0; i < X*Y; i++) { v.data()[i] = o.via.array.ptr[i].as<T>(); }
 
 		return o;
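A round-trip sketch for the new cv::Point_ adaptors, mirroring how the existing cv::Vec support is exercised in the unit tests:

	cv::Point2f p(1.0f, 2.0f);

	msgpack::sbuffer buf;
	msgpack::pack(buf, p);                        // uses pack<cv::Point_<T>> above

	auto oh = msgpack::unpack(buf.data(), buf.size());
	auto q = oh.get().as<cv::Point2f>();          // uses convert<cv::Point_<T>>; q == p
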
diff --git a/components/common/cpp/include/ftl/utility/string.hpp b/components/common/cpp/include/ftl/utility/string.hpp
new file mode 100644
index 000000000..d88aa1153
--- /dev/null
+++ b/components/common/cpp/include/ftl/utility/string.hpp
@@ -0,0 +1,15 @@
+#ifndef _FTL_UTILITY_STRING_HPP_
+#define _FTL_UTILITY_STRING_HPP_
+
+#include <string>
+#include <sstream>
+
+template <typename T>
+std::string to_string_with_precision(const T a_value, const int n = 6) {
+	std::ostringstream out;
+	out.precision(n);
+	out << std::fixed << a_value;
+	return out.str();
+}
+
+#endif
\ No newline at end of file
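Usage sketch for the helper above, e.g. for fixed-precision UI labels:

	std::string fps = to_string_with_precision(59.9384, 1);   // "59.9"
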
diff --git a/components/common/cpp/src/configurable.cpp b/components/common/cpp/src/configurable.cpp
index 5791980d4..1f377fc51 100644
--- a/components/common/cpp/src/configurable.cpp
+++ b/components/common/cpp/src/configurable.cpp
@@ -22,13 +22,39 @@ Configurable::Configurable(nlohmann::json &config) : config_(&config) {
 		LOG(FATAL) << "Configurable json is not an object: " << config;
 	}
 
+	/*if (!config.contains("$id")) {
+		config["$id"] = "ftl://utu.fi";
+	}*/
+
 	ftl::config::registerConfigurable(this);
 }
 
 Configurable::~Configurable() {
+	save();
 	ftl::config::removeConfigurable(this);
 }
 
+void Configurable::save() {
+	if (restore_.size() > 0) {
+		auto &r = ftl::config::getRestore(restore_);
+		for (auto &i : save_allowed_) {
+			r[i] = (*config_)[i];
+		}
+	}
+}
+
+void Configurable::restore(const std::string &key, const std::unordered_set<std::string> &allowed) {
+	save();
+	
+	auto &r = ftl::config::getRestore(key);
+	if (r.is_object()) {
+		config_->merge_patch(r);
+		
+	}
+	restore_ = key;
+	save_allowed_ = allowed;
+}
+
 template <typename T>
 T ftl::Configurable::value(const std::string &name, const T &def) {
 	auto r = get<T>(name);
@@ -114,7 +140,7 @@ void Configurable::_trigger(const string &name) {
 	if (ix != observers_.end()) {
 		for (auto &f : (*ix).second) {
 			try {
-				f({this, name});
+				f();
 			} catch(...) {
 				LOG(ERROR) << "Exception in event handler for '" << name << "'";
 			}
@@ -122,7 +148,13 @@ void Configurable::_trigger(const string &name) {
 	}
 }
 
-void Configurable::on(const string &prop, function<void(const ftl::config::Event&)> f) {
+void Configurable::onAny(const std::unordered_set<string> &props, function<void()> f) {
+	for (const auto &p : props) {
+		on(p, f);
+	}
+}
+
+void Configurable::on(const string &prop, function<void()> f) {
 	auto ix = observers_.find(prop);
 	if (ix == observers_.end()) {
 		observers_[prop] = {f};
diff --git a/components/common/cpp/src/configuration.cpp b/components/common/cpp/src/configuration.cpp
index 0d2b79a97..f756d1d57 100644
--- a/components/common/cpp/src/configuration.cpp
+++ b/components/common/cpp/src/configuration.cpp
@@ -30,9 +30,11 @@
 #include <string>
 #include <map>
 #include <iostream>
+#include <iomanip>
 
 using ftl::config::json_t;
 using std::ifstream;
+using std::ofstream;
 using std::string;
 using std::map;
 using std::vector;
@@ -175,6 +177,42 @@ optional<string> ftl::config::locateFile(const string &name) {
 	return {};
 }
 
+nlohmann::json ftl::loadJSON(const std::string &path) {
+	ifstream i(path.c_str());
+	//i.open(path);
+	if (i.is_open()) {
+		try {
+			nlohmann::json t;
+			i >> t;
+			return t;
+		} catch (nlohmann::json::parse_error& e) {
+			LOG(ERROR) << "Parse error in loading JSON: "  << e.what();
+			return {};
+		} catch (...) {
+			LOG(ERROR) << "Unknown error opening JSON file: " << path;
+		}
+		return {};
+	} else {
+		return {};
+	}
+}
+
+bool ftl::saveJSON(const std::string &path, nlohmann::json &json) {
+	ofstream o(path.c_str());
+	//i.open(path);
+	if (o.is_open()) {
+		try {
+			o << std::setw(4) << json << std::endl;
+			return true;
+		} catch (...) {
+			LOG(ERROR) << "Unknown error saving JSON file: " << path;
+		}
+		return false;
+	} else {
+		return false;
+	}
+}
+
 /**
  * Combine one json config with another patch json config.
  */
@@ -199,10 +237,26 @@ static bool mergeConfig(const string path) {
 	}
 }
 
+static SHARED_MUTEX mutex;
 static std::map<std::string, json_t*> config_index;
 static std::map<std::string, ftl::Configurable*> config_instance;
+static std::map<std::string, ftl::Configurable*> config_alias;
 static std::map<std::string, std::vector<ftl::Configurable*>> tag_index;
 
+static std::string cfg_root_str;
+static nlohmann::json config_restore;
+static nlohmann::json config_defaults;
+
+nlohmann::json &ftl::config::getRestore(const std::string &key) {
+	UNIQUE_LOCK(mutex, lk);
+	return config_restore[key];
+}
+
+nlohmann::json &ftl::config::getDefault(const std::string &key) {
+	UNIQUE_LOCK(mutex, lk);
+	return config_defaults[key];
+}
+
 /*
  * Recursively URI index the JSON structure.
  */
@@ -222,6 +276,7 @@ static void _indexConfig(json_t &cfg) {
 }
 
 ftl::Configurable *ftl::config::find(const std::string &uri) {
+	if (uri.size() == 0) return nullptr;
 	std::string actual_uri = uri;
 	if (uri[0] == '/') {
 		if (uri.size() == 1) {
@@ -230,18 +285,30 @@ ftl::Configurable *ftl::config::find(const std::string &uri) {
 			actual_uri = rootCFG->getID() + uri;
 		}
 	}
+
+	SHARED_LOCK(mutex, lk);
 	
 	auto ix = config_instance.find(actual_uri);
-	if (ix == config_instance.end()) return nullptr;
+	if (ix == config_instance.end()) {
+		auto ix = config_alias.find(actual_uri);
+		if (ix == config_alias.end()) return nullptr;
+		else return (*ix).second;
+	}
 	else return (*ix).second;
 }
 
+void ftl::config::alias(const std::string &uri, Configurable *cfg) {
+	UNIQUE_LOCK(mutex, lk);
+	config_alias[uri] = cfg;
+}
+
 const std::vector<Configurable*> &ftl::config::findByTag(const std::string &tag) {
 	return tag_index[tag];
 }
 
 std::vector<std::string> ftl::config::list() {
 	vector<string> r;
+	SHARED_LOCK(mutex, lk);
 	for (auto i : config_instance) {
 		r.push_back(i.first);
 	}
@@ -250,6 +317,7 @@ std::vector<std::string> ftl::config::list() {
 
 const std::vector<Configurable *> ftl::config::getChildren(const string &uri) {
 	std::vector<Configurable *> children;
+	SHARED_LOCK(mutex, lk);
 	for (const auto &[curi, c] : config_instance) {
 		auto mismatch = std::mismatch(uri.begin(), uri.end(), curi.begin());
 		if (mismatch.first == uri.end()) {
@@ -265,15 +333,19 @@ void ftl::config::registerConfigurable(ftl::Configurable *cfg) {
 		LOG(ERROR) << "Configurable object is missing $id property: " << cfg->getConfig();
 		return;
 	}
+
+	UNIQUE_LOCK(mutex, lk);
 	auto ix = config_instance.find(*uri);
 	if (ix != config_instance.end()) {
 		// FIXME: HACK NOTE TODO SHOULD BE FATAL
 		LOG(ERROR) << "Attempting to create a duplicate object: " << *uri;
 	} else {
 		config_instance[*uri] = cfg;
-		LOG(INFO) << "Registering instance: " << *uri;
+		//LOG(INFO) << "Registering instance: " << *uri;
 
+		lk.unlock();
 		auto tags = cfg->get<vector<string>>("tags");
+		lk.lock();
 		if (tags) {
 			for (auto &t : *tags) {
 				//LOG(INFO) << "REGISTER TAG: " << t;
@@ -314,20 +386,22 @@ bool ftl::config::update(const std::string &puri, const json_t &value) {
 	string tail = "";
 	string head = "";
 	string uri = preprocessURI(puri);
-	size_t last_hash = uri.find_last_of('#');
-	if (last_hash != string::npos) {
+	//size_t last_hash = uri.find_last_of('/');
+	//if (last_hash != string::npos) {
 		size_t last = uri.find_last_of('/');
-		if (last != string::npos && last > last_hash) {
+		if (last != string::npos) {
 			tail = uri.substr(last+1);
 			head = uri.substr(0, last);
 		} else {
-			tail = uri.substr(last_hash+1);
-			head = uri.substr(0, last_hash);
+		//	tail = uri.substr(last_hash+1);
+		//	head = uri.substr(0, last_hash);
+			LOG(WARNING) << "Expected a URI path: " << uri;
+			return false;
 		}
-	} else {
-		LOG(WARNING) << "Expected a # in an update URI: " << uri;
-		return false;
-	}
+	//} else {
+	//	LOG(WARNING) << "Expected a # in an update URI: " << uri;
+	//	return false;
+	//}
 
 	Configurable *cfg = find(head);
 
@@ -400,6 +474,8 @@ json_t &ftl::config::resolve(const std::string &puri, bool eager) {
 		//}
 	}
 
+	SHARED_LOCK(mutex, lk);
+
 	ftl::URI uri(uri_str);
 	if (uri.isValid()) {
 		std::string u = uri.getBaseURI();
@@ -532,6 +608,7 @@ map<string, string> ftl::config::read_options(char ***argv, int *argc) {
  */
 static void process_options(Configurable *root, const map<string, string> &opts) {
 	for (auto opt : opts) {
+		if (opt.first == "") continue;
 		if (opt.first == "config") continue;
 		if (opt.first == "root") continue;
 
@@ -558,7 +635,7 @@ static void process_options(Configurable *root, const map<string, string> &opts)
 			auto v = nlohmann::json::parse(opt.second);
 			ftl::config::update(*root->get<string>("$id") + string("/") + opt.first, v);
 		} catch(...) {
-			LOG(ERROR) << "Unrecognised option: " << *root->get<string>("$id") << "#" << opt.first;
+			LOG(ERROR) << "Unrecognised option: " << *root->get<string>("$id") << "/" << opt.first;
 		}
 	}
 }
@@ -594,19 +671,46 @@ Configurable *ftl::config::configure(ftl::config::json_t &cfg) {
 	return rootcfg;
 }
 
-static bool doing_cleanup = false;
+// Remove all $ keys from json
+static void stripJSON(nlohmann::json &j) {
+	for (auto i=j.begin(); i != j.end(); ) {
+		if (i.key()[0] == '$') {
+			i = j.erase(i);
+			continue;
+		}
+		if ((*i).is_structured()) {
+			stripJSON(*i);
+		}
+		++i;
+	}
+}
+
+static std::atomic_bool doing_cleanup = false;
 void ftl::config::cleanup() {
 	if (doing_cleanup) return;
 	doing_cleanup = true;
-	for (auto f : config_instance) {
-		delete f.second;
+
+	//UNIQUE_LOCK(mutex, lk);
+
+	for (auto &f : config_instance) {
+		LOG(WARNING) << "Not deleted properly: " << f.second->getID();
+		//delete f.second;
+	//	f.second->save();
+	}
+	while (config_instance.begin() != config_instance.end()) {
+		delete config_instance.begin()->second;
 	}
 	config_instance.clear();
+
+	stripJSON(config_restore);
+	ftl::saveJSON(std::string(FTL_LOCAL_CONFIG_ROOT "/")+cfg_root_str+std::string("_session.json"), config_restore);
+
 	doing_cleanup = false;
 }
 
 void ftl::config::removeConfigurable(Configurable *cfg) {
-	if (doing_cleanup) return;
+	//if (doing_cleanup) return;
+	UNIQUE_LOCK(mutex, lk);
 
 	auto i = config_instance.find(cfg->getID());
 	if (i != config_instance.end()) {
@@ -634,22 +738,22 @@ std::vector<nlohmann::json*> ftl::config::_createArray(ftl::Configurable *parent
 			if (entity.is_object()) {
 				if (!entity["$id"].is_string()) {
 					std::string id_str = *parent->get<std::string>("$id");
-					if (id_str.find('#') != std::string::npos) {
+					//if (id_str.find('#') != std::string::npos) {
 						entity["$id"] = id_str + std::string("/") + name + std::string("/") + std::to_string(i);
-					} else {
-						entity["$id"] = id_str + std::string("#") + name + std::string("/") + std::to_string(i);
-					}
+					//} else {
+					//	entity["$id"] = id_str + std::string("#") + name + std::string("/") + std::to_string(i);
+					//}
 				}
 
 				result.push_back(&entity);
 			} else if (entity.is_null()) {
 				// Must create the object from scratch...
 				std::string id_str = *parent->get<std::string>("$id");
-				if (id_str.find('#') != std::string::npos) {
+				//if (id_str.find('#') != std::string::npos) {
 					id_str = id_str + std::string("/") + name + std::string("/") + std::to_string(i);
-				} else {
-					id_str = id_str + std::string("#") + name + std::string("/") + std::to_string(i);
-				}
+				//} else {
+				//	id_str = id_str + std::string("#") + name + std::string("/") + std::to_string(i);
+				//}
 				parent->getConfig()[name] = {
 					// cppcheck-suppress constStatement
 					{"$id", id_str}
@@ -664,7 +768,7 @@ std::vector<nlohmann::json*> ftl::config::_createArray(ftl::Configurable *parent
 		//LOG(WARNING) << "Expected an array for '" << name << "' in " << parent->getID();
 	}
 
-	return std::move(result);
+	return result;
 }
 
 nlohmann::json &ftl::config::_create(ftl::Configurable *parent, const std::string &name) {
@@ -675,22 +779,22 @@ nlohmann::json &ftl::config::_create(ftl::Configurable *parent, const std::strin
 	if (entity.is_object()) {
 		if (!entity["$id"].is_string()) {
 			std::string id_str = *parent->get<std::string>("$id");
-			if (id_str.find('#') != std::string::npos) {
+			//if (id_str.find('#') != std::string::npos) {
 				entity["$id"] = id_str + std::string("/") + name;
-			} else {
-				entity["$id"] = id_str + std::string("#") + name;
-			}
+			//} else {
+			//	entity["$id"] = id_str + std::string("#") + name;
+			//}
 		}
 
 		return entity;
 	} else if (entity.is_null()) {
 		// Must create the object from scratch...
 		std::string id_str = *parent->get<std::string>("$id");
-		if (id_str.find('#') != std::string::npos) {
+		//if (id_str.find('#') != std::string::npos) {
 			id_str = id_str + std::string("/") + name;
-		} else {
-			id_str = id_str + std::string("#") + name;
-		}
+		//} else {
+		//	id_str = id_str + std::string("#") + name;
+		//}
 		parent->getConfig()[name] = {
 			// cppcheck-suppress constStatement
 			{"$id", id_str}
@@ -745,7 +849,7 @@ template void ftl::config::setJSON<float>(nlohmann::json *config, const std::str
 template void ftl::config::setJSON<int>(nlohmann::json *config, const std::string &name, int value);
 template void ftl::config::setJSON<std::string>(nlohmann::json *config, const std::string &name, std::string value);
 
-Configurable *ftl::config::configure(int argc, char **argv, const std::string &root) {
+Configurable *ftl::config::configure(int argc, char **argv, const std::string &root, const std::unordered_set<std::string> &restoreable) {
 	loguru::g_preamble_date = false;
 	loguru::g_preamble_uptime = false;
 	loguru::g_preamble_thread = false;
@@ -771,19 +875,31 @@ Configurable *ftl::config::configure(int argc, char **argv, const std::string &r
 	}
 
 	string root_str = (options.find("root") != options.end()) ? nlohmann::json::parse(options["root"]).get<string>() : root;
+	cfg_root_str = root_str;
 
 	if (options.find("id") != options.end()) config["$id"] = nlohmann::json::parse(options["id"]).get<string>();
 	_indexConfig(config);
 
-	Configurable *rootcfg = create<Configurable>(config);
-	if (root_str.size() > 0) {
-		LOG(INFO) << "Setting root to " << root_str;
-		rootcfg = create<Configurable>(rootcfg, root_str);
+	config_restore = std::move(ftl::loadJSON(std::string(FTL_LOCAL_CONFIG_ROOT "/")+cfg_root_str+std::string("_session.json")));
+
+	Configurable *rootcfg = nullptr;
+
+	try {
+		if (!config.contains("$id")) config["$id"] = "ftl://utu.fi";
+		rootcfg = create<Configurable>(config);
+		rootCFG = rootcfg;
+		if (root_str.size() > 0) {
+			LOG(INFO) << "Setting root to " << root_str;
+			rootcfg = create<Configurable>(rootcfg, root_str);
+		}
+	} catch (const std::exception &e) {
+		LOG(FATAL) << "Exception setting root: " << e.what();
 	}
 
 	//root_config = rootcfg->getConfig();
 	rootCFG = rootcfg;
 	rootcfg->set("paths", paths);
+	rootcfg->restore("root", restoreable);
 	process_options(rootcfg, options);
 
 	if (rootcfg->get<int>("profiler")) {
@@ -793,9 +909,9 @@ Configurable *ftl::config::configure(int argc, char **argv, const std::string &r
 	if (rootcfg->get<std::string>("branch")) {
 		ftl::branch_name = *rootcfg->get<std::string>("branch");
 	}
-	rootcfg->on("branch", [](const ftl::config::Event &e) {
-		if (e.entity->get<std::string>("branch")) {
-			ftl::branch_name = *e.entity->get<std::string>("branch");
+	rootcfg->on("branch", [rootcfg]() {
+		if (rootcfg->get<std::string>("branch")) {
+			ftl::branch_name = *rootcfg->get<std::string>("branch");
 		}
 	});
 
@@ -805,7 +921,7 @@ Configurable *ftl::config::configure(int argc, char **argv, const std::string &r
 	// Check CUDA
 	ftl::cuda::initialise();
 
-	int pool_size = rootcfg->value("thread_pool_factor", 2.0f)*std::thread::hardware_concurrency();
+	int pool_size = int(rootcfg->value("thread_pool_factor", 2.0f)*float(std::thread::hardware_concurrency()));
 	if (pool_size != ftl::pool.size()) ftl::pool.resize(pool_size);
 
 
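A round-trip sketch for the new loadJSON()/saveJSON() helpers; the path is hypothetical:

	nlohmann::json j = ftl::loadJSON("/tmp/ftl_session.json");   // empty JSON on any failure
	j["last_run"] = "test";

	if (!ftl::saveJSON("/tmp/ftl_session.json", j)) {
		LOG(ERROR) << "Could not write session file";
	}
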
diff --git a/components/common/cpp/src/cuda_common.cpp b/components/common/cpp/src/cuda_common.cpp
index ede4a998a..273b540c4 100644
--- a/components/common/cpp/src/cuda_common.cpp
+++ b/components/common/cpp/src/cuda_common.cpp
@@ -11,7 +11,7 @@ static std::vector<cudaDeviceProp> properties;
 
 bool ftl::cuda::initialise() {
 	if (dev_count > 0) return true;
-	
+
 	// Do an initial CUDA check
 	cudaSafeCall(cudaGetDeviceCount(&dev_count));
 	CHECK_GE(dev_count, 1) << "No CUDA devices found";
@@ -174,4 +174,4 @@ void BufferBase::upload(const cv::Mat &m, cudaStream_t stream) {
 void BufferBase::download(cv::Mat &m, cudaStream_t stream) const {
 	m.create(height(), width(), cvType_);
 	cudaSafeCall(cudaMemcpy2DAsync(m.data, m.step, devicePtr(), pitch(), m.cols * m.elemSize(), m.rows, cudaMemcpyDeviceToHost, stream));
-}
\ No newline at end of file
+}
diff --git a/components/common/cpp/src/timer.cpp b/components/common/cpp/src/timer.cpp
index bc8f97e2d..b249b0f6c 100644
--- a/components/common/cpp/src/timer.cpp
+++ b/components/common/cpp/src/timer.cpp
@@ -29,9 +29,9 @@ static int last_id = 0;
 static bool clock_slave = true;
 
 struct TimerJob {
-	int id;
-	function<bool(int64_t)> job;
-	volatile bool active;
+	int id=0;
+	ftl::SingletonHandler<int64_t> job;
+	std::atomic_bool active=false;
 	// TODO: (Nick) Implement richer forms of timer
 	//bool paused;
 	//int multiplier;
@@ -76,7 +76,7 @@ static void waitTimePoint() {
 		auto idle_job = jobs[kTimerIdle10].begin();
 		while (idle_job != jobs[kTimerIdle10].end() && msdelay >= 10 && sincelast != mspf) {
 			(*idle_job).active = true;
-			bool doremove = !(*idle_job).job(now);
+			bool doremove = !(*idle_job).job.trigger(now);
 
 			if (doremove) {
 				idle_job = jobs[kTimerIdle10].erase(idle_job);
@@ -89,6 +89,30 @@ static void waitTimePoint() {
 		}
 	}
 
+	/*while (msdelay >= 10 && sincelast != mspf) {
+		sleep_for(milliseconds(5));
+		now = get_time();
+		msdelay = mspf - (now % mspf);
+	}*/
+
+	if (msdelay >= 2 && sincelast != mspf) {
+		UNIQUE_LOCK(mtx, lk);
+		auto idle_job = jobs[kTimerIdle1].begin();
+		while (idle_job != jobs[kTimerIdle1].end() && msdelay >= 2 && sincelast != mspf) {
+			(*idle_job).active = true;
+			bool doremove = !(*idle_job).job.trigger(now);
+
+			if (doremove) {
+				idle_job = jobs[kTimerIdle1].erase(idle_job);
+				LOG(INFO) << "Timer job removed";
+			} else {
+				(*idle_job++).active = false;
+			}
+			now = get_time();
+			msdelay = mspf - (now % mspf);
+		}
+	}
+
 	if (hprec_) {
 		// Spin loop until exact grab time
 		// Accurate to around 4 micro seconds.
@@ -106,6 +130,8 @@ static void waitTimePoint() {
 		now = get_time();
 	}
 	last_frame = now/mspf;
+	int64_t over = now - (last_frame*mspf);
+	if (over > 1) LOG(WARNING) << "Timer off by " << over << "ms";
 }
 
 void ftl::timer::setInterval(int ms) {
@@ -117,7 +143,7 @@ void ftl::timer::setHighPrecision(bool hp) {
 }
 
 int ftl::timer::getInterval() {
-	return mspf;
+	return static_cast<int>(mspf);
 }
 
 void ftl::timer::setClockAdjustment(int64_t ms) {
@@ -132,13 +158,16 @@ bool ftl::timer::isClockSlave() {
 	return clock_slave;
 }
 
-const TimerHandle ftl::timer::add(timerlevel_t l, const std::function<bool(int64_t ts)> &f) {
+ftl::Handle ftl::timer::add(timerlevel_t l, const std::function<bool(int64_t ts)> &f) {
 	if (l < 0 || l >= kTimerMAXLEVEL) return {};
 
 	UNIQUE_LOCK(mtx, lk);
 	int newid = last_id++;
-	jobs[l].push_back({newid, f, false, "NoName"});
-	return TimerHandle(newid);
+	auto &j = jobs[l].emplace_back();
+	j.id = newid;
+	j.name = "NoName";
+	ftl::Handle h = j.job.on(f);
+	return h;
 }
 
 static void removeJob(int id) {
@@ -162,23 +191,28 @@ static void trigger_jobs() {
 	UNIQUE_LOCK(mtx, lk);
 	const int64_t ts = last_frame*mspf;
 
+	if (active_jobs > 1) {
+		LOG(WARNING) << "Previous timer incomplete, skipping " << ts;
+		return;
+	}
+
 	// First do non-blocking high precision callbacks
 	const int64_t before = get_time();
 	for (auto &j : jobs[kTimerHighPrecision]) {
-		j.job(ts);
+		j.job.trigger(ts);
 	}
 	const int64_t after = get_time();
 	if (after - before > 1) LOG(WARNING) << "Precision jobs took too long (" << (after-before) << "ms)";
 
 	// Then do also non-blocking swap callbacks
 	for (auto &j : jobs[kTimerSwap]) {
-		j.job(ts);
+		j.job.trigger(ts);
 	}
 
 	// Now use thread jobs to do more intensive callbacks
 	for (auto &j : jobs[kTimerMain]) {
 		if (j.active) {
-			//LOG(WARNING) << "Timer job too slow ... skipped for " << ts;
+			LOG(WARNING) << "Timer job too slow ... skipped for " << ts;
 			continue;
 		}
 		j.active = true;
@@ -186,12 +220,32 @@ static void trigger_jobs() {
 
 		auto *pj = &j;
 
-		ftl::pool.push([pj,ts](int id) {
-			bool doremove = !pj->job(ts);
+		// If last job in list then do in this thread
+		if (active_jobs == static_cast<int>(jobs[kTimerMain].size())+1) {
+			lk.unlock();
+			bool doremove = !pj->job.trigger(ts);
 			pj->active = false;
 			active_jobs--;
 			if (doremove) removeJob(pj->id);
-		});
+			lk.lock();
+			break;
+		} else {
+			ftl::pool.push([pj,ts](int id) {
+				bool doremove = !pj->job.trigger(ts);
+				pj->active = false;
+				active_jobs--;
+				if (doremove) removeJob(pj->id);
+			});
+		}
+	}
+
+	// Final cleanup of stale jobs
+	for (size_t j=0; j<kTimerMAXLEVEL; ++j) {
+		for (auto i=jobs[j].begin(); i!=jobs[j].end(); ) {
+			// erase() already advances the iterator; otherwise step it manually
+			if (!(bool)((*i).job)) i = jobs[j].erase(i);
+			else ++i;
+		}
 	}
 }
 
@@ -246,25 +300,3 @@ void ftl::timer::reset() {
 		jobs[i].clear();
 	}
 }
-
-// ===== TimerHandle ===========================================================
-
-void ftl::timer::TimerHandle::cancel() const {
-	removeJob(id());
-}
-
-void ftl::timer::TimerHandle::pause() const {
-
-}
-
-void ftl::timer::TimerHandle::unpause() const {
-
-}
-
-void ftl::timer::TimerHandle::setMultiplier(unsigned int N) const {
-
-}
-
-void ftl::timer::TimerHandle::setName(const std::string &name) const {
-
-}
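With timer::add() now returning an ftl::Handle, a job stays registered only while the handle is kept alive; a sketch (the callback body is illustrative):

	auto h = ftl::timer::add(ftl::timer::kTimerMain, [](int64_t ts) {
		LOG(INFO) << "frame at " << ts;
		return true;        // return false to remove the job
	});

	ftl::timer::start(false);
	// ... later, destroying h (e.g. end of scope) removes the callback.
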
diff --git a/components/common/cpp/src/uri.cpp b/components/common/cpp/src/uri.cpp
index fa059a0a8..8885078d8 100644
--- a/components/common/cpp/src/uri.cpp
+++ b/components/common/cpp/src/uri.cpp
@@ -2,7 +2,7 @@
 #include <nlohmann/json.hpp>
 // #include <filesystem>  TODO When available
 #include <cstdlib>
-//#include <loguru.hpp>
+#include <loguru.hpp>
 
 #ifndef WIN32
 #include <unistd.h>
@@ -72,7 +72,7 @@ void URI::_parse(uri_t puri) {
 		m_frag = "";
 	} else {
 		m_host = std::string(uri.hostText.first, uri.hostText.afterLast - uri.hostText.first);
-		
+
 		std::string prototext = std::string(uri.scheme.first, uri.scheme.afterLast - uri.scheme.first);
 		if (prototext == "tcp") m_proto = SCHEME_TCP;
 		else if (prototext == "udp") m_proto = SCHEME_UDP;
@@ -82,6 +82,7 @@ void URI::_parse(uri_t puri) {
 		else if (prototext == "ipc") m_proto = SCHEME_IPC;
 		else if (prototext == "device") m_proto = SCHEME_DEVICE;
 		else if (prototext == "file") m_proto = SCHEME_FILE;
+		else if (prototext == "group") m_proto = SCHEME_GROUP;
 		else m_proto = SCHEME_OTHER;
 		m_protostr = prototext;
 
@@ -106,7 +107,7 @@ void URI::_parse(uri_t puri) {
 					uri.query.afterLast) != URI_SUCCESS) {
 				// Failure
 			}
-			
+
 			UriQueryListA *item = queryList;
 			while (item) {
 				m_qmap[item->key] = item->value;
@@ -140,9 +141,12 @@ void URI::_parse(uri_t puri) {
 			else if (uri.fragment.first != NULL) {
 				m_base += std::string(start, uri.fragment.first - start - 1);
 			}
-			else {
+			else if (start) {
 				m_base += std::string(start);
 			}
+			else {
+				m_base += std::string("");
+			}
 		}
 	}
 }
@@ -157,7 +161,7 @@ string URI::getPathSegment(int n) const {
 	else return m_pathseg[N];
 }
 
-string URI::getBaseURI(int n) {
+string URI::getBaseURI(int n) const {
 	if (n >= (int)m_pathseg.size()) return m_base;
 	if (n >= 0) {
 		string r = m_protostr + string("://") + m_host + ((m_port != 0) ? string(":") + std::to_string(m_port) : "");
@@ -172,13 +176,27 @@ string URI::getBaseURI(int n) {
 		size_t N = m_pathseg.size()+n;
 		for (size_t i=0; i<N; i++) {
 			r += "/";
-			r += getPathSegment(i);
+			r += getPathSegment(static_cast<int>(i));
 		}
 
 		return r;
 	} else return "";
 }
 
+std::string URI::getBaseURIWithUser() const {
+	std::string result;
+
+	result += m_protostr + "://";
+	if (m_userinfo.size() > 0) {
+		result += getUserInfo();
+		result += "@";
+	}
+	result += m_host;
+	if (m_port > 0) result += std::string(":") + std::to_string(m_port);
+	result += m_path;
+	return result;
+}
+
 string URI::getQuery() const {
 	string q;
 	for (auto x : m_qmap) {
@@ -196,8 +214,8 @@ void URI::setAttribute(const string &key, int value) {
 	m_qmap[key] = std::to_string(value);
 }
 
-void URI::to_json(nlohmann::json &json) {
-	std::string uri = getBaseURI();
+void URI::to_json(nlohmann::json &json) const {
+	std::string uri = to_string();
 	if (m_frag.size() > 0) uri += std::string("#") + getFragment();
 
 	json["uri"] = uri;
@@ -207,14 +225,18 @@ void URI::to_json(nlohmann::json &json) {
 		size_t pos = 0;
 		size_t lpos = 0;
 		while ((pos = i.first.find('/', lpos)) != std::string::npos) {
-			current = &((*current)[i.first.substr(lpos, pos-lpos)]);
+			std::string subobj = i.first.substr(lpos, pos-lpos);
+			current = &((*current)[subobj]);
 			lpos = pos+1;
 		}
+
+		std::string obj = i.first.substr(lpos);
+
 		auto p = nlohmann::json::parse(i.second, nullptr, false);
 		if (!p.is_discarded()) {
-			(*current)[i.first.substr(lpos)] = p;
+			(*current)[obj] = p;
 		} else {
-			(*current)[i.first.substr(lpos)] = i.second;
+			(*current)[obj] = i.second;
 		}
 	}
 }
diff --git a/components/common/cpp/test/configurable_unit.cpp b/components/common/cpp/test/configurable_unit.cpp
index ed5a1a73c..ec5668c43 100644
--- a/components/common/cpp/test/configurable_unit.cpp
+++ b/components/common/cpp/test/configurable_unit.cpp
@@ -47,7 +47,7 @@ SCENARIO( "Configurable::on()" ) {
 		Configurable cfg(json);
 		bool trig = false;
 
-		cfg.on("test", [&trig](const ftl::config::Event &e) {
+		cfg.on("test", [&trig]() {
 			trig = true;
 		});
 
@@ -63,10 +63,10 @@ SCENARIO( "Configurable::on()" ) {
 		bool trig1 = false;
 		bool trig2 = false;
 
-		cfg.on("test", [&trig1](const ftl::config::Event &e) {
+		cfg.on("test", [&trig1]() {
 			trig1 = true;
 		});
-		cfg.on("test", [&trig2](const ftl::config::Event &e) {
+		cfg.on("test", [&trig2]() {
 			trig2 = true;
 		});
 
diff --git a/components/common/cpp/test/msgpack_unit.cpp b/components/common/cpp/test/msgpack_unit.cpp
index c5e79d3bf..953bac5c9 100644
--- a/components/common/cpp/test/msgpack_unit.cpp
+++ b/components/common/cpp/test/msgpack_unit.cpp
@@ -60,17 +60,25 @@ TEST_CASE( "msgpack cv::Mat" ) {
 	SECTION( "Mat::ones(Size(1, 5), CV_8UC3)" ) {
 		Mat A = Mat::ones(Size(1, 5), CV_8UC3);
 		Mat B = msgpack_unpack<Mat>(msgpack_pack(A));
-		
+
 		REQUIRE(A.size() == B.size());
 		REQUIRE(A.type() == B.type());
-		REQUIRE(cv::countNonZero(A != B) == 0);
+
+		cv::Mat diff;
+		cv::absdiff(A, B, diff);
+		REQUIRE(cv::countNonZero(diff.reshape(1, diff.total())) == 0);
+
+		// How did the original REQUIRE() ever pass? countNonZero() does not
+		// accept multi-channel images, and A != B yields a multi-channel
+		// result. (Test fixed by the comparison above.)
+		//REQUIRE(cv::countNonZero(A != B) == 0);
 	}
 
 	SECTION ( "Mat 10x10 CV_64FC1 with random values [-1000, 1000]" ) {
 		Mat A(Size(10, 10), CV_64FC1);
 		cv::randu(A, -1000, 1000);
 		Mat B = msgpack_unpack<Mat>(msgpack_pack(A));
-		
+
 		REQUIRE(A.size() == B.size());
 		REQUIRE(A.type() == B.type());
 		REQUIRE(cv::countNonZero(A != B) == 0);
@@ -82,9 +90,9 @@ TEST_CASE( "msgpack cv::Mat" ) {
 
 		msgpack::zone z;
 		auto obj = msgpack::object(A, z);
-		
+
 		Mat B = msgpack_unpack<Mat>(msgpack_pack(obj));
-		
+
 		REQUIRE(A.size() == B.size());
 		REQUIRE(A.type() == B.type());
 		REQUIRE(cv::countNonZero(A != B) == 0);
@@ -97,12 +105,12 @@ TEST_CASE( "msgpack cv::Mat" ) {
 			A.setTo(0);
 
 			Mat B = msgpack_unpack<Mat>(msgpack_pack(A));
-		
+
 			REQUIRE(A.size() == B.size());
 			REQUIRE(A.type() == B.type());
 			REQUIRE(cv::countNonZero(A != B) == 0);
 		}
-		catch (msgpack::type_error) {
+		catch (const msgpack::type_error &e) {
 			// if not supported, throws exception
 		}
 	}
@@ -111,7 +119,7 @@ TEST_CASE( "msgpack cv::Mat" ) {
 		auto res = msgpack_unpack<cv::Rect2d>(msgpack_pack(cv::Rect2d(1,2,3,4)));
 		REQUIRE(res == cv::Rect2d(1,2,3,4));
 	}
-	
+
 	SECTION( "Vec<T, SIZE>" ) {
 		auto res = msgpack_unpack<cv::Vec4d>(msgpack_pack(cv::Vec4d(1,2,3,4)));
 		REQUIRE(res == cv::Vec4d(1,2,3,4));
diff --git a/components/common/cpp/test/timer_unit.cpp b/components/common/cpp/test/timer_unit.cpp
index 2fdc70034..2c7602646 100644
--- a/components/common/cpp/test/timer_unit.cpp
+++ b/components/common/cpp/test/timer_unit.cpp
@@ -59,13 +59,13 @@ TEST_CASE( "Timer::add() High Precision Accuracy" ) {
 
 		REQUIRE( (rc.id() >= 0) );
 
-		ftl::timer::add(ftl::timer::kTimerHighPrecision, [&didrun](int64_t ts) {
+		auto h = ftl::timer::add(ftl::timer::kTimerHighPrecision, [&didrun](int64_t ts) {
 			didrun[1] = true;
 			ftl::timer::stop(false);
 			return true;
 		});
 
-		ftl::timer::add(ftl::timer::kTimerHighPrecision, [&didrun](int64_t ts) {
+		auto h2 = ftl::timer::add(ftl::timer::kTimerHighPrecision, [&didrun](int64_t ts) {
 			didrun[2] = true;
 			ftl::timer::stop(false);
 			return true;
@@ -184,7 +184,7 @@ TEST_CASE( "Timer::add() Main job" ) {
 
 		REQUIRE( (rc.id() >= 0) );
 
-		ftl::timer::add(ftl::timer::kTimerMain, [&job2](int64_t ts) {
+		auto h = ftl::timer::add(ftl::timer::kTimerMain, [&job2](int64_t ts) {
 			job2++;
 			return true;
 		});
@@ -212,24 +212,7 @@ TEST_CASE( "Timer::add() Main job" ) {
 	}
 }
 
-TEST_CASE( "TimerHandle::cancel()" ) {
-	SECTION( "Invalid id" ) {
-		bool didjob = false;
-		ftl::timer::reset();
-
-		ftl::timer::add(ftl::timer::kTimerMain, [&didjob](int64_t ts) {
-			didjob = true;
-			ftl::timer::stop(false);
-			return true;
-		});
-
-		// Fake Handle
-		ftl::timer::TimerHandle h(44);
-		h.cancel();
-		ftl::timer::start(true);
-		REQUIRE( didjob );
-	}
-
+TEST_CASE( "Timer Handle::cancel()" ) {
 	SECTION( "Valid id" ) {
 		bool didjob = false;
 		ftl::timer::reset();
diff --git a/components/common/cpp/test/uri_unit.cpp b/components/common/cpp/test/uri_unit.cpp
index 59c5391f3..b41cb9a01 100644
--- a/components/common/cpp/test/uri_unit.cpp
+++ b/components/common/cpp/test/uri_unit.cpp
@@ -189,3 +189,15 @@ SCENARIO( "URI::getBaseURI(N)" ) {
 	}
 }
 
+SCENARIO( "URI::getBaseURIWithUser()" ) {
+	GIVEN( "both username and password" ) {
+		URI uri("http://nick:test@localhost:1000/hello/world?group=test2");
+		REQUIRE( uri.getBaseURIWithUser() == "http://nick:test@localhost:1000/hello/world" );
+	}
+
+	GIVEN( "missing username and password" ) {
+		URI uri("http://localhost:1000/hello/world?group=test2");
+		REQUIRE( uri.getBaseURIWithUser() == "http://localhost:1000/hello/world" );
+	}
+}
+
diff --git a/components/control/cpp/src/master.cpp b/components/control/cpp/src/master.cpp
index 44361b2d5..d6a2f1cac 100644
--- a/components/control/cpp/src/master.cpp
+++ b/components/control/cpp/src/master.cpp
@@ -15,7 +15,7 @@ using std::function;
 using ftl::ctrl::LogEvent;
 
 Master::Master(Configurable *root, Universe *net)
-		: root_(root), net_(net) {
+		: root_(root), net_(net), active_(false) {
 	// Init system state
 	state_.paused = false;
 
@@ -36,6 +36,18 @@ Master::Master(Configurable *root, Universe *net)
 		state_.paused = !state_.paused;
 	});
 
+	net->bind("list_streams", []() {
+		return std::list<std::string>();
+	});
+
+	net->bind("find_stream", [](const std::string &uri, bool proxy) {
+		return std::optional<ftl::UUID>{};
+	});
+
+	net->bind("add_stream", [](const std::string &uri) {
+
+	});
+
 	net->bind("update_cfg", [](const std::string &uri, const std::string &value) {
 		ftl::config::update(uri, nlohmann::json::parse(value));
 	});
@@ -82,6 +94,8 @@ Master::Master(Configurable *root, Universe *net)
 		ftl::UUID peer = p->id();
 		auto cs = getConfigurables(peer);
 		for (auto c : cs) {
+			if (ftl::config::find(c) != nullptr) continue;
+
 			//LOG(INFO) << "NET CONFIG: " << c;
 			ftl::config::json_t *configuration = new ftl::config::json_t;
 			*configuration = getConfigurable(peer, c);
@@ -101,6 +115,8 @@ Master::Master(Configurable *root, Universe *net)
 		}
 		peerConfigurables_[peer].clear();
 	});
+
+	active_ = true;
 }
 
 Master::~Master() {
@@ -156,9 +172,9 @@ vector<string> Master::getConfigurables() {
 
 vector<string> Master::getConfigurables(const ftl::UUID &peer) {
 	try {
-		LOG(INFO) << "LISTING CONFIGS";
 		return net_->call<vector<string>>(peer, "list_configurables");
-	} catch (...) {
+	} catch (const ftl::exception &e) {
+		e.ignore();
 		return {};
 	}
 }
@@ -242,4 +258,4 @@ void Master::stop() {
 	}
 
 	in_log_ = false;
-}*/
\ No newline at end of file
+}*/
diff --git a/components/net/cpp/CMakeLists.txt b/components/net/cpp/CMakeLists.txt
index c765fa538..04e89bdfe 100644
--- a/components/net/cpp/CMakeLists.txt
+++ b/components/net/cpp/CMakeLists.txt
@@ -18,6 +18,8 @@ target_include_directories(ftlnet PUBLIC
 	PRIVATE src)
 target_link_libraries(ftlnet ftlctrl ftlcommon Threads::Threads glog::glog ${UUID_LIBRARIES})
 
+target_precompile_headers(ftlnet REUSE_FROM ftlcommon)
+
 install(TARGETS ftlnet EXPORT ftlnet-config
 	ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
 	LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
diff --git a/components/net/cpp/include/ftl/net/dispatcher.hpp b/components/net/cpp/include/ftl/net/dispatcher.hpp
index 1666184c0..ea136483f 100644
--- a/components/net/cpp/include/ftl/net/dispatcher.hpp
+++ b/components/net/cpp/include/ftl/net/dispatcher.hpp
@@ -60,13 +60,16 @@ namespace net {
 
 class Dispatcher {
 	public:
-	explicit Dispatcher(Dispatcher *parent=nullptr) : parent_(parent) {}
-	
+	explicit Dispatcher(Dispatcher *parent=nullptr) : parent_(parent) {
+		// FIXME: funcs_ is not protected for concurrent bind/dispatch; as a stop-gap,
+		// reserve a large size so rehashing is unlikely while handlers are in use.
+		funcs_.reserve(1024);
+	}
+
 	//void dispatch(Peer &, const std::string &msg);
 	void dispatch(Peer &, const msgpack::object &msg);
 
 	// Without peer object =====================================================
-	
+
 	template <typename F>
 	void bind(std::string const &name, F func,
 		                  ftl::internal::tags::void_result const &,
@@ -221,11 +224,11 @@ class Dispatcher {
 			funcs_.erase(i);
 		}
 	}
-	
+
 	std::vector<std::string> getBindings() const;
 
 	bool isBound(const std::string &name) const;
-	
+
 	using adaptor_type = std::function<std::unique_ptr<msgpack::object_handle>(
         ftl::net::Peer &, msgpack::object const &)>;
 
@@ -234,16 +237,16 @@ class Dispatcher {
 
     //! \brief This is the type of notification messages.
     using notification_t = std::tuple<int8_t, std::string, msgpack::object>;
-    
+
     using response_t =
         std::tuple<uint32_t, uint32_t, std::string, msgpack::object>;
-	
+
 	private:
 	Dispatcher *parent_;
 	std::unordered_map<std::string, adaptor_type> funcs_;
-	
+
 	std::optional<adaptor_type> _locateHandler(const std::string &name) const;
-	
+
 	static void enforce_arg_count(std::string const &func, std::size_t found,
                                   std::size_t expected);
 
diff --git a/components/net/cpp/include/ftl/net/listener.hpp b/components/net/cpp/include/ftl/net/listener.hpp
index 91dc56a26..bbf6d0c6a 100644
--- a/components/net/cpp/include/ftl/net/listener.hpp
+++ b/components/net/cpp/include/ftl/net/listener.hpp
@@ -27,10 +27,13 @@ class Listener {
 	
 	void connection(std::shared_ptr<Peer> &s);
 	void onConnection(connecthandler_t h) { handler_connect_.push_back(h); };
+
+	inline int port() const { return port_; }
 	
 	private:
 	SOCKET descriptor_;
 	Protocol *default_proto_;
+	int port_;
 	//sockaddr_in slocalAddr;
 	std::vector<connecthandler_t> handler_connect_;
 };
diff --git a/components/net/cpp/include/ftl/net/peer.hpp b/components/net/cpp/include/ftl/net/peer.hpp
index 6ac41b2f7..35cbe5bb2 100644
--- a/components/net/cpp/include/ftl/net/peer.hpp
+++ b/components/net/cpp/include/ftl/net/peer.hpp
@@ -101,6 +101,8 @@ class Peer {
 	 * Make a reconnect attempt. Called internally by Universe object.
 	 */
 	bool reconnect();
+
+	inline bool isOutgoing() const { return outgoing_; }
 	
 	/**
 	 * Test if the connection is valid. This returns true in all conditions
@@ -193,6 +195,8 @@ class Peer {
 	bool isWaiting() const { return is_waiting_; }
 
 	void rawClose() { _badClose(false); }
+
+	inline void noReconnect() { can_reconnect_ = false; }
 	
 	public:
 	static const int kMaxMessage = 10*1024*1024;  // 10Mb currently
@@ -206,8 +210,8 @@ class Peer {
 
 	void _badClose(bool retry=true);
 	
-	void _dispatchResponse(uint32_t id, msgpack::object &obj);
-	void _sendResponse(uint32_t id, const msgpack::object &obj);
+	void _dispatchResponse(uint32_t id, const std::string &name, msgpack::object &obj);
+	void _sendResponse(uint32_t id, const std::string &name, const msgpack::object &obj);
 	
 	/**
 	 * Get the internal OS dependent socket.
@@ -261,6 +265,7 @@ class Peer {
 	
 	std::string uri_;				// Original connection URI, or assumed URI
 	ftl::UUID peerid_;				// Received in handshake or allocated
+	bool outgoing_;
 	
 	ftl::net::Dispatcher *disp_;	// For RPC call dispatch
 	//std::vector<std::function<void(Peer &)>> open_handlers_;
@@ -268,7 +273,7 @@ class Peer {
 	//std::vector<std::function<void(Peer &)>> close_handlers_;
 	std::map<int, std::unique_ptr<virtual_caller>> callbacks_;
 	
-	static volatile int rpcid__;				// Return ID for RPC calls
+	static std::atomic_int rpcid__;				// Return ID for RPC calls
 };
 
 // --- Inline Template Implementations -----------------------------------------
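
Swapping volatile int for std::atomic_int on rpcid__ is more than cosmetic: volatile only stops the compiler caching the value, it does not make rpcid__++ an indivisible read-modify-write, so two threads issuing RPCs concurrently could draw the same id and then clash in callbacks_. With the atomic, id allocation reduces to a fetch-and-increment; a minimal standalone sketch of the pattern (the actual call site is not part of this hunk):

	#include <atomic>

	static std::atomic_int next_rpc_id{0};

	int allocateRpcId() {
		// A single atomic read-modify-write; safe from any thread.
		return next_rpc_id.fetch_add(1, std::memory_order_relaxed);
	}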
diff --git a/components/net/cpp/include/ftl/net/universe.hpp b/components/net/cpp/include/ftl/net/universe.hpp
index f5af2edef..3d3a4b3ff 100644
--- a/components/net/cpp/include/ftl/net/universe.hpp
+++ b/components/net/cpp/include/ftl/net/universe.hpp
@@ -85,6 +85,9 @@ class Universe : public ftl::Configurable {
 	 * @param addr URI giving protocol, interface and port
 	 */
 	Peer *connect(const std::string &addr);
+
+	bool isConnected(const ftl::URI &uri);
+	bool isConnected(const std::string &s);
 	
 	size_t numberOfPeers() const { return peers_.size(); }
 
@@ -214,7 +217,7 @@ class Universe : public ftl::Configurable {
 	
 	private:
 	void _run();
-	int _setDescriptors();
+	SOCKET _setDescriptors();
 	void _installBindings();
 	void _installBindings(Peer *);
 	//bool _subscribe(const std::string &res);
@@ -236,6 +239,7 @@ class Universe : public ftl::Configurable {
 	
 	std::vector<ftl::net::Listener*> listeners_;
 	std::vector<ftl::net::Peer*> peers_;
+	std::unordered_map<std::string, ftl::net::Peer*> peer_by_uri_;
 	//std::map<std::string, std::vector<ftl::UUID>> subscribers_;
 	//std::unordered_set<std::string> owned_;
 	std::map<ftl::UUID, ftl::net::Peer*> peer_ids_;
@@ -244,6 +248,7 @@ class Universe : public ftl::Configurable {
 	std::list<ReconnectInfo> reconnects_;
 	size_t phase_;
 	std::list<ftl::net::Peer*> garbage_;
+	ftl::Handle garbage_timer_;
 
 	size_t send_size_;
 	size_t recv_size_;
@@ -306,95 +311,74 @@ void Universe::broadcast(const std::string &name, ARGS... args) {
 
 template <typename R, typename... ARGS>
 std::optional<R> Universe::findOne(const std::string &name, ARGS... args) {
-	bool hasreturned = false;
-	std::mutex m;
-	std::condition_variable cv;
-	std::atomic<int> count = 0;
-	std::optional<R> result;
-
-	auto handler = [&](const std::optional<R> &r) {
-		count--;
-		std::unique_lock<std::mutex> lk(m);
-		if (hasreturned || !r) return;
-		hasreturned = true;
-		result = r;
-		lk.unlock();
-		cv.notify_one();
+	struct SharedData {
+		std::atomic_bool hasreturned = false;
+		std::mutex m;
+		std::condition_variable cv;
+		std::optional<R> result;
 	};
 
-	std::map<Peer*, int> record;
-	SHARED_LOCK(net_mutex_,lk);
+	auto sdata = std::make_shared<SharedData>();
 
-	for (auto p : peers_) {
-		if (!p->waitConnection()) continue;
-		count++;
-		record[p] = p->asyncCall<std::optional<R>>(name, handler, args...);
-	}
-	lk.unlock();
-	
-	{	// Block thread until async callback notifies us
-		std::unique_lock<std::mutex> llk(m);
-		// FIXME: what happens if one clients does not return (count != 0)?
-		cv.wait_for(llk, std::chrono::seconds(1), [&hasreturned, &count] {
-			return hasreturned && count == 0;
-		});
-
-		// Cancel any further results
-		lk.lock();
+	auto handler = [sdata](const std::optional<R> &r) {
+		std::unique_lock<std::mutex> lk(sdata->m);
+		if (r && !sdata->hasreturned) {
+			sdata->hasreturned = true;
+			sdata->result = r;
+		}
+		lk.unlock();
+		sdata->cv.notify_one();
+	};
+
+	{
+		SHARED_LOCK(net_mutex_,lk);
 		for (auto p : peers_) {
-			auto m = record.find(p);
-			if (m != record.end()) {
-				p->cancelCall(m->second);
-			}
+			if (!p->waitConnection()) continue;
+			p->asyncCall<std::optional<R>>(name, handler, args...);
 		}
 	}
+	
+	// Block thread until async callback notifies us
+	std::unique_lock<std::mutex> llk(sdata->m);
+	sdata->cv.wait_for(llk, std::chrono::seconds(1), [sdata] {
+		return (bool)sdata->hasreturned;
+	});
 
-	return result;
+	return sdata->result;
 }
 
 template <typename R, typename... ARGS>
 std::vector<R> Universe::findAll(const std::string &name, ARGS... args) {
-	int returncount = 0;
-	int sentcount = 0;
-	std::mutex m;
-	std::condition_variable cv;
-	
-	std::vector<R> results;
+	struct SharedData {
+		std::atomic_int returncount = 0;
+		std::atomic_int sentcount = 0;
+		std::mutex m;
+		std::condition_variable cv;
+		std::vector<R> results;
+	};
+
+	auto sdata = std::make_shared<SharedData>();
 
-	auto handler = [&](const std::vector<R> &r) {
-		//UNIQUE_LOCK(m,lk);
-		std::unique_lock<std::mutex> lk(m);
-		returncount++;
-		results.insert(results.end(), r.begin(), r.end());
+	auto handler = [sdata](const std::vector<R> &r) {
+		std::unique_lock<std::mutex> lk(sdata->m);
+		++sdata->returncount;
+		sdata->results.insert(sdata->results.end(), r.begin(), r.end());
 		lk.unlock();
-		cv.notify_one();
+		sdata->cv.notify_one();
 	};
 
-	std::map<Peer*, int> record;
-	SHARED_LOCK(net_mutex_,lk);
-	for (auto p : peers_) {
-		if (!p->waitConnection()) continue;
-		sentcount++;
-		record[p] = p->asyncCall<std::vector<R>>(name, handler, args...);
-	}
-	lk.unlock();
-	
-	{  // Block thread until async callback notifies us
-		//UNIQUE_LOCK(m,llk);
-		std::unique_lock<std::mutex> llk(m);
-		cv.wait_for(llk, std::chrono::seconds(1), [&returncount,&sentcount]{return returncount == sentcount;});
-
-		// Cancel any further results
-		lk.lock();
+	{
+		SHARED_LOCK(net_mutex_,lk);
 		for (auto p : peers_) {
-			auto m = record.find(p);
-			if (m != record.end()) {
-				p->cancelCall(m->second);
-			}
+			if (!p->waitConnection()) continue;
+			++sdata->sentcount;
+			p->asyncCall<std::vector<R>>(name, handler, args...);
 		}
 	}
-
-	return results;
+	
+	std::unique_lock<std::mutex> llk(sdata->m);
+	sdata->cv.wait_for(llk, std::chrono::seconds(1), [sdata]{return sdata->returncount == sdata->sentcount; });
+	return sdata->results;
 }
 
 template <typename R, typename... ARGS>
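
The substantive change in findOne()/findAll() is who owns the rendezvous state. Previously the mutex, condition variable, result and counters lived on the caller's stack and were captured by reference; if wait_for() timed out and the function returned while a peer was still replying, the late callback wrote into a destroyed stack frame. Now that state sits in a heap-allocated SharedData held by shared_ptr, and every handler captures the shared_ptr by value, so whoever finishes last keeps it alive. Note that the old cancelCall() pass is also gone, so callbacks for peers that never reply are no longer proactively removed. The lifetime pattern in isolation (a self-contained sketch, independent of the Peer/asyncCall machinery):

	#include <chrono>
	#include <condition_variable>
	#include <memory>
	#include <mutex>
	#include <optional>
	#include <thread>

	std::optional<int> ask_with_timeout() {
		struct Shared {
			std::mutex m;
			std::condition_variable cv;
			std::optional<int> result;
		};
		auto s = std::make_shared<Shared>();

		// Simulated late reply: it arrives after the 1 second wait below has expired.
		// Capturing the shared_ptr by value keeps Shared alive for this writer.
		std::thread([s]() {
			std::this_thread::sleep_for(std::chrono::milliseconds(1500));
			{ std::lock_guard<std::mutex> lk(s->m); s->result = 42; }
			s->cv.notify_one();
		}).detach();

		std::unique_lock<std::mutex> lk(s->m);
		s->cv.wait_for(lk, std::chrono::seconds(1), [s] { return s->result.has_value(); });
		return s->result;  // empty here; the late write lands in still-valid memory
	}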
diff --git a/components/net/cpp/src/dispatcher.cpp b/components/net/cpp/src/dispatcher.cpp
index b1a5b265e..2be0055ae 100644
--- a/components/net/cpp/src/dispatcher.cpp
+++ b/components/net/cpp/src/dispatcher.cpp
@@ -67,7 +67,7 @@ void ftl::net::Dispatcher::dispatch_call(Peer &s, const msgpack::object &msg) {
     
     if (type == 1) {
     	//DLOG(INFO) << "RPC return for " << id;
-    	s._dispatchResponse(id, args);
+    	s._dispatchResponse(id, name, args);
     } else if (type == 0) {
 		//DLOG(INFO) << "RPC " << name << "() <- " << s.getURI();
 
@@ -77,7 +77,7 @@ void ftl::net::Dispatcher::dispatch_call(Peer &s, const msgpack::object &msg) {
 			//DLOG(INFO) << "Found binding for " << name;
 		    try {
 		        auto result = (*func)(s, args); //->get();
-		        s._sendResponse(id, result->get());
+		        s._sendResponse(id, name, result->get());
 		        /*response_t res_obj = std::make_tuple(1,id,msgpack::object(),result->get());
 				std::stringstream buf;
 				msgpack::pack(buf, res_obj);			
@@ -101,7 +101,7 @@ void ftl::net::Dispatcher::dispatch_call(Peer &s, const msgpack::object &msg) {
 
 optional<Dispatcher::adaptor_type> ftl::net::Dispatcher::_locateHandler(const std::string &name) const {
 	auto it_func = funcs_.find(name);
-	if (it_func == end(funcs_)) {
+	if (it_func == funcs_.end()) {
 		if (parent_ != nullptr) {
 			return parent_->_locateHandler(name);
 		} else {
diff --git a/components/net/cpp/src/listener.cpp b/components/net/cpp/src/listener.cpp
index 14c8f5570..90ec540f4 100644
--- a/components/net/cpp/src/listener.cpp
+++ b/components/net/cpp/src/listener.cpp
@@ -102,7 +102,7 @@ Listener::Listener(const char *pUri) : default_proto_(NULL) {
 
 	if (uri.getProtocol() == URI::SCHEME_TCP) {
 		descriptor_ = tcpListen(uri);
-		std::cout << "Listening: " << pUri << " - " << descriptor_ << std::endl;
+		port_ = uri.getPort();
 	} else if (uri.getProtocol() == URI::SCHEME_WS) {
 		descriptor_ = wsListen(uri);
 	} else {
diff --git a/components/net/cpp/src/peer.cpp b/components/net/cpp/src/peer.cpp
index 2a8bf879c..368a5bbb2 100644
--- a/components/net/cpp/src/peer.cpp
+++ b/components/net/cpp/src/peer.cpp
@@ -59,7 +59,7 @@ using std::vector;
     return ss.str();
 }*/
 
-volatile int Peer::rpcid__ = 0;
+std::atomic_int Peer::rpcid__ = 0;
 
 // Global peer UUID
 ftl::UUID ftl::net::this_peer;
@@ -67,7 +67,7 @@ ftl::UUID ftl::net::this_peer;
 //static ctpl::thread_pool pool(5);
 
 // TODO:(nick) Move to tcp_internal.cpp
-static SOCKET tcpConnect(URI &uri, int ssize, int rsize) {
+static SOCKET tcpConnect(URI &uri, size_t ssize, size_t rsize) {
 	int rc;
 	//sockaddr_in destAddr;
 
@@ -90,11 +90,11 @@ static SOCKET tcpConnect(URI &uri, int ssize, int rsize) {
 	int flags =1; 
     if (setsockopt(csocket, IPPROTO_TCP, TCP_NODELAY, (const char *)&flags, sizeof(flags))) { LOG(ERROR) << "ERROR: setsocketopt(), TCP_NODELAY"; };
 
-	int a = rsize;
+	int a = static_cast<int>(rsize);
 	if (setsockopt(csocket, SOL_SOCKET, SO_RCVBUF, (const char *)&a, sizeof(int)) == -1) {
 		fprintf(stderr, "Error setting socket opts: %s\n", strerror(errno));
 	}
-	a = ssize;
+	a = static_cast<int>(ssize);
 	if (setsockopt(csocket, SOL_SOCKET, SO_SNDBUF, (const char *)&a, sizeof(int)) == -1) {
 		fprintf(stderr, "Error setting socket opts: %s\n", strerror(errno));
 	}
@@ -130,22 +130,30 @@ static SOCKET tcpConnect(URI &uri, int ssize, int rsize) {
 	if (rc < 0) {
 		if (errno == EINPROGRESS) {
 			// FIXME:(Nick) Move to main select thread to prevent blocking
-			fd_set myset; 
+			fd_set myset;
+			fd_set errset;
 			struct timeval tv;
 			tv.tv_sec = 1; 
 			tv.tv_usec = 0; 
 			FD_ZERO(&myset); 
-			FD_SET(csocket, &myset); 
-			rc = select(csocket+1, NULL, &myset, NULL, &tv); 
-			if (rc <= 0) { //} && errno != EINTR) { 
+			FD_SET(csocket, &myset);
+			FD_ZERO(&errset);
+			FD_SET(csocket, &errset);
+
+			rc = select(csocket+1u, NULL, &myset, &errset, &tv);
+			if (rc <= 0 || FD_ISSET(csocket, &errset)) {
+				if (rc <= 0) {
+					LOG(ERROR) << "Could not connect to " << uri.getBaseURI();
+				} else {
+					LOG(ERROR) << "Could not connect (" << errno << ") " << uri.getBaseURI();
+				}
+
 				#ifndef WIN32
 				close(csocket);
 				#else
 				closesocket(csocket);
 				#endif
 
-				LOG(ERROR) << "Could not connect to " << uri.getBaseURI();
-
 				return INVALID_SOCKET;
 			}
 		} else {
@@ -179,14 +187,15 @@ Peer::Peer(SOCKET s, Universe *u, Dispatcher *d) : sock_(s), can_reconnect_(fals
 	
 	is_waiting_ = true;
 	scheme_ = ftl::URI::SCHEME_TCP;
+	outgoing_ = false;
 
 	int flags =1; 
     if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY, (const char *)&flags, sizeof(flags))) { LOG(ERROR) << "ERROR: setsocketopt(), TCP_NODELAY"; };
-	int a = u->getRecvBufferSize();
+	int a = static_cast<int>(u->getRecvBufferSize());
 	if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (const char *)&a, sizeof(int)) == -1) {
 		fprintf(stderr, "Error setting socket opts: %s\n", strerror(errno));
 	}
-	a = u->getSendBufferSize();
+	a = static_cast<int>(u->getSendBufferSize());
 	if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (const char *)&a, sizeof(int)) == -1) {
 		fprintf(stderr, "Error setting socket opts: %s\n", strerror(errno));
 	}
@@ -232,6 +241,7 @@ Peer::Peer(const char *pUri, Universe *u, Dispatcher *d) : can_reconnect_(true),
 	
 	status_ = kInvalid;
 	sock_ = INVALID_SOCKET;
+	outgoing_ = true;
 	
 	disp_ = new Dispatcher(d);
 
@@ -252,7 +262,7 @@ Peer::Peer(const char *pUri, Universe *u, Dispatcher *d) : can_reconnect_(true),
 				_badClose(false);
 			} else {
 				status_ = kConnecting;
-				LOG(INFO) << "WEB SOCK CONNECTED";
+				LOG(INFO) << "Websocket connected: " << pUri;
 			}
 		} else {
 			LOG(ERROR) << "Connection refused to " << uri.getHost() << ":" << uri.getPort();
@@ -371,7 +381,7 @@ void Peer::close(bool retry) {
 		send("__disconnect__");
 
 		_badClose(retry);
-		LOG(INFO) << "Deliberate disconnect of peer.";
+		LOG(INFO) << "Deliberate disconnect of peer: " << uri_;
 	}
 }
 
@@ -383,12 +393,12 @@ void Peer::_badClose(bool retry) {
 		closesocket(sock_);
 		#endif
 		sock_ = INVALID_SOCKET;
-		status_ = kDisconnected;
 		
 		//auto i = find(sockets.begin(),sockets.end(),this);
 		//sockets.erase(i);
 
 		universe_->_notifyDisconnect(this);
+		status_ = kDisconnected;
 
 		// Attempt auto reconnect?
 		if (retry && can_reconnect_) {
@@ -410,7 +420,7 @@ void Peer::socketError() {
 	// more socket errors...
 	_badClose();
 
-	LOG(ERROR) << "Socket: " << uri_ << " - error " << err;
+	if (err != 0) LOG(ERROR) << "Socket: " << uri_ << " - error " << err;
 }
 
 void Peer::error(int e) {
@@ -434,7 +444,7 @@ void Peer::data() {
 			return;
 		}
 
-		int cap = recv_buf_.buffer_capacity();
+		int cap = static_cast<int>(recv_buf_.buffer_capacity());
 		auto buf = recv_buf_.buffer();
 		lk.unlock();
 
@@ -560,7 +570,7 @@ bool Peer::_data() {
 	return true;
 }
 
-void Peer::_dispatchResponse(uint32_t id, msgpack::object &res) {	
+void Peer::_dispatchResponse(uint32_t id, const std::string &name, msgpack::object &res) {	
 	// TODO: Handle error reporting...
 	UNIQUE_LOCK(cb_mtx_,lk);
 	if (callbacks_.count(id) > 0) {
@@ -577,7 +587,7 @@ void Peer::_dispatchResponse(uint32_t id, msgpack::object &res) {
 			LOG(ERROR) << "Exception in RPC response: " << e.what();
 		}
 	} else {
-		LOG(WARNING) << "Missing RPC callback for result - discarding";
+		LOG(WARNING) << "Missing RPC callback for result - discarding: " << name;
 	}
 }
 
@@ -588,8 +598,8 @@ void Peer::cancelCall(int id) {
 	}
 }
 
-void Peer::_sendResponse(uint32_t id, const msgpack::object &res) {
-	Dispatcher::response_t res_obj = std::make_tuple(1,id,std::string(""),res);
+void Peer::_sendResponse(uint32_t id, const std::string &name, const msgpack::object &res) {
+	Dispatcher::response_t res_obj = std::make_tuple(1,id,name,res);
 	UNIQUE_LOCK(send_mtx_,lk);
 	if (scheme_ == ftl::URI::SCHEME_WS) send_buf_.append_ref(nullptr,0);
 	msgpack::pack(send_buf_, res_obj);
@@ -656,6 +666,8 @@ void Peer::_connected() {
 int Peer::_send() {
 	if (sock_ == INVALID_SOCKET) return -1;
 
+	int c=0;
+
 	// Are we using a websocket?
 	if (scheme_ == ftl::URI::SCHEME_WS) {
 		// Create a websocket header as well.
@@ -682,24 +694,42 @@ int Peer::_send() {
 		// Patch the first io vector to be ws header
 		const_cast<iovec*>(&sendvec[0])->iov_base = buf;
 		const_cast<iovec*>(&sendvec[0])->iov_len = rc;
-	}
 	
 #ifdef WIN32
-	auto send_vec = send_buf_.vector();
-	auto send_size = send_buf_.vector_size();
-	vector<WSABUF> wsabuf(send_size);
-
-	for (int i = 0; i < send_size; i++) {
-		wsabuf[i].len = (ULONG)send_vec[i].iov_len;
-		wsabuf[i].buf = (char*)send_vec[i].iov_base;
-		//c += ftl::net::internal::send(sock_, (char*)send_vec[i].iov_base, (int)send_vec[i].iov_len, 0);
-	}
+		auto send_vec = send_buf_.vector();
+		auto send_size = send_buf_.vector_size();
+		vector<WSABUF> wsabuf(send_size);
+
+		for (int i = 0; i < send_size; i++) {
+			wsabuf[i].len = (ULONG)send_vec[i].iov_len;
+			wsabuf[i].buf = (char*)send_vec[i].iov_base;
+			//c += ftl::net::internal::send(sock_, (char*)send_vec[i].iov_base, (int)send_vec[i].iov_len, 0);
+		}
+
+		DWORD bytessent;
+		c = WSASend(sock_, wsabuf.data(), static_cast<DWORD>(send_size), (LPDWORD)&bytessent, 0, NULL, NULL);
+#else
+		c = ftl::net::internal::writev(sock_, send_buf_.vector(), (int)send_buf_.vector_size());
+#endif
+
+	} else {
+#ifdef WIN32
+		auto send_vec = send_buf_.vector();
+		auto send_size = send_buf_.vector_size();
+		vector<WSABUF> wsabuf(send_size);
+
+		for (int i = 0; i < send_size; i++) {
+			wsabuf[i].len = (ULONG)send_vec[i].iov_len;
+			wsabuf[i].buf = (char*)send_vec[i].iov_base;
+			//c += ftl::net::internal::send(sock_, (char*)send_vec[i].iov_base, (int)send_vec[i].iov_len, 0);
+		}
 
-	DWORD bytessent;
-	int c = WSASend(sock_, wsabuf.data(), send_size, (LPDWORD)&bytessent, 0, NULL, NULL);
+		DWORD bytessent;
+		c = WSASend(sock_, wsabuf.data(), static_cast<DWORD>(send_size), (LPDWORD)&bytessent, 0, NULL, NULL);
 #else
-	int c = ftl::net::internal::writev(sock_, send_buf_.vector(), (int)send_buf_.vector_size());
+		c = ftl::net::internal::writev(sock_, send_buf_.vector(), (int)send_buf_.vector_size());
 #endif
+	} 
 
 	send_buf_.clear();
 	
diff --git a/components/net/cpp/src/universe.cpp b/components/net/cpp/src/universe.cpp
index d5aeb743a..558619602 100644
--- a/components/net/cpp/src/universe.cpp
+++ b/components/net/cpp/src/universe.cpp
@@ -76,7 +76,7 @@ Universe::Universe(nlohmann::json &config) :
 	// Add an idle timer job to garbage collect peer objects
 	// Note: Important to be a timer job to ensure no other timer jobs are
 	// using the object.
-	ftl::timer::add(ftl::timer::kTimerIdle10, [this](int64_t ts) {
+	garbage_timer_ = ftl::timer::add(ftl::timer::kTimerIdle10, [this](int64_t ts) {
 		if (garbage_.size() > 0) {
 			UNIQUE_LOCK(net_mutex_,lk);
 			if (ftl::pool.n_idle() == ftl::pool.size()) {
@@ -148,13 +148,43 @@ bool Universe::listen(const string &addr) {
 	return l->isListening();
 }
 
+bool Universe::isConnected(const ftl::URI &uri) {
+	UNIQUE_LOCK(net_mutex_,lk);
+	return (peer_by_uri_.find(uri.getBaseURI()) != peer_by_uri_.end());
+}
+
+bool Universe::isConnected(const std::string &s) {
+	ftl::URI uri(s);
+	return isConnected(uri);
+}
+
 Peer *Universe::connect(const string &addr) {
+	ftl::URI u(addr);
+
+	// Check if already connected or if self
+	{
+		UNIQUE_LOCK(net_mutex_,lk);
+		if (peer_by_uri_.find(u.getBaseURI()) != peer_by_uri_.end()) {
+			return peer_by_uri_.at(u.getBaseURI());
+		}
+
+		if (u.getHost() == "localhost" || u.getHost() == "127.0.0.1") {
+			for (const auto *l : listeners_) {
+				if (l->port() == u.getPort()) {
+					throw FTL_Error("Cannot connect to self");
+				}
+			}
+		}
+	}
+
 	auto p = new Peer(addr.c_str(), this, &disp_);
 	if (!p) return nullptr;
 	
 	if (p->status() != Peer::kInvalid) {
 		UNIQUE_LOCK(net_mutex_,lk);
 		peers_.push_back(p);
+		peer_by_uri_[u.getBaseURI()] = p;
 	}
 	
 	_installBindings(p);
@@ -174,7 +204,7 @@ int Universe::waitConnections() {
 	return count;
 }
 
-int Universe::_setDescriptors() {
+SOCKET Universe::_setDescriptors() {
 	//Reset all file descriptors
 	FD_ZERO(&impl_->sfdread_);
 	FD_ZERO(&impl_->sfderror_);
@@ -230,6 +260,13 @@ void Universe::_cleanupPeers() {
 			auto ix = peer_ids_.find(p->id());
 			if (ix != peer_ids_.end()) peer_ids_.erase(ix);
 
+			for (auto i=peer_by_uri_.begin(); i != peer_by_uri_.end(); ++i) {
+				if (i->second == p) {
+					peer_by_uri_.erase(i);
+					break;
+				}
+			}
+
 			i = peers_.erase(i);
 
 			if (p->status() == ftl::net::Peer::kReconnecting) {
@@ -254,6 +291,29 @@ Peer *Universe::getPeer(const UUID &id) const {
 void Universe::_periodic() {
 	auto i = reconnects_.begin();
 	while (i != reconnects_.end()) {
+
+		std::string addr = i->peer->getURI();
+
+		{
+			UNIQUE_LOCK(net_mutex_,lk);
+			ftl::URI u(addr);
+			bool removed = false;
+
+			if (u.getHost() == "localhost" || u.getHost() == "127.0.0.1") {
+				for (const auto *l : listeners_) {
+					if (l->port() == u.getPort()) {
+						LOG(ERROR) << "Cannot connect to self";
+						garbage_.push_back((*i).peer);
+						i = reconnects_.erase(i);
+						removed = true;
+						break;
+					}
+				}
+			}
+
+			if (removed) continue;
+		}
+
 		if ((*i).peer->reconnect()) {
 			UNIQUE_LOCK(net_mutex_,lk);
 			peers_.push_back((*i).peer);
@@ -292,7 +352,7 @@ void Universe::_run() {
 	auto start = std::chrono::high_resolution_clock::now();
 
 	while (active_) {
-		int n = _setDescriptors();
+		SOCKET n = _setDescriptors();
 		int selres = 1;
 
 		// Do periodics
@@ -312,7 +372,7 @@ void Universe::_run() {
 		//Wait for a network event or timeout in 3 seconds
 		block.tv_sec = 0;
 		block.tv_usec = 100000;
-		selres = select(n+1, &impl_->sfdread_, 0, &impl_->sfderror_, &block);
+		selres = select(n+1u, &impl_->sfdread_, 0, &impl_->sfderror_, &block);
 
 		// NOTE Nick: Is it possible that not all the recvs have been called before I
 		// again reach a select call!? What are the consequences of this? A double recv attempt?
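
connect() now deduplicates by base URI and refuses obvious self-connections (a loopback host whose port matches one of this process's own listeners), and _periodic() applies the same guard to queued reconnects so a stale entry cannot keep redialling the local process. A usage sketch from the caller's side, assuming FTL_Error produces an ftl::exception as it does elsewhere in this patch (net and the address are placeholders):

	try {
		if (!net->isConnected("tcp://127.0.0.1:9001")) {
			ftl::net::Peer *p = net->connect("tcp://127.0.0.1:9001");
			if (p) p->waitConnection();
		}
	} catch (const ftl::exception &e) {
		e.ignore();  // e.g. "Cannot connect to self" when a local listener owns that port
	}

Since connect() already returns the existing Peer for a known URI, the isConnected() check is optional; it is mainly useful for code that only wants to query state without dialling.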
diff --git a/components/operators/CMakeLists.txt b/components/operators/CMakeLists.txt
index bc4edb1ea..5eb5cb71c 100644
--- a/components/operators/CMakeLists.txt
+++ b/components/operators/CMakeLists.txt
@@ -57,6 +57,10 @@ target_include_directories(ftloperators PUBLIC
 
 target_link_libraries(ftloperators ftlrender ftlrgbd ftlcommon sgm libstereo Eigen3::Eigen Threads::Threads ${OpenCV_LIBS})
 
+target_precompile_headers(ftloperators REUSE_FROM ftldata)
+
+set_property(TARGET ftloperators PROPERTY CUDA_ARCHITECTURES OFF)
+
 if (BUILD_TESTS)
 	add_subdirectory(test)
 endif()
diff --git a/components/operators/include/ftl/operators/smoothing.hpp b/components/operators/include/ftl/operators/smoothing.hpp
index 0dc463d2e..c42f801ed 100644
--- a/components/operators/include/ftl/operators/smoothing.hpp
+++ b/components/operators/include/ftl/operators/smoothing.hpp
@@ -23,7 +23,7 @@ class HFSmoother : public ftl::operators::Operator {
 
 	private:
 	cv::cuda::GpuMat temp_;
-	ftl::rgbd::Frame frames_[4];
+	//ftl::rgbd::Frame frames_[4];
 };
 
 /**
@@ -43,7 +43,7 @@ class SmoothChannel : public ftl::operators::Operator {
 	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
 
 	private:
-	ftl::rgbd::Frame temp_[6];
+	ftl::cuda::TextureObject<uchar4> temp_[6];
 };
 
 /**
@@ -61,7 +61,7 @@ class SimpleMLS : public ftl::operators::Operator {
 	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
 
 	private:
-	ftl::rgbd::Frame temp_;
+	ftl::data::Frame temp_;
 };
 
 /**
@@ -78,7 +78,7 @@ class ColourMLS : public ftl::operators::Operator {
 	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
 
 	private:
-	ftl::rgbd::Frame temp_;
+	ftl::data::Frame temp_;
 };
 
 /**
@@ -125,7 +125,7 @@ class AggreMLS : public ftl::operators::Operator {
 	ftl::cuda::TextureObject<float4> centroid_vert_;
 	ftl::cuda::TextureObject<half4> normals_horiz_;
 
-	ftl::rgbd::Frame temp_;
+	ftl::data::Frame temp_;
 };
 
 /**
@@ -145,7 +145,7 @@ class AdaptiveMLS : public ftl::operators::Operator {
 	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
 
 	private:
-	ftl::rgbd::Frame temp_;
+	ftl::data::Frame temp_;
 };
 
 }
diff --git a/components/operators/src/aruco.cpp b/components/operators/src/aruco.cpp
index 34fcfca33..02ed251e2 100644
--- a/components/operators/src/aruco.cpp
+++ b/components/operators/src/aruco.cpp
@@ -19,6 +19,7 @@ using cv::Point2f;
 using cv::Vec3d;
 
 using std::vector;
+using std::list;
 
 static cv::Mat rmat(cv::Vec3d &rvec) {
 	cv::Mat R(cv::Size(3, 3), CV_64FC1);
@@ -55,7 +56,7 @@ ArUco::ArUco(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
 	channel_in_ = Channel::Colour;
 	channel_out_ = Channel::Shapes3D;
 
-	cfg->on("dictionary", [this,cfg](const ftl::config::Event &e) {
+	cfg->on("dictionary", [this,cfg]() {
 		dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
 	});
 }
@@ -80,7 +81,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
 		//Mat im = in.get<Mat>(channel_in_);
 		// FIXME: Use internal stream here.
 		Mat im; // = in.fastDownload(channel_in_, cv::cuda::Stream::Null());
-		cv::cvtColor(in.fastDownload(channel_in_, cv::cuda::Stream::Null()), im, cv::COLOR_BGRA2BGR);
+		cv::cvtColor(in.get<cv::Mat>(channel_in_), im, cv::COLOR_BGRA2BGR);
 
 		Mat K = in.getLeftCamera().getCameraMatrix();
 		Mat dist = cv::Mat::zeros(cv::Size(5, 1), CV_64FC1);
@@ -98,9 +99,9 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
 			cv::aruco::estimatePoseSingleMarkers(corners, marker_size_, K, dist, rvecs, tvecs);
 		}
 
-		vector<Shape3D> result;
+		list<Shape3D> result;
 		if (out.hasChannel(channel_out_)) {
-			out.get(channel_out_, result);
+			result = out.get<list<Shape3D>>(channel_out_);
 		}
 
 		for (size_t i = 0; i < rvecs.size(); i++) {
@@ -114,7 +115,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
 			}
 		}
 
-		out.create(channel_out_, result);
+		out.create<list<Shape3D>>(channel_out_).list = result;
 
 		if (debug_) {
 			cv::aruco::drawDetectedMarkers(im, corners, ids);
@@ -128,10 +129,10 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
 		// TODO: should be uploaded by operator which requires data on GPU
 		//in.upload(channel_in_);
 		if (debug_) {
-			if (in.isGPU(channel_in_)) {
+			//if (in.isGPU(channel_in_)) {
 				cv::cvtColor(im, im, cv::COLOR_BGR2BGRA);
-				out.get<cv::cuda::GpuMat>(channel_in_).upload(im);
-			} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
+				out.set<cv::cuda::GpuMat>(channel_in_).upload(im);
+			//} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
 		}
 		return true;
 	}));
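
These hunks also show the new frame data API in one place: typed get<T>() for reads, set<T>() to overwrite an existing channel, and create<T>() returning the stored object (here a wrapper whose .list member receives the shapes), replacing the old out-parameter get(channel, value) style. A condensed sketch of the same Shapes3D round-trip (frame and marker_shape are stand-in names):

	std::list<ftl::codecs::Shape3D> shapes;
	if (frame.hasChannel(Channel::Shapes3D)) {
		// Typed read of the stored list.
		shapes = frame.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
	}
	shapes.push_back(marker_shape);
	// (Re)create the channel and assign through its list wrapper.
	frame.create<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D).list = shapes;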
diff --git a/components/operators/src/clipping.cpp b/components/operators/src/clipping.cpp
index 7772fad3d..fb51340e5 100644
--- a/components/operators/src/clipping.cpp
+++ b/components/operators/src/clipping.cpp
@@ -59,16 +59,16 @@ bool ClipScene::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStr
 	bool no_clip = config()->value("no_clip", false);
 	bool clip_colour = config()->value("clip_colour", false);
 
-	std::vector<ftl::codecs::Shape3D> shapes;
+	std::list<ftl::codecs::Shape3D> shapes;
 	if (in.hasChannel(Channel::Shapes3D)) {
-		in.get(Channel::Shapes3D, shapes);
+		shapes = in.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
 	}
 	shapes.push_back(shape);
-	in.create(Channel::Shapes3D, shapes);
+	in.create<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D).list = shapes;
 		
 	for (size_t i=0; i<in.frames.size(); ++i) {	
 		if (!in.hasFrame(i)) continue;
-		auto &f = in.frames[i];
+		auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 		//auto *s = in.sources[i];
 
 		if (f.hasChannel(Channel::Depth)) {
@@ -78,11 +78,11 @@ bool ClipScene::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStr
 			sclip.origin = sclip.origin.getInverse() * pose;
 			if (!no_clip) {
 				if (clip_colour) {
-					f.clearPackets(Channel::Colour);
-					f.clearPackets(Channel::Depth);
+					f.set<ftl::rgbd::VideoFrame>(Channel::Colour);
+					f.set<ftl::rgbd::VideoFrame>(Channel::Depth);
 					ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getTexture<uchar4>(Channel::Colour), f.getLeftCamera(), sclip, stream);
 				} else {
-					f.clearPackets(Channel::Depth);
+					f.set<ftl::rgbd::VideoFrame>(Channel::Depth);
 					ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getLeftCamera(), sclip, stream);
 				}
 			}
diff --git a/components/operators/src/colours.cpp b/components/operators/src/colours.cpp
index 610912076..ad48acd1f 100644
--- a/components/operators/src/colours.cpp
+++ b/components/operators/src/colours.cpp
@@ -15,6 +15,11 @@ ColourChannels::~ColourChannels() {
 }
 
 bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
+	if (!in.hasChannel(Channel::Colour)) {
+		in.message(ftl::data::Message::Warning_MISSING_CHANNEL, "No colour channel found");
+		return false;
+	}
+
 	auto &col = in.get<cv::cuda::GpuMat>(Channel::Colour);
 
 	// Convert colour from BGR to BGRA if needed
@@ -25,6 +30,7 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStre
 		cv::cuda::swap(col, temp_);
 		cv::cuda::cvtColor(temp_,col, cv::COLOR_BGR2BGRA, 0, cvstream);*/
 
+		in.message(ftl::data::Message::Error_BAD_FORMAT, "Bad colour format");
 		throw FTL_Error("Left colour must be 4 channels");
 	}
 
@@ -39,14 +45,22 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStre
 			cv::cuda::swap(col, temp_);
 			cv::cuda::cvtColor(temp_,col, cv::COLOR_BGR2BGRA, 0, cvstream);*/
 
+			in.message(ftl::data::Message::Error_BAD_FORMAT, "Bad colour format");
 			throw FTL_Error("Right colour must be 4 channels");
 		}
 	}
 
 	//in.resetTexture(Channel::Colour);
-	in.createTexture<uchar4>(Channel::Colour, true);
+	const auto &vf = in.get<ftl::rgbd::VideoFrame>(Channel::Colour);
+	if (vf.isGPU()) {
+		in.createTexture<uchar4>(Channel::Colour, true);
+	}
+
 	if (in.hasChannel(Channel::Right)) {
-		in.createTexture<uchar4>(Channel::Right, true);
+		const auto &vf = in.get<ftl::rgbd::VideoFrame>(Channel::Right);
+		if (vf.isGPU()) {
+			in.createTexture<uchar4>(Channel::Right, true);
+		}
 	}
 
 	/*if (in.hasChannel(Channel::Depth)) {
diff --git a/components/operators/src/depth.cpp b/components/operators/src/depth.cpp
index 19696f67f..cdeac427f 100644
--- a/components/operators/src/depth.cpp
+++ b/components/operators/src/depth.cpp
@@ -59,10 +59,10 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
 	max_disc_ = cfg->value("max_discontinuity", 0.1f);
 	channel_ = Channel::Depth;
 
-	cfg->on("edge_discontinuity", [this](const ftl::config::Event &e) {
+	cfg->on("edge_discontinuity", [this]() {
 		edge_disc_ = config()->value("edge_discontinuity", 0.04f);
 	});
-	cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
+	cfg->on("max_discontinuity", [this]() {
 		max_disc_ = config()->value("max_discontinuity", 0.1f);
 	});
 
@@ -82,10 +82,10 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg, const std::tu
 	max_disc_ = cfg->value("max_discontinuity", 0.1f);
 	channel_ = std::get<0>(p);
 
-	cfg->on("edge_discontinuity", [this](const ftl::config::Event &e) {
+	cfg->on("edge_discontinuity", [this]() {
 		edge_disc_ = config()->value("edge_discontinuity", 0.04f);
 	});
-	cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
+	cfg->on("max_discontinuity", [this]() {
 		max_disc_ = config()->value("max_discontinuity", 0.1f);
 	});
 
@@ -105,7 +105,7 @@ bool DepthBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out,
 
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 	const GpuMat &rgb = in.get<GpuMat>(Channel::Colour);
-	GpuMat &depth = in.get<GpuMat>(channel_);
+	const GpuMat &depth = in.get<GpuMat>(channel_);
 
 	UNUSED(rgb);
 	UNUSED(depth);
@@ -128,7 +128,7 @@ DepthChannel::DepthChannel(ftl::Configurable *cfg) : ftl::operators::Operator(cf
 }
 
 DepthChannel::~DepthChannel() {
-
+	if (pipe_) delete pipe_;
 }
 
 void DepthChannel::_createPipeline(size_t size) {
@@ -178,12 +178,12 @@ bool DepthChannel::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 
 	for (size_t i=0; i<in.frames.size(); ++i) {
 		if (!in.hasFrame(i)) continue;
-		auto &f = in.frames[i];
+		auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 		if (!f.hasChannel(Channel::Depth) && f.hasChannel(Channel::Right)) {
 			_createPipeline(in.frames.size());
 
-			cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
-			cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
+			const cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
+			const cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
 			cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
 			depth.create(left.size(), CV_32FC1);
 
@@ -217,8 +217,8 @@ bool DepthChannel::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream
 	if (!f.hasChannel(Channel::Depth) && f.hasChannel(Channel::Right)) {
 		_createPipeline(1);
 
-		cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
-		cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
+		const cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
+		const cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
 		cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
 		depth.create(depth_size_, CV_32FC1);
 
diff --git a/components/operators/src/detectandtrack.cpp b/components/operators/src/detectandtrack.cpp
index 7135d2e0b..4e9b1b729 100644
--- a/components/operators/src/detectandtrack.cpp
+++ b/components/operators/src/detectandtrack.cpp
@@ -28,7 +28,7 @@ bool DetectAndTrack::init() {
 	fname_ = config()->value<string>("filename", FTL_LOCAL_DATA_ROOT "/haarcascades/haarcascade_frontalface_default.xml");
 	debug_ = config()->value<bool>("debug", false);
 
-	config()->on("debug", [this](const ftl::config::Event &e) {
+	config()->on("debug", [this]() {
 		debug_ = config()->value<bool>("debug", false);
 	});
 
@@ -229,7 +229,7 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
 		Mat im;  // TODO: Keep this as an internal buffer? Perhaps page locked.
 
 		// FIXME: Use internal stream here.
-		cv::cvtColor(in.fastDownload(channel_in_, cv::cuda::Stream::Null()), im, cv::COLOR_BGRA2BGR);
+		cv::cvtColor(in.get<cv::Mat>(channel_in_), im, cv::COLOR_BGRA2BGR);
 
 		if (im.empty()) {
 			throw FTL_Error("Empty image in face detection");
@@ -272,7 +272,7 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
 
 		cv::Mat depth;
 		if (in.hasChannel(Channel::Depth)) {
-			depth = in.fastDownload(Channel::Depth, cv::cuda::Stream::Null());
+			depth = in.get<cv::Mat>(Channel::Depth);
 		}
 
 		std::vector<ftl::codecs::Face> result;
@@ -292,15 +292,15 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
 				cv::rectangle(im, tracked.object, cv::Scalar(0, 0, 255), 1);
 			}
 		}
-		out.create(channel_out_, result);
+		out.create<std::vector<ftl::codecs::Face>>(channel_out_) = result;
 
 		//in.upload(channel_in_);
 		// FIXME: This is a bad idea.
 		if (debug_) {
-			if (in.isGPU(channel_in_)) {
+			//if (in.isGPU(channel_in_)) {
 				cv::cvtColor(im, im, cv::COLOR_BGR2BGRA);
-				out.get<cv::cuda::GpuMat>(channel_in_).upload(im);
-			} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
+				out.set<cv::cuda::GpuMat>(channel_in_).upload(im);
+			//} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
 		}
 
 		return true;
diff --git a/components/operators/src/disparity/bilateral_filter.cpp b/components/operators/src/disparity/bilateral_filter.cpp
index 4b14c3bb5..7ffce560c 100644
--- a/components/operators/src/disparity/bilateral_filter.cpp
+++ b/components/operators/src/disparity/bilateral_filter.cpp
@@ -56,7 +56,7 @@ bool DisparityBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out
 
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 	const GpuMat &rgb = in.get<GpuMat>(Channel::Colour);
-	GpuMat &disp_in = in.get<GpuMat>(Channel::Disparity);
+	const GpuMat &disp_in = in.get<GpuMat>(Channel::Disparity);
 	GpuMat &disp_out = out.create<GpuMat>(Channel::Disparity);
 	disp_int_.create(disp_in.size(), disp_in.type());
 
diff --git a/components/operators/src/disparity/fixstars_sgm.cpp b/components/operators/src/disparity/fixstars_sgm.cpp
index 8ad3a41fb..027402273 100644
--- a/components/operators/src/disparity/fixstars_sgm.cpp
+++ b/components/operators/src/disparity/fixstars_sgm.cpp
@@ -87,7 +87,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
 		LOG(WARNING) << "Invalid value for max_disp, using default value (256)";
 	}
 
-	cfg->on("P1", [this, cfg](const ftl::config::Event&) {
+	cfg->on("P1", [this, cfg]() {
 		int P1 = cfg->value("P1", 0);
 		if (P1 <= 0) {
 			LOG(WARNING) << "Invalid value for P1 (" << P1 << ")";
@@ -98,7 +98,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
 		}
 	});
 
-	cfg->on("P2", [this, cfg](const ftl::config::Event&) {
+	cfg->on("P2", [this, cfg]() {
 		int P2 = cfg->value("P2", 0);
 		if (P2 < P1_) {
 			LOG(WARNING) << "Invalid value for P2 (" << P2 << ")";
@@ -109,7 +109,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
 		}
 	});
 
-	cfg->on("uniqueness", [this, cfg](const ftl::config::Event&) {
+	cfg->on("uniqueness", [this, cfg]() {
 		double uniqueness = cfg->value("uniqueness", 0.0);
 		if (uniqueness < 0.0 || uniqueness > 1.0) {
 			LOG(WARNING) << "Invalid value for uniqueness (" << uniqueness << ")";
@@ -122,11 +122,11 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
 
 	updateP2Parameters();
 
-	cfg->on("canny_low", [this, cfg](const ftl::config::Event&) {
+	cfg->on("canny_low", [this, cfg]() {
 		updateP2Parameters();
 	});
 
-	cfg->on("canny_high", [this, cfg](const ftl::config::Event&) {
+	cfg->on("canny_high", [this, cfg]() {
 		updateP2Parameters();
 	});
 }
@@ -186,7 +186,7 @@ bool FixstarsSGM::apply(Frame &in, Frame &out, cudaStream_t stream) {
 	}
 
 	bool has_estimate = in.hasChannel(Channel::Disparity);
-	auto &disp = (!has_estimate) ? out.create<GpuMat>(Channel::Disparity, Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
+	const auto &disp = (!has_estimate) ? out.create<ftl::rgbd::VideoFrame>(Channel::Disparity).createGPU(Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
 
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 	cv::cuda::cvtColor(l, lbw_, cv::COLOR_BGRA2GRAY, 0, cvstream);
@@ -227,10 +227,10 @@ bool FixstarsSGM::apply(Frame &in, Frame &out, cudaStream_t stream) {
 		cv::cuda::cvtColor(P2_map_, out.get<GpuMat>(Channel::Colour), cv::COLOR_GRAY2BGRA);
 	}
 	if (config()->value("show_rpe", false)) {
-		ftl::cuda::show_rpe(disp, l, r, 100.0f, stream);
+		ftl::cuda::show_rpe(disp, in.set<GpuMat>(Channel::Left), r, 100.0f, stream);
 	}
 	if (config()->value("show_disp_density", false)) {
-		ftl::cuda::show_disp_density(disp, l, 100.0f, stream);
+		ftl::cuda::show_disp_density(disp, in.set<GpuMat>(Channel::Left), 100.0f, stream);
 	}
 
 	//disp_int_.convertTo(disp, CV_32F, 1.0f / 16.0f, cvstream);
diff --git a/components/operators/src/disparity/libstereo.cpp b/components/operators/src/disparity/libstereo.cpp
index 22aaac630..8531dd98f 100644
--- a/components/operators/src/disparity/libstereo.cpp
+++ b/components/operators/src/disparity/libstereo.cpp
@@ -56,7 +56,7 @@ bool StereoDisparity::apply(Frame &in, Frame &out, cudaStream_t stream) {
 	disp32f_.create(l.size(), CV_32FC1);
 
 	bool has_estimate = in.hasChannel(Channel::Disparity);
-	auto &disp = (!has_estimate) ? out.create<GpuMat>(Channel::Disparity, Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
+	auto &disp = (!has_estimate) ? out.create<ftl::rgbd::VideoFrame>(Channel::Disparity).createGPU(Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
 
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 
diff --git a/components/operators/src/disparity/optflow_smoothing.cpp b/components/operators/src/disparity/optflow_smoothing.cpp
index e2f1e6f93..c69b0727b 100644
--- a/components/operators/src/disparity/optflow_smoothing.cpp
+++ b/components/operators/src/disparity/optflow_smoothing.cpp
@@ -47,7 +47,7 @@ void OpticalFlowTemporalSmoothing::_init(ftl::Configurable* cfg) {
 
 	threshold_ = cfg->value("threshold", 5.0f);
 
-	cfg->on("threshold", [this](const ftl::config::Event&) {
+	cfg->on("threshold", [this]() {
 		float threshold = config()->value("threshold", 5.0f);
 		if (threshold < 0.0) {
 			LOG(WARNING) << "invalid threshold " << threshold << ", value must be positive";
@@ -58,7 +58,7 @@ void OpticalFlowTemporalSmoothing::_init(ftl::Configurable* cfg) {
 		}
 	});
 
-	cfg->on("history_size", [this, &cfg](const ftl::config::Event&) {
+	cfg->on("history_size", [this, &cfg]() {
 		int n_max = cfg->value("history_size", 7);
 
 		if (n_max < 1) {
@@ -89,14 +89,14 @@ bool OpticalFlowTemporalSmoothing::apply(Frame &in, Frame &out, cudaStream_t str
 
 	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
 	const cv::cuda::GpuMat &optflow = in.get<cv::cuda::GpuMat>(Channel::Flow);
-	cv::cuda::GpuMat &data = out.get<cv::cuda::GpuMat>(channel_);
+	cv::cuda::GpuMat &data = out.set<cv::cuda::GpuMat>(channel_);
 	
 	if (data.size() != size_) {
 		size_ = data.size();
 		if (!init()) { return false; }
 	}
 
-	ftl::cuda::optflow_filter(data, optflow, history_, in.get<cv::cuda::GpuMat>(Channel::Support1), n_max_, threshold_, config()->value("filling", false), cvstream);
+	ftl::cuda::optflow_filter(data, optflow, history_, in.set<cv::cuda::GpuMat>(Channel::Support1), n_max_, threshold_, config()->value("filling", false), cvstream);
 	
 	return true;
 }
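
A related convention runs through all of the operator changes: get<cv::cuda::GpuMat>() now yields read-only access, so the result is bound to a const reference, while anything that writes into a channel in place goes through set<cv::cuda::GpuMat>(), which presumably also marks the channel as modified (the Frame class itself is outside this diff). Roughly:

	const cv::cuda::GpuMat &src = frame.get<cv::cuda::GpuMat>(Channel::Colour);   // read only
	cv::cuda::GpuMat &dst = frame.set<cv::cuda::GpuMat>(Channel::Disparity);      // mutable access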
diff --git a/components/operators/src/fusion/mvmls.cpp b/components/operators/src/fusion/mvmls.cpp
index 0dfe26b14..c16b3ab43 100644
--- a/components/operators/src/fusion/mvmls.cpp
+++ b/components/operators/src/fusion/mvmls.cpp
@@ -47,8 +47,19 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 	bool show_consistency = config()->value("show_consistency", false);
 	bool show_adjustment = config()->value("show_adjustment", false);
 
-    if (in.frames.size() < 1) return false;
-    auto size = in.firstFrame().get<GpuMat>(Channel::Depth).size();
+    if (in.frames.size() < 1 || in.count == 0) return false;
+	cv::Size size(0,0);
+	for (auto &f : in.frames) {
+		if (f.hasChannel(Channel::Depth)) {
+			size = f.get<GpuMat>(Channel::Depth).size();
+			break;
+		}
+	}
+    
+	if (size.width == 0) {
+		in.firstFrame().message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in MVMLS operator");
+		return false;
+	}
 
     // Make sure we have enough buffers
     while (normals_horiz_.size() < in.frames.size()) {
@@ -62,7 +73,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
     for (size_t i=0; i<in.frames.size(); ++i) {
 		if (!in.hasFrame(i)) continue;
 
-        auto &f = in.frames[i];
+        auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 	    auto size = f.get<GpuMat>(Channel::Depth).size();
 	    centroid_horiz_[i]->create(size.height, size.width);
 	    normals_horiz_[i]->create(size.height, size.width);
@@ -77,12 +88,12 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
         }
 
         // Create required channels
-        f.create<GpuMat>(Channel::Confidence, Format<float>(size));
+        f.create<ftl::rgbd::VideoFrame>(Channel::Confidence).createGPU(Format<float>(size));
         f.createTexture<float>(Channel::Confidence);
-        f.create<GpuMat>(Channel::Screen, Format<short2>(size));
+        f.create<ftl::rgbd::VideoFrame>(Channel::Screen).createGPU(Format<short2>(size));
         f.createTexture<short2>(Channel::Screen);
 
-        f.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
+        f.set<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
     }
 
     //for (int iter=0; iter<iters; ++iter) {
@@ -95,7 +106,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
             for (size_t i=0; i<in.frames.size(); ++i) {
 				if (!in.hasFrame(i)) continue;
 
-                auto &f1 = in.frames[i];
+                auto &f1 = in.frames[i].cast<ftl::rgbd::Frame>();
                 //f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream);
                 //f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
 
@@ -108,7 +119,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 
                     //LOG(INFO) << "Running phase1";
 
-                    auto &f2 = in.frames[j];
+                    auto &f2 = in.frames[j].cast<ftl::rgbd::Frame>();
                     //auto s1 = in.sources[i];
                     //auto s2 = in.sources[j];
 
@@ -345,7 +356,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
             // Redo normals
             for (size_t i=0; i<in.frames.size(); ++i) {
 				if (!in.hasFrame(i)) continue;
-                auto &f = in.frames[i];
+                auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
                 ftl::cuda::normals(
                     f.getTexture<half4>(Channel::Normals),
                     f.getTexture<float>(Channel::Depth),
@@ -411,7 +422,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 			for (size_t i=0; i<in.frames.size(); ++i) {
 				if (!in.hasFrame(i)) continue;
 
-				auto &f = in.frames[i];
+				auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 				//auto *s = in.sources[i];
 
 				// Clear data
@@ -465,7 +476,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 			if (do_aggr) {
 				for (size_t i=0; i<in.frames.size(); ++i) {
 					if (!in.hasFrame(i)) continue;
-					auto &f1 = in.frames[i];
+					auto &f1 = in.frames[i].cast<ftl::rgbd::Frame>();
 					//f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream);
 					//f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
 
@@ -478,7 +489,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 
 						//LOG(INFO) << "Running phase1";
 
-						auto &f2 = in.frames[j];
+						auto &f2 = in.frames[j].cast<ftl::rgbd::Frame>();
 						//auto s1 = in.sources[i];
 						//auto s2 = in.sources[j];
 
@@ -520,7 +531,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
 			// Normalise aggregations and move the points
 			for (size_t i=0; i<in.frames.size(); ++i) {
 				if (!in.hasFrame(i)) continue;
-				auto &f = in.frames[i];
+				auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 				//auto *s = in.sources[i];
 				auto size = f.get<GpuMat>(Channel::Depth).size();
 
diff --git a/components/operators/src/gt_analysis.cpp b/components/operators/src/gt_analysis.cpp
index 066f04d94..c977a814b 100644
--- a/components/operators/src/gt_analysis.cpp
+++ b/components/operators/src/gt_analysis.cpp
@@ -60,7 +60,7 @@ bool GTAnalysis::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 	}
 
 	std::vector<std::string> msgs;
-	if (in.hasChannel(Channel::Messages)) { in.get(Channel::Messages, msgs); }
+	if (in.hasChannel(Channel::Messages)) { msgs = in.get<std::vector<std::string>>(Channel::Messages); }
 
 	bool use_disp = config()->value("use_disparity", true);
 	auto &dmat = in.get<cv::cuda::GpuMat>(Channel::Depth);
@@ -103,7 +103,7 @@ bool GTAnalysis::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 		else 			{ report(msgs, err, o, npixels, "mm", 1000.0); }
 	}
 
-	in.create(Channel::Messages, msgs);
+	in.create<std::vector<std::string>>(Channel::Messages) = msgs;
 
 	return true;
 }
diff --git a/components/operators/src/mask.cpp b/components/operators/src/mask.cpp
index e39fcabea..274f2de71 100644
--- a/components/operators/src/mask.cpp
+++ b/components/operators/src/mask.cpp
@@ -25,7 +25,10 @@ bool DiscontinuityMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
 	float noiseThresh = config()->value("noise_thresh", 0.8f);
 	float areaMax = config()->value("area_max", 26.0f);  // Cross support radius squared + 1
 
-	if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Support1)) return false;
+	if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Support1)) {
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth or Support Channel in Mask Operator");
+		return false;
+	}
 
 	if (!out.hasChannel(Channel::Mask)) {
 		auto &m = out.create<cv::cuda::GpuMat>(Channel::Mask);
@@ -99,7 +102,7 @@ bool CullDiscontinuity::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
 	unsigned int radius = config()->value("radius", 0);
 	bool inverted = config()->value("invert", false);
 	
-	out.clearPackets(Channel::Depth);  // Force reset
+	out.set<ftl::rgbd::VideoFrame>(Channel::Depth);  // Force reset
 	ftl::cuda::cull_mask(
 		in.createTexture<uint8_t>(Channel::Mask),
 		out.createTexture<float>(Channel::Depth),
diff --git a/components/operators/src/normals.cpp b/components/operators/src/normals.cpp
index aefd04623..09618fa48 100644
--- a/components/operators/src/normals.cpp
+++ b/components/operators/src/normals.cpp
@@ -18,7 +18,9 @@ Normals::~Normals() {
 
 bool Normals::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
 	if (!in.hasChannel(Channel::Depth)) {
-		throw FTL_Error("Missing depth channel in Normals operator");
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
+		//throw FTL_Error("Missing depth channel in Normals operator");
+		return false;
 	}
 
 	if (out.hasChannel(Channel::Normals)) {
@@ -47,7 +49,9 @@ NormalDot::~NormalDot() {
 
 bool NormalDot::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
 	if (!in.hasChannel(Channel::Depth)) {
-		throw FTL_Error("Missing depth channel in Normals operator");
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
+		//throw FTL_Error("Missing depth channel in Normals operator");
+		return false;
 	}
 
 	if (out.hasChannel(Channel::Normals)) {
@@ -80,6 +84,7 @@ bool SmoothNormals::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
     int radius = max(0, min(config()->value("radius",1), 5));
 
 	if (!in.hasChannel(Channel::Depth)) {
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
 		throw FTL_Error("Missing depth channel in SmoothNormals operator");
 	}
 
diff --git a/components/operators/src/nvopticalflow.cpp b/components/operators/src/nvopticalflow.cpp
index 2426c1019..27a623b39 100644
--- a/components/operators/src/nvopticalflow.cpp
+++ b/components/operators/src/nvopticalflow.cpp
@@ -74,7 +74,7 @@ bool NVOpticalFlow::apply(Frame &in, Frame &out, cudaStream_t stream) {
 	auto &flow1 = out.create<GpuMat>(channel_out_[0]);
 
 	cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[0]), left_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
-	cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[1]), right_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
+	if (both_channels) cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[1]), right_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
 
 	// TODO: Use optical flow confidence output, perhaps combined with a
 	// sensitivity adjustment
diff --git a/components/operators/src/operator.cpp b/components/operators/src/operator.cpp
index 617514afd..07928ffa3 100644
--- a/components/operators/src/operator.cpp
+++ b/components/operators/src/operator.cpp
@@ -12,7 +12,7 @@ using ftl::rgbd::Source;
 Operator::Operator(ftl::Configurable *config) : config_(config) {
 	enabled_ = config_->value("enabled", true);
 
-	config_->on("enabled", [this](const ftl::config::Event &e) {
+	config_->on("enabled", [this]() {
 		enabled_ = config_->value("enabled", true);
 	});
 }
@@ -38,6 +38,15 @@ Graph::Graph(nlohmann::json &config) : ftl::Configurable(config) {
 }
 
 Graph::~Graph() {
+	// Cleanup configurables
+	for (auto &c : configs_) {
+		delete c.second;
+	}
+	for (auto &o : operators_) {
+		for (auto *i : o.instances) {
+			delete i;
+		}
+	}
 	cudaStreamDestroy(stream_);
 }
 
@@ -64,16 +73,17 @@ bool Graph::apply(FrameSet &in, FrameSet &out, cudaStream_t stream) {
 			}
 
 			for (size_t j=0; j<in.frames.size(); ++j) {
-				if (!in.hasFrame(j)) continue;
+				if (!in.hasFrame(j)) in.frames[j].message(ftl::data::Message::Warning_INCOMPLETE_FRAME, "Frame not complete in Pipeline");
 				
 				int iix = (i.instances[0]->isMemoryHeavy()) ? 0 : j&0x1;
 				auto *instance = i.instances[iix];
 
 				if (instance->enabled()) {
 					try {
-						instance->apply(in.frames[j], out.frames[j], stream_actual);
+						instance->apply(in.frames[j].cast<ftl::rgbd::Frame>(), out.frames[j].cast<ftl::rgbd::Frame>(), stream_actual);
 					} catch (const std::exception &e) {
 						LOG(ERROR) << "Operator exception for '" << instance->config()->getID() << "': " << e.what();
+						in.frames[j].message(ftl::data::Message::Error_OPERATOR_EXCEPTION, "Operator exception");
 						success = false;
 						break;
 					}
@@ -88,6 +98,7 @@ bool Graph::apply(FrameSet &in, FrameSet &out, cudaStream_t stream) {
 					instance->apply(in, out, stream_actual);
 				} catch (const std::exception &e) {
 					LOG(ERROR) << "Operator exception for '" << instance->config()->getID() << "': " << e.what();
+					if (in.frames.size() > 0) in.frames[0].message(ftl::data::Message::Error_OPERATOR_EXCEPTION, "Operator exception");
 					success = false;
 					break;
 				}
@@ -138,6 +149,7 @@ bool Graph::apply(Frame &in, Frame &out, cudaStream_t stream) {
 			} catch (const std::exception &e) {
 				LOG(ERROR) << "Operator exception for '" << instance->config()->getID() << "': " << e.what();
 				success = false;
+				out.message(ftl::data::Message::Error_OPERATOR_EXCEPTION, "Operator exception");
 				break;
 			}
 		}
diff --git a/components/operators/src/poser.cpp b/components/operators/src/poser.cpp
index f81c11d39..2f8a166cf 100644
--- a/components/operators/src/poser.cpp
+++ b/components/operators/src/poser.cpp
@@ -73,32 +73,39 @@ bool Poser::set(const std::string &name, const Eigen::Matrix4d &pose) {
 
 bool Poser::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStream_t stream) {
     if (in.hasChannel(Channel::Shapes3D)) {
-        std::vector<ftl::codecs::Shape3D> transforms;
-        in.get(Channel::Shapes3D, transforms);
+        const auto &transforms = in.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
 
 		//LOG(INFO) << "Found shapes 3D global: " << (int)transforms.size();
 
         for (auto &t : transforms) {
         //    LOG(INFO) << "Have FS transform: " << t.label;
-			add(t, in.id, 255);
+			add(t, in.frameset(), 255);
         }
     }
 
 	for (size_t i=0; i<in.frames.size(); ++i) {
-        if (in.hasFrame(i)) {
-            auto &f = in.frames[i];
+        //if (in.hasFrame(i)) {
+            auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
 
             if (f.hasChannel(Channel::Shapes3D)) {
-                std::vector<ftl::codecs::Shape3D> transforms;
-                f.get(Channel::Shapes3D, transforms);
+                const auto &transforms = f.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
 
 				//LOG(INFO) << "Found shapes 3D: " << (int)transforms.size();
 
                 for (auto &t : transforms) {
-                    add(t, in.id, i);
+                    add(t, in.frameset(), i);
                 }
             }
-        }
+
+			if (f.hasChannel(Channel::Pose)) {
+				ftl::codecs::Shape3D cam;
+				cam.id = 0;
+				cam.label = f.name();
+				cam.pose = f.getPose().cast<float>();
+				cam.type = ftl::codecs::Shape3DType::CAMERA;
+				add(cam, in.frameset(), i);
+			}
+        //}
     }
 
     string pose_ident = config()->value("pose_ident",string("default"));
@@ -106,7 +113,7 @@ bool Poser::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStream_
         auto p = pose_db__.find(pose_ident);
         if (p != pose_db__.end()) {
 			(*p).second.locked = config()->value("locked",false);
-            in.pose = (*p).second.pose;
+            in.cast<ftl::rgbd::Frame>().setPose() = (config()->value("inverse",false)) ? (*p).second.pose.inverse() : (*p).second.pose;
         } else {
             LOG(WARNING) << "Pose not found: " << pose_ident;
         }
diff --git a/components/operators/src/segmentation.cpp b/components/operators/src/segmentation.cpp
index d2201a088..91a314da2 100644
--- a/components/operators/src/segmentation.cpp
+++ b/components/operators/src/segmentation.cpp
@@ -16,9 +16,16 @@ CrossSupport::~CrossSupport() {
 bool CrossSupport::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
 	bool use_mask = config()->value("discon_support", false);
 
+	if (!in.hasChannel(Channel::Colour)) {
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Colour channel in Support operator");
+		return false;
+	}
 
 	if (use_mask && !in.hasChannel(Channel::Support2)) {
-		if (!in.hasChannel(Channel::Mask)) return false;
+		if (!in.hasChannel(Channel::Mask)) {
+			out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Mask channel in Support operator");
+			return false;
+		}
 		ftl::cuda::support_region(
 			in.createTexture<uint8_t>(Channel::Mask),
 			out.createTexture<uchar4>(Channel::Support2, ftl::rgbd::Format<uchar4>(in.get<cv::cuda::GpuMat>(Channel::Colour).size())),
diff --git a/components/operators/src/smoothing.cpp b/components/operators/src/smoothing.cpp
index 739162622..0690744f8 100644
--- a/components/operators/src/smoothing.cpp
+++ b/components/operators/src/smoothing.cpp
@@ -89,7 +89,7 @@ bool SmoothChannel::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
 	float scale = 1.0f;
 
 	// Clear to max smoothing
-	out.create<GpuMat>(Channel::Smoothing, Format<float>(width, height)).setTo(cv::Scalar(1.0f));
+	out.create<ftl::rgbd::VideoFrame>(Channel::Smoothing).createGPU(Format<float>(width, height)).setTo(cv::Scalar(1.0f));
 
 	// Reduce to nearest
 	ftl::cuda::smooth_channel(
@@ -108,14 +108,16 @@ bool SmoothChannel::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
 		height /= 2;
 		scale *= 2.0f;
 
+		temp_[i].create(width,height);
+
 		ftl::rgbd::Camera scaledCam = in.getLeftCamera().scaled(width, height);
 
 		// Downscale images for next pass
-		cv::cuda::resize(in.get<GpuMat>(Channel::Colour), temp_[i].create<GpuMat>(Channel::Colour), cv::Size(width, height), 0.0, 0.0, cv::INTER_LINEAR);
+		cv::cuda::resize(in.get<GpuMat>(Channel::Colour), temp_[i].to_gpumat(), cv::Size(width, height), 0.0, 0.0, cv::INTER_LINEAR);
 		//cv::cuda::resize(in.get<GpuMat>(Channel::Depth), temp_[i].create<GpuMat>(Channel::Depth), cv::Size(width, height), 0.0, 0.0, cv::INTER_NEAREST);
 
 		ftl::cuda::smooth_channel(
-			temp_[i].createTexture<uchar4>(Channel::Colour),
+			temp_[i],
 			//temp_[i].createTexture<float>(Channel::Depth),
 			out.getTexture<float>(Channel::Smoothing),
 			scaledCam,
@@ -133,7 +135,7 @@ bool SmoothChannel::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
 
 // ===== MLS ===================================================================
 
-SimpleMLS::SimpleMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
+SimpleMLS::SimpleMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg), temp_(ftl::data::Frame::make_standalone()) {
 
 }
 
@@ -146,6 +148,8 @@ bool SimpleMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 	int iters = config()->value("mls_iterations", 1);
 	int radius = config()->value("mls_radius",2);
 
+	auto &temp = temp_.cast<ftl::rgbd::Frame>();
+
 	if (!in.hasChannel(Channel::Normals)) {
 		/*ftl::cuda::normals(
 			in.createTexture<float4>(Channel::Normals, ftl::rgbd::Format<float4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
@@ -160,9 +164,9 @@ bool SimpleMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 	for (int i=0; i<iters; ++i) {
 		ftl::cuda::mls_smooth(
 			in.createTexture<half4>(Channel::Normals),
-			temp_.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+			temp.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 			in.createTexture<float>(Channel::Depth),
-			temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+			temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 			thresh,
 			radius,
 			in.getLeftCamera(),
@@ -171,7 +175,8 @@ bool SimpleMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 
 		//in.swapChannels(Channel::Depth, Channel::Depth2);
 		//in.swapChannels(Channel::Normals, Channel::Points);
-		temp_.swapChannels(Channel::Normals + Channel::Depth, in);
+		temp.swapChannel(Channel::Normals, in);
+		temp.swapChannel(Channel::Depth, in);
 	}
 
 	return true;
@@ -179,7 +184,7 @@ bool SimpleMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 
 
 
-ColourMLS::ColourMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
+ColourMLS::ColourMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg), temp_(ftl::data::Frame::make_standalone()) {
 
 }
 
@@ -200,14 +205,16 @@ bool ColourMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 		return false;
 	}
 
+	auto &temp = temp_.cast<ftl::rgbd::Frame>();
+
 	// FIXME: Assume in and out are the same frame.
 	for (int i=0; i<iters; ++i) {
 		if (!crosssup) {
 			ftl::cuda::colour_mls_smooth(
 				in.createTexture<half4>(Channel::Normals),
-				temp_.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<float>(Channel::Depth),
-				temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<uchar4>(Channel::Colour),
 				thresh,
 				col_smooth,
@@ -219,9 +226,9 @@ bool ColourMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 			ftl::cuda::colour_mls_smooth_csr(
 				in.createTexture<uchar4>(Channel::Support1),
 				in.createTexture<half4>(Channel::Normals),
-				temp_.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<float>(Channel::Depth),
-				temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<uchar4>(Channel::Colour),
 				thresh,
 				col_smooth,
@@ -233,7 +240,8 @@ bool ColourMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 
 		//in.swapChannels(Channel::Depth, Channel::Depth2);
 		//in.swapChannels(Channel::Normals, Channel::Points);
-		temp_.swapChannels(Channel::Normals + Channel::Depth, in);
+		temp_.swapChannel(Channel::Depth, in);
+		temp_.swapChannel(Channel::Normals, in);
 	}
 
 	return true;
@@ -242,8 +250,8 @@ bool ColourMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 
 // ====== Aggregating MLS ======================================================
 
-AggreMLS::AggreMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
-
+AggreMLS::AggreMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg), temp_(ftl::data::Frame::make_standalone()) {
+	temp_.store();
 }
 
 AggreMLS::~AggreMLS() {
@@ -267,6 +275,8 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s
 		return false;
 	}
 
+	auto &temp = temp_.cast<ftl::rgbd::Frame>();
+
 	auto size = in.get<GpuMat>(Channel::Depth).size();
 	centroid_horiz_.create(size.height, size.width);
 	normals_horiz_.create(size.height, size.width);
@@ -306,7 +316,7 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s
 			ftl::cuda::mls_adjust_depth(
 				in.createTexture<half4>(Channel::Normals),
 				centroid_vert_,
-				temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(size)),
+				temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(size)),
 				in.getTexture<float>(Channel::Depth),
 				in.getLeftCamera(),
 				stream
@@ -314,15 +324,15 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s
 
 			//in.swapChannels(Channel::Depth, Channel::Depth2);
 			//in.swapChannels(Channel::Normals, Channel::Points);
-			temp_.swapChannels(ftl::codecs::Channels<0>(Channel::Depth), in);
+			temp_.swapChannel(Channel::Depth, in);
 
 		} else {
 			ftl::cuda::colour_mls_smooth_csr(
 				in.createTexture<uchar4>(Channel::Support1),
 				in.createTexture<half4>(Channel::Normals),
-				temp_.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<float>(Channel::Depth),
-				temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+				temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 				in.createTexture<uchar4>(Channel::Colour),
 				thresh,
 				col_smooth,
@@ -331,7 +341,8 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s
 				stream
 			);
 
-			temp_.swapChannels(Channel::Normals + Channel::Depth, in);
+			temp_.swapChannel(Channel::Depth, in);
+			temp_.swapChannel(Channel::Normals, in);
 		}
 	}
 
@@ -341,7 +352,7 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s
 
 // ====== Adaptive MLS =========================================================
 
-AdaptiveMLS::AdaptiveMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
+AdaptiveMLS::AdaptiveMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg), temp_(ftl::data::Frame::make_standalone()) {
 
 }
 
@@ -358,20 +369,23 @@ bool AdaptiveMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_
 		return false;
 	}
 
+	auto &temp = temp_.cast<ftl::rgbd::Frame>();
+
 	// FIXME: Assume in and out are the same frame.
 	for (int i=0; i<iters; ++i) {
 		ftl::cuda::adaptive_mls_smooth(
 			in.createTexture<half4>(Channel::Normals),
-			temp_.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+			temp.createTexture<half4>(Channel::Normals, ftl::rgbd::Format<half4>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 			in.createTexture<float>(Channel::Depth),
-			temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
+			temp.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())),
 			in.createTexture<float>(Channel::Smoothing),
 			radius,
 			in.getLeftCamera(),
 			stream
 		);
 
-		temp_.swapChannels(Channel::Normals + Channel::Depth, in);
+		temp_.swapChannel(Channel::Depth, in);
+		temp_.swapChannel(Channel::Normals, in);
 
 	}
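// Illustrative sketch of the scratch-frame pattern the MLS operators above now
// use; this is not part of the patch. Only what the hunks above already call is
// assumed: ftl::data::Frame::make_standalone(), store() (as AggreMLS does), the
// cast<ftl::rgbd::Frame>() accessor and swapChannel(). The ExampleSmoother name
// is hypothetical and includes are omitted.
class ExampleSmoother {
	public:
	ExampleSmoother() : temp_(ftl::data::Frame::make_standalone()) {
		temp_.store();	// make the standalone frame usable as channel storage
	}

	void iterate(ftl::rgbd::Frame &in) {
		auto &temp = temp_.cast<ftl::rgbd::Frame>();
		// ... write the smoothed Depth/Normals channels into temp, then
		// ping-pong the results back into the input, one channel at a time:
		temp.swapChannel(ftl::codecs::Channel::Depth, in);
		temp.swapChannel(ftl::codecs::Channel::Normals, in);
	}

	private:
	ftl::data::Frame temp_;
};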
 
diff --git a/components/operators/src/weighting.cpp b/components/operators/src/weighting.cpp
index 549a22f14..5864f8caf 100644
--- a/components/operators/src/weighting.cpp
+++ b/components/operators/src/weighting.cpp
@@ -33,7 +33,14 @@ bool PixelWeights::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream
 	params.normals = config()->value("use_normals", true);
 	bool output_normals = config()->value("output_normals", params.normals);
 
-	if ((!in.hasChannel(Channel::Depth) && !in.hasChannel(Channel::GroundTruth)) || !in.hasChannel(Channel::Support1)) return false;
+	if (!in.hasChannel(Channel::Depth) && !in.hasChannel(Channel::GroundTruth)) {
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth channel in Weights operators");
+		return false;
+	}
+	if (!in.hasChannel(Channel::Support1)) {
+		out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Support channel in Weights operators");
+		return false;
+	}
 
 	Channel dchan = (in.hasChannel(Channel::Depth)) ? Channel::Depth : Channel::GroundTruth;
 
@@ -82,7 +89,7 @@ bool CullWeight::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
 
 	float weight = config()->value("weight", 0.1f);
 	
-	out.clearPackets(Channel::Depth);  // Force reset
+	out.set<ftl::rgbd::VideoFrame>(Channel::Depth);  // Force reset
 	ftl::cuda::cull_weight(
 		in.createTexture<short>(Channel::Weights),
 		out.createTexture<float>(Channel::Depth),
diff --git a/components/renderers/cpp/CMakeLists.txt b/components/renderers/cpp/CMakeLists.txt
index 8c5c1f7f0..5b720cf77 100644
--- a/components/renderers/cpp/CMakeLists.txt
+++ b/components/renderers/cpp/CMakeLists.txt
@@ -10,6 +10,8 @@ add_library(ftlrender
 	src/colouriser.cpp
 	src/colour_util.cu
 	src/overlay.cpp
+	src/gltexture.cpp
+	src/touch.cu
 	#src/assimp_render.cpp
 	#src/assimp_scene.cpp
 )
@@ -27,4 +29,8 @@ target_include_directories(ftlrender PUBLIC
 	PRIVATE src)
 target_link_libraries(ftlrender ftlrgbd ftlcommon Eigen3::Eigen Threads::Threads nanogui ${NANOGUI_EXTRA_LIBS} ${OpenCV_LIBS})
 
+target_precompile_headers(ftlrender REUSE_FROM ftldata)
+
+set_property(TARGET ftlrender PROPERTY CUDA_ARCHITECTURES OFF)
+
 #ADD_SUBDIRECTORY(test)
diff --git a/components/renderers/cpp/include/ftl/cuda/touch.hpp b/components/renderers/cpp/include/ftl/cuda/touch.hpp
new file mode 100644
index 000000000..f98d64e55
--- /dev/null
+++ b/components/renderers/cpp/include/ftl/cuda/touch.hpp
@@ -0,0 +1,28 @@
+#ifndef _FTL_CUDA_TOUCH_HPP_
+#define _FTL_CUDA_TOUCH_HPP_
+
+#include <ftl/cuda_common.hpp>
+
+namespace ftl {
+namespace cuda {
+
+struct Collision {
+	uint screen;
+	float depth;
+
+	__host__ __device__ inline int x() const  { return (screen >> 12) & 0x3FF; }
+	__host__ __device__ inline int y() const  { return screen & 0x3FF; }
+	__host__ __device__ inline float strength() const { return float(screen >> 24) / 32.0f; }
+};
+
+void touch_merge(
+		ftl::cuda::TextureObject<float> &depth_in,
+		ftl::cuda::TextureObject<float> &depth_out,
+        Collision *collisions, int max_collisions,
+		float dist,
+		cudaStream_t stream);
+
+}
+}
+
+#endif
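// Illustrative sketch, not part of the patch. The accessors above imply the bit
// layout of Collision::screen: y in bits 0..9, x in bits 12..21, and an 8-bit
// strength count in bits 24..31 that strength() divides by 32. The pack_collision
// helper below is hypothetical and simply mirrors that layout.
__host__ __device__ inline uint pack_collision(int x, int y, float strength) {
	const uint count = static_cast<uint>(strength * 32.0f) & 0xFF;
	return (count << 24) | ((static_cast<uint>(x) & 0x3FF) << 12) | (static_cast<uint>(y) & 0x3FF);
}
// e.g. pack_collision(320, 240, 1.0f) round-trips through the accessors to
// x() == 320, y() == 240 and strength() == 1.0f.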
diff --git a/components/renderers/cpp/include/ftl/render/CUDARender.hpp b/components/renderers/cpp/include/ftl/render/CUDARender.hpp
index cfdb4cf40..d5b3ce675 100644
--- a/components/renderers/cpp/include/ftl/render/CUDARender.hpp
+++ b/components/renderers/cpp/include/ftl/render/CUDARender.hpp
@@ -5,6 +5,7 @@
 #include <ftl/rgbd/frameset.hpp>
 #include <ftl/render/render_params.hpp>
 #include <ftl/cuda/points.hpp>
+#include <ftl/cuda/touch.hpp>
 #include <ftl/codecs/channels.hpp>
 //#include <ftl/filters/filter.hpp>
 
@@ -25,13 +26,18 @@ class CUDARender : public ftl::render::FSRenderer {
 	void begin(ftl::rgbd::Frame &, ftl::codecs::Channel) override;
 	void end() override;
 
-	bool submit(ftl::rgbd::FrameSet *in, ftl::codecs::Channels<0>, const Eigen::Matrix4d &t) override;
+	bool submit(ftl::data::FrameSet *in, ftl::codecs::Channels<0>, const Eigen::Matrix4d &t) override;
 	//void setOutputDevice(int);
 
 	void render() override;
 
 	void blend(ftl::codecs::Channel) override;
 
+	/**
+	 * Returns all inter-frameset collisions as (screen x, screen y, depth, strength) points.
+	 */
+	inline const std::vector<float4> &getCollisions() const { return collision_points_; }
+
 	void setViewPort(ftl::render::ViewPortMode mode, const ftl::render::ViewPort &vp) {
 		params_.viewport = vp;
 		params_.viewPortMode = mode;
@@ -44,7 +50,8 @@ class CUDARender : public ftl::render::FSRenderer {
 
 	private:
 	int device_;
-	ftl::rgbd::Frame temp_;
+	ftl::data::Frame temp_d_;
+	ftl::rgbd::Frame &temp_;
 	//ftl::rgbd::Frame accum_;
 	ftl::cuda::TextureObject<float4> accum_;		// 2 is number of channels can render together
 	ftl::cuda::TextureObject<int> contrib_;
@@ -52,9 +59,15 @@ class CUDARender : public ftl::render::FSRenderer {
 
 	std::list<ftl::cuda::TextureObject<short2>*> screen_buffers_;
 	std::list<ftl::cuda::TextureObject<float>*> depth_buffers_;
+	ftl::cuda::TextureObject<float> depth_out_;
+
+	ftl::cuda::Collision *collisions_;
+	ftl::cuda::Collision collisions_host_[1024];
+	std::vector<float4> collision_points_;
+	float touch_dist_;
 
 	ftl::rgbd::Frame *out_;
-	ftl::rgbd::FrameSet *scene_;
+	ftl::data::FrameSet *scene_;
 	ftl::cuda::ClipSpace clip_;
 	ftl::render::Colouriser *colouriser_;
 	bool clipping_;
@@ -100,7 +113,7 @@ class CUDARender : public ftl::render::FSRenderer {
 	void _end();
 	void _endSubmit();
 
-	bool _alreadySeen() const { return last_frame_ == scene_->timestamp; }
+	bool _alreadySeen() const { return last_frame_ == scene_->timestamp(); }
 	void _adjustDepthThresholds(const ftl::rgbd::Camera &fcam);
 
 	ftl::cuda::TextureObject<float> &_getDepthBuffer(const cv::Size &);
diff --git a/components/renderers/cpp/include/ftl/render/overlay.hpp b/components/renderers/cpp/include/ftl/render/overlay.hpp
index be4f36d31..fd17ff833 100644
--- a/components/renderers/cpp/include/ftl/render/overlay.hpp
+++ b/components/renderers/cpp/include/ftl/render/overlay.hpp
@@ -6,6 +6,8 @@
 #include <ftl/rgbd/frameset.hpp>
 #include <nanogui/glutil.h>
 
+struct NVGcontext;
+
 namespace ftl {
 namespace overlay {
 
@@ -24,7 +26,7 @@ class Overlay : public ftl::Configurable {
 
 	//void apply(ftl::rgbd::FrameSet &fs, cv::Mat &out, ftl::rgbd::FrameState &state);
 
-	void draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const Eigen::Vector2f &);
+	void draw(NVGcontext *, ftl::data::FrameSet &fs, ftl::rgbd::Frame &frame, const Eigen::Vector2f &);
 
 	private:
 	nanogui::GLShader oShader;
diff --git a/components/renderers/cpp/include/ftl/render/renderer.hpp b/components/renderers/cpp/include/ftl/render/renderer.hpp
index a7678b92f..07f92083e 100644
--- a/components/renderers/cpp/include/ftl/render/renderer.hpp
+++ b/components/renderers/cpp/include/ftl/render/renderer.hpp
@@ -32,7 +32,7 @@ class Renderer : public ftl::Configurable {
 	 * frame given as parameter is where the output channels are rendered to.
 	 * The channel parameter is the render output channel which can either be
 	 * Left (Colour) or Right (Colour 2). Using "Right" will also adjust the
-	 * pose to the right eye position and use the right camera intrinsics. 
+	 * pose to the right eye position and use the right camera intrinsics.
 	 */
 	virtual void begin(ftl::rgbd::Frame &, ftl::codecs::Channel)=0;
 
@@ -64,13 +64,13 @@ class FSRenderer : public ftl::render::Renderer {
 	 * multiple times between `begin` and `end` to combine multiple framesets.
 	 * Note that the frameset pointer must remain valid until `end` is called,
 	 * and ideally should not be swapped between.
-	 * 
+	 *
 	 * The channels parameter gives all of the source channels that will be
 	 * rendered into the single colour output. These will be blended
 	 * together by some undefined method. Non colour channels will be converted
 	 * to RGB colour appropriately.
      */
-    virtual bool submit(ftl::rgbd::FrameSet *, ftl::codecs::Channels<0>, const Eigen::Matrix4d &)=0;
+    virtual bool submit(ftl::data::FrameSet *, ftl::codecs::Channels<0>, const Eigen::Matrix4d &)=0;
 };
 
 }
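// Illustrative sketch of the begin()/submit()/end() contract documented above;
// not part of the patch. The renderer, output frame and framesets are assumed to
// be created elsewhere, and as noted above each frameset pointer must stay valid
// until end() is called. Where a concrete renderer's render() call fits in the
// cycle is not shown here.
#include <ftl/render/renderer.hpp>
#include <vector>

void renderFramesets(ftl::render::FSRenderer &renderer,
		ftl::rgbd::Frame &out,
		const std::vector<ftl::data::FrameSet*> &framesets) {
	renderer.begin(out, ftl::codecs::Channel::Colour);
	for (auto *fs : framesets) {
		// Multiple submits between begin() and end() blend framesets into one output.
		renderer.submit(fs, ftl::codecs::Channels<0>(ftl::codecs::Channel::Colour), Eigen::Matrix4d::Identity());
	}
	renderer.end();
}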
diff --git a/components/renderers/cpp/include/ftl/utility/gltexture.hpp b/components/renderers/cpp/include/ftl/utility/gltexture.hpp
new file mode 100644
index 000000000..c56dd0bb3
--- /dev/null
+++ b/components/renderers/cpp/include/ftl/utility/gltexture.hpp
@@ -0,0 +1,57 @@
+#pragma once
+
+#include <opencv2/core.hpp>
+#include <ftl/cuda_common.hpp>
+
+struct cudaGraphicsResource;
+
+namespace ftl {
+namespace utility {
+
+class GLTexture {
+public:
+	enum class Type {
+		RGBA,
+		BGRA,
+		Float
+	};
+
+	explicit GLTexture();
+	~GLTexture();
+
+	bool isValid() const { return glid_ != std::numeric_limits<unsigned int>::max(); }
+	int width() const { return width_; }
+	int height() const { return height_; }
+	Type type() const { return type_; }
+
+	std::mutex& mutex() { return mtx_; }
+
+	// acquire mutex before make() or free()
+	void make(int width, int height, Type type);
+	void free();
+	unsigned int texture() const;
+
+	cv::cuda::GpuMat map(cudaStream_t stream);
+	void unmap(cudaStream_t stream);
+
+	void copyFrom(const ftl::cuda::TextureObject<uchar4> &buf, cudaStream_t stream = cudaStreamDefault);
+
+	void copyFrom(const cv::Mat &im, cudaStream_t stream = cudaStreamDefault);
+	void copyFrom(const cv::cuda::GpuMat &im, cudaStream_t stream = cudaStreamDefault);
+
+private:
+	unsigned int glid_;
+	unsigned int glbuf_;
+	int width_;
+	int height_;
+	int stride_;
+
+	Type type_;
+
+	std::mutex mtx_; // for locking while in use (opengl thread calls lock() or cuda mapped)
+
+	cudaGraphicsResource *cuda_res_;
+};
+
+}
+}
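// Illustrative usage sketch based only on the declarations above; not part of
// the patch. uploadFrame() is a hypothetical helper and error handling is omitted.
#include <ftl/utility/gltexture.hpp>

unsigned int uploadFrame(ftl::utility::GLTexture &tex, const cv::cuda::GpuMat &rgba, cudaStream_t stream) {
	// copyFrom() sizes the texture via make() and streams the pixels through the
	// internal map()/unmap() pair, which holds the texture mutex for the copy.
	tex.copyFrom(rgba, stream);
	return tex.texture();	// GL texture name; bind this on the OpenGL thread
}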
diff --git a/components/renderers/cpp/src/CUDARender.cpp b/components/renderers/cpp/src/CUDARender.cpp
index 03ea37f63..b3d07ac29 100644
--- a/components/renderers/cpp/src/CUDARender.cpp
+++ b/components/renderers/cpp/src/CUDARender.cpp
@@ -25,13 +25,14 @@ using ftl::render::CUDARender;
 using ftl::codecs::Channel;
 using ftl::codecs::Channels;
 using ftl::rgbd::Format;
+using ftl::rgbd::VideoFrame;
 using cv::cuda::GpuMat;
 using std::stoul;
 using ftl::cuda::Mask;
 using ftl::render::parseCUDAColour;
 using ftl::render::parseCVColour;
 
-CUDARender::CUDARender(nlohmann::json &config) : ftl::render::FSRenderer(config), scene_(nullptr) {
+CUDARender::CUDARender(nlohmann::json &config) : ftl::render::FSRenderer(config), temp_d_(ftl::data::Frame::make_standalone()), temp_(temp_d_.cast<ftl::rgbd::Frame>()), scene_(nullptr) {
 	/*if (config["clipping"].is_object()) {
 		auto &c = config["clipping"];
 		float rx = c.value("pitch", 0.0f);
@@ -59,27 +60,29 @@ CUDARender::CUDARender(nlohmann::json &config) : ftl::render::FSRenderer(config)
 
 	colouriser_ = ftl::create<ftl::render::Colouriser>(this, "colouriser");
 
-	on("clipping_enabled", [this](const ftl::config::Event &e) {
+	on("touch_sensitivity", touch_dist_, 0.04f);
+
+	on("clipping_enabled", [this]() {
 		clipping_ = value("clipping_enabled", true);
 	});
 
 	norm_filter_ = value("normal_filter", -1.0f);
-	on("normal_filter", [this](const ftl::config::Event &e) {
+	on("normal_filter", [this]() {
 		norm_filter_ = value("normal_filter", -1.0f);
 	});
 
 	backcull_ = value("back_cull", true);
-	on("back_cull", [this](const ftl::config::Event &e) {
+	on("back_cull", [this]() {
 		backcull_ = value("back_cull", true);
 	});
 
 	mesh_ = value("meshing", true);
-	on("meshing", [this](const ftl::config::Event &e) {
+	on("meshing", [this]() {
 		mesh_ = value("meshing", true);
 	});
 
 	background_ = parseCVColour(value("background", std::string("#4c4c4c")));
-	on("background", [this](const ftl::config::Event &e) {
+	on("background", [this]() {
 		background_ = parseCVColour(value("background", std::string("#4c4c4c")));
 	});
 
@@ -98,10 +101,15 @@ CUDARender::CUDARender(nlohmann::json &config) : ftl::render::FSRenderer(config)
 
 	cudaSafeCall(cudaStreamCreate(&stream_));
 	last_frame_ = -1;
+
+	temp_.store();
+	// Allocate collisions buffer
+	cudaSafeCall(cudaMalloc(&collisions_, 1024*sizeof(ftl::cuda::Collision)));
 }
 
 CUDARender::~CUDARender() {
-
+	delete colouriser_;
+	cudaFree(collisions_);
 }
 
 void CUDARender::_renderChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel in, const Eigen::Matrix4d &t, cudaStream_t stream) {
@@ -110,12 +118,12 @@ void CUDARender::_renderChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel i
 	if (in == Channel::None) return;
 
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
-		if (!scene_->hasFrame(i)) continue;
-		auto &f = scene_->frames[i];
+		//if (!scene_->hasFrame(i)) continue;
+		auto &f = scene_->frames[i].cast<ftl::rgbd::Frame>();
 
 		if (!f.hasChannel(in)) {
-			LOG(ERROR) << "Reprojecting unavailable channel";
-			return;
+			//LOG(ERROR) << "Reprojecting unavailable channel";
+			continue;
 		}
 
 		_adjustDepthThresholds(f.getLeftCamera());
@@ -169,14 +177,14 @@ void CUDARender::_renderChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel i
 
 void CUDARender::_dibr(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStream_t stream) {
 	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
-	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+	temp_.set<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
 
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
 		if (!scene_->hasFrame(i)) continue;
-		auto &f = scene_->frames[i];
+		auto &f = scene_->frames[i].cast<ftl::rgbd::Frame>();
 		//auto *s = scene_->sources[i];
 
-		if (f.empty(Channel::Colour)) {
+		if (!f.has(Channel::Colour)) {
 			LOG(ERROR) << "Missing required channel";
 			continue;
 		}
@@ -233,23 +241,27 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 	bool do_blend = value("mesh_blend", false);
 	float blend_alpha = value("blend_alpha", 0.02f);
 	if (do_blend) {
-		temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
-		temp_.get<GpuMat>(Channel::Weights).setTo(cv::Scalar(0.0f), cvstream);
+		temp_.set<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+		temp_.set<GpuMat>(Channel::Weights).setTo(cv::Scalar(0.0f), cvstream);
 	} else {
-		temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+		temp_.set<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
 	}
 
+	int valid_count = 0;
+
 	// For each source depth map
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
-		if (!scene_->hasFrame(i)) continue;
-		auto &f = scene_->frames[i];
+		//if (!scene_->hasFrame(i)) continue;
+		auto &f = scene_->frames[i].cast<ftl::rgbd::Frame>();
 		//auto *s = scene_->sources[i];
 
-		if (f.empty(Channel::Colour)) {
-			LOG(ERROR) << "Missing required channel";
+		if (!f.has(Channel::Colour)) {
+			//LOG(ERROR) << "Missing required channel";
 			continue;
 		}
 
+		++valid_count;
+
 		//auto pose = MatrixConversion::toCUDA(t.cast<float>() * f.getPose().cast<float>());
 		auto transform = pose_ * MatrixConversion::toCUDA(t.cast<float>() * f.getPose().cast<float>());
 
@@ -285,9 +297,11 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 
 		// Must reset depth channel if blending
 		if (do_blend) {
-			temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+			temp_.set<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
 		}
 
+		depth_out_.to_gpumat().setTo(cv::Scalar(1000.0f), cvstream);
+
 		// Decide on and render triangles around each point
 		ftl::cuda::triangle_render1(
 			depthbuffer,
@@ -303,7 +317,8 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 			// Blend this sources mesh with previous meshes
 			ftl::cuda::mesh_blender(
 				temp_.getTexture<int>(Channel::Depth),
-				out.createTexture<float>(_getDepthChannel()),
+				//out.createTexture<float>(_getDepthChannel()),
+				depth_out_,
 				f.createTexture<short>(Channel::Weights),
 				temp_.createTexture<float>(Channel::Weights),
 				params_,
@@ -315,20 +330,28 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 		}
 	}
 
+	if (valid_count == 0) return;
+
 	// Convert from int depth to float depth
 	//temp_.get<GpuMat>(Channel::Depth2).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 100000.0f, cvstream);
 
 	if (do_blend) {
 		ftl::cuda::dibr_normalise(
-			out.getTexture<float>(_getDepthChannel()),
-			out.getTexture<float>(_getDepthChannel()),
+			//out.getTexture<float>(_getDepthChannel()),
+			//out.getTexture<float>(_getDepthChannel()),
+			depth_out_,
+			depth_out_,
 			temp_.getTexture<float>(Channel::Weights),
 			stream_
 		);
 	} else {
-		ftl::cuda::merge_convert_depth(temp_.getTexture<int>(Channel::Depth2), out.createTexture<float>(_getDepthChannel()), 1.0f / 100000.0f, stream_);
+		//ftl::cuda::merge_convert_depth(temp_.getTexture<int>(Channel::Depth2), out.createTexture<float>(_getDepthChannel()), 1.0f / 100000.0f, stream_);
+		ftl::cuda::merge_convert_depth(temp_.getTexture<int>(Channel::Depth2), depth_out_, 1.0f / 100000.0f, stream_);
 	}
 
+	// Now merge new render to any existing frameset render, detecting collisions
+	ftl::cuda::touch_merge(depth_out_, out.createTexture<float>(_getDepthChannel()), collisions_, 1024, touch_dist_, stream_);
+
 	//filters_->filter(out, src, stream);
 
 	// Generate normals for final virtual image
@@ -347,29 +370,30 @@ void CUDARender::_allocateChannels(ftl::rgbd::Frame &out, ftl::codecs::Channel c
 	// Allocate left channel buffers and clear them
 	if (chan == Channel::Colour) {
 		//if (!out.hasChannel(Channel::Depth)) {
-			out.create<GpuMat>(Channel::Depth, Format<float>(camera.width, camera.height));
-			out.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height));
-			out.create<GpuMat>(Channel::Normals, Format<half4>(camera.width, camera.height));
-			out.createTexture<uchar4>(Channel::Colour, true);  // Force interpolated colour
-			out.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
+			out.create<VideoFrame>(Channel::Depth).createGPU(Format<float>(camera.width, camera.height));
+			out.create<VideoFrame>(Channel::Colour).createGPU(Format<uchar4>(camera.width, camera.height));
+			out.create<VideoFrame>(Channel::Normals).createGPU(Format<half4>(camera.width, camera.height));
+			out.createTexture<uchar4>(Channel::Colour, ftl::rgbd::Format<uchar4>(camera.width, camera.height), true);  // Force interpolated colour
+			out.set<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
 		//}
 	// Allocate right channel buffers and clear them
 	} else {
 		if (!out.hasChannel(Channel::Depth2)) {
-			out.create<GpuMat>(Channel::Depth2, Format<float>(camera.width, camera.height));
-			out.create<GpuMat>(Channel::Colour2, Format<uchar4>(camera.width, camera.height));
-			out.create<GpuMat>(Channel::Normals2, Format<half4>(camera.width, camera.height));
-			out.createTexture<uchar4>(Channel::Colour2, true);  // Force interpolated colour
-			out.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(1000.0f), cvstream);
+			out.create<VideoFrame>(Channel::Depth2).createGPU(Format<float>(camera.width, camera.height));
+			out.create<VideoFrame>(Channel::Colour2).createGPU(Format<uchar4>(camera.width, camera.height));
+			out.create<VideoFrame>(Channel::Normals2).createGPU(Format<half4>(camera.width, camera.height));
+			out.createTexture<uchar4>(Channel::Colour2, ftl::rgbd::Format<uchar4>(camera.width, camera.height), true);  // Force interpolated colour
+			out.set<GpuMat>(Channel::Depth2).setTo(cv::Scalar(1000.0f), cvstream);
 		}
 	}
-
-	temp_.create<GpuMat>(Channel::Depth, Format<int>(camera.width, camera.height));
-	temp_.create<GpuMat>(Channel::Depth2, Format<int>(camera.width, camera.height));
-	temp_.create<GpuMat>(Channel::Normals, Format<half4>(camera.width, camera.height));
-	temp_.create<GpuMat>(Channel::Weights, Format<float>(camera.width, camera.height));
+
+	temp_.create<VideoFrame>(Channel::Depth).createGPU(Format<int>(camera.width, camera.height));
+	temp_.create<VideoFrame>(Channel::Depth2).createGPU(Format<int>(camera.width, camera.height));
+	temp_.create<VideoFrame>(Channel::Normals).createGPU(Format<half4>(camera.width, camera.height));
+	temp_.create<VideoFrame>(Channel::Weights).createGPU(Format<float>(camera.width, camera.height));
 	temp_.createTexture<int>(Channel::Depth);
 
+	depth_out_.create(camera.width, camera.height);
 	accum_.create(camera.width, camera.height);
 	contrib_.create(camera.width, camera.height);
 
@@ -418,7 +442,7 @@ void CUDARender::_postprocessColours(ftl::rgbd::Frame &out) {
 			out.getTexture<half4>(_getNormalsChannel()),
 			out.getTexture<uchar4>(out_chan_),
 			col, pose,
-			stream_	
+			stream_
 		);
 	}
 
@@ -438,7 +462,7 @@ void CUDARender::_postprocessColours(ftl::rgbd::Frame &out) {
 			params_.camera,
 			stream_
 		);
-	} else if (out.hasChannel(_getDepthChannel()) && out.hasChannel(out_chan_)) {
+	} else if (mesh_ && out.hasChannel(_getDepthChannel()) && out.hasChannel(out_chan_)) {
 		ftl::cuda::fix_bad_colour(
 			out.getTexture<float>(_getDepthChannel()),
 			out.getTexture<uchar4>(out_chan_),
@@ -467,7 +491,8 @@ void CUDARender::_renderPass2(Channels<0> chans, const Eigen::Matrix4d &t) {
 	for (auto chan : chans) {
 		ftl::codecs::Channel mapped = chan;
 
-		if (chan == Channel::Colour && scene_->firstFrame().hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes;
+		// FIXME: Doesn't seem to work
+		//if (chan == Channel::Colour && scene_->firstFrame().hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes;
 
 		_renderChannel(*out_, mapped, t, stream_);
 	}
@@ -492,7 +517,7 @@ void CUDARender::begin(ftl::rgbd::Frame &out, ftl::codecs::Channel chan) {
 
 	// Apply a colour background
 	if (env_image_.empty() || !value("environment_enabled", false)) {
-		out.get<GpuMat>(chan).setTo(background_, cvstream);
+		out.set<GpuMat>(chan).setTo(background_, cvstream);
 	} else {
 		auto pose = poseInverse_.getFloat3x3();
 		ftl::cuda::equirectangular_reproject(
@@ -503,6 +528,9 @@ void CUDARender::begin(ftl::rgbd::Frame &out, ftl::codecs::Channel chan) {
 
 	sets_.clear();
 	stage_ = Stage::ReadySubmit;
+
+	// Reset collision data.
+	cudaSafeCall(cudaMemsetAsync(collisions_, 0, sizeof(int), stream_));
 }
 
 void CUDARender::render() {
@@ -567,14 +595,45 @@ void CUDARender::_endSubmit() {
 void CUDARender::_end() {
 	_postprocessColours(*out_);
 
-	// Final OpenGL flip
-	ftl::cuda::flip(out_->getTexture<uchar4>(out_chan_), stream_);
-	ftl::cuda::flip(out_->getTexture<float>(_getDepthChannel()), stream_);
+	// Final OpenGL flip (easier to do in shader?)
+	/*ftl::cuda::flip(out_->getTexture<uchar4>(out_chan_), stream_);*/
+	/*ftl::cuda::flip(out_->getTexture<float>(_getDepthChannel()), stream_);*/
 
+	cudaSafeCall(cudaMemcpyAsync(collisions_host_, collisions_, sizeof(ftl::cuda::Collision)*1024, cudaMemcpyDeviceToHost, stream_));
 	cudaSafeCall(cudaStreamSynchronize(stream_));
+
+	// Unpack collisions into (screen x, screen y, depth, strength) points.
+	collision_points_.resize(collisions_host_[0].screen);
+	for (uint i=1; i<collisions_host_[0].screen+1; ++i) {
+		collision_points_[i-1] = make_float4(collisions_host_[i].x(), collisions_host_[i].y(), collisions_host_[i].depth, collisions_host_[i].strength());
+	}
+
+	// Do something with the collisions
+	/*if (collisions_host_[0].screen > 0) {
+		float x = 0.0f;
+		float y = 0.0f;
+		float d = 0.0f;
+		float w = 0.0f;
+
+		for (uint i=1; i<collisions_host_[0].screen+1; ++i) {
+			float inum = collisions_host_[i].strength();
+			int ix = collisions_host_[i].x();
+			int iy = collisions_host_[i].y();
+			x += float(ix)*inum;
+			y += float(iy)*inum;
+			d += collisions_host_[i].depth*inum;
+			w += inum;
+		}
+
+		x /= w;
+		y /= w;
+		d /= w;
+
+		LOG(INFO) << "Collision at: " << x << "," << y << ", " << d;
+	}*/
 }
 
-bool CUDARender::submit(ftl::rgbd::FrameSet *in, Channels<0> chans, const Eigen::Matrix4d &t) {
+bool CUDARender::submit(ftl::data::FrameSet *in, Channels<0> chans, const Eigen::Matrix4d &t) {
 	if (stage_ != Stage::ReadySubmit) {
 		throw FTL_Error("Renderer not ready for submits");
 	}
@@ -588,9 +647,9 @@ bool CUDARender::submit(ftl::rgbd::FrameSet *in, Channels<0> chans, const Eigen:
 	bool success = true;
 
 	try {
-		_renderPass1(in->pose);
+		_renderPass1(t);
 		//cudaSafeCall(cudaStreamSynchronize(stream_));
-	} catch (std::exception &e) {
+	} catch (const ftl::exception &e) {
 		LOG(ERROR) << "Exception in render: " << e.what();
 		success = false;
 	}
@@ -598,9 +657,9 @@ bool CUDARender::submit(ftl::rgbd::FrameSet *in, Channels<0> chans, const Eigen:
 	auto &s = sets_.emplace_back();
 	s.fs = in;
 	s.channels = chans;
-	s.transform = in->pose;
+	s.transform = t;
 
-	last_frame_ = scene_->timestamp;
+	last_frame_ = scene_->timestamp();
 	scene_ = nullptr;
 	return success;
 }
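// Illustrative sketch (not part of the patch) of consuming getCollisions() after
// a render, following the commented-out weighted average in _end() above. Each
// float4 holds (screen x, screen y, depth, strength); weightedTouchPoint() is a
// hypothetical helper.
#include <ftl/render/CUDARender.hpp>

bool weightedTouchPoint(const ftl::render::CUDARender &renderer, float &x, float &y, float &depth) {
	const auto &points = renderer.getCollisions();
	if (points.empty()) return false;

	float w = 0.0f;
	x = y = depth = 0.0f;
	for (const auto &p : points) {
		x += p.x * p.w;		// p.w carries the collision strength
		y += p.y * p.w;
		depth += p.z * p.w;
		w += p.w;
	}
	if (w <= 0.0f) return false;
	x /= w; y /= w; depth /= w;
	return true;
}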
diff --git a/components/renderers/cpp/src/colouriser.cpp b/components/renderers/cpp/src/colouriser.cpp
index 6341f1a86..574073d37 100644
--- a/components/renderers/cpp/src/colouriser.cpp
+++ b/components/renderers/cpp/src/colouriser.cpp
@@ -113,9 +113,15 @@ Colouriser::~Colouriser() {
 }
 
 TextureObject<uchar4> &Colouriser::colourise(ftl::rgbd::Frame &f, Channel c, cudaStream_t stream) {
+	const auto &vf = f.get<ftl::rgbd::VideoFrame>(c);
+	if (!vf.isGPU()) {
+		f.upload(c);
+	}
+
 	switch (c) {
 	case Channel::Overlay		: return f.createTexture<uchar4>(c);
 	case Channel::ColourHighRes	:
+	case Channel::RightHighRes	:
 	case Channel::Colour		:
 	case Channel::Colour2		: return _processColour(f,c,stream);
 	case Channel::GroundTruth	:
@@ -183,7 +189,7 @@ TextureObject<uchar4> &Colouriser::_processColour(ftl::rgbd::Frame &f, Channel c
 	bool colour_sources = value("colour_sources", false);
 
 	if (!colour_sources && show_mask == 0) {
-		return f.createTexture<uchar4>(c);
+		return f.createTexture<uchar4>(c, true);
 	}
 
 	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
@@ -192,7 +198,7 @@ TextureObject<uchar4> &Colouriser::_processColour(ftl::rgbd::Frame &f, Channel c
 	auto &buf = _getBuffer(size.width, size.height);
 
 	if (colour_sources) {
-		auto colour = HSVtoRGB(360 / 8 * f.id, 0.6, 0.85);
+		auto colour = HSVtoRGB(360 / 8 * f.source(), 0.6, 0.85);
 		buf.to_gpumat().setTo(colour, cvstream);
 	}
 
diff --git a/components/renderers/cpp/src/dibr.cu b/components/renderers/cpp/src/dibr.cu
index e57ffe615..a00f4be52 100644
--- a/components/renderers/cpp/src/dibr.cu
+++ b/components/renderers/cpp/src/dibr.cu
@@ -14,6 +14,7 @@ using ftl::rgbd::Projection;
 /*
  * DIBR point cloud with a depth check
  */
+ template <Projection PROJECT>
  __global__ void dibr_merge_kernel(TextureObject<float> depth,
 		TextureObject<int> depth_out,
 		float4x4 transform,
@@ -30,7 +31,7 @@ using ftl::rgbd::Projection;
 	//const float d = camPos.z;
 
 	//const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
-	const float3 screenPos = params.camera.project<Projection::PERSPECTIVE>(camPos);
+	const float3 screenPos = params.camera.project<PROJECT>(camPos);
 	const unsigned int cx = (unsigned int)(screenPos.x+0.5f);
 	const unsigned int cy = (unsigned int)(screenPos.y+0.5f);
 	const float d = screenPos.z;
@@ -70,7 +71,11 @@ void ftl::cuda::dibr_merge(TextureObject<float> &depth, TextureObject<int> &dept
     const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
     const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-	dibr_merge_kernel<<<gridSize, blockSize, 0, stream>>>(depth, depth_out, transform, cam, params);
+	if (params.projection == Projection::PERSPECTIVE) {
+		dibr_merge_kernel<Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, transform, cam, params);
+	} else {
+		dibr_merge_kernel<Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, transform, cam, params);
+	}
     cudaSafeCall( cudaGetLastError() );
 }
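// Note on the hunk above (illustrative, not part of the patch): the host-side
// branch selects a template instantiation so the projection mode becomes a
// compile-time constant inside dibr_merge_kernel, letting project<PROJECT>() be
// inlined with no per-pixel branching. Supporting a further ftl::rgbd::Projection
// mode would mean adding one more instantiation to this dispatch.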
 
diff --git a/components/renderers/cpp/src/gltexture.cpp b/components/renderers/cpp/src/gltexture.cpp
new file mode 100644
index 000000000..2bb106ecd
--- /dev/null
+++ b/components/renderers/cpp/src/gltexture.cpp
@@ -0,0 +1,188 @@
+#include <ftl/utility/gltexture.hpp>
+
+#include <nanogui/opengl.h>
+#include <loguru.hpp>
+
+#include <ftl/cuda_common.hpp>
+
+#include <cuda_gl_interop.h>
+#include <opencv2/core/cuda_stream_accessor.hpp>
+
+#include <ftl/exception.hpp>
+
+static void log_error() {
+	auto err = glGetError();
+	if (err != 0) LOG(ERROR) << "OpenGL Texture error: " << err;
+}
+
+using ftl::utility::GLTexture;
+
+GLTexture::GLTexture() {
+	glid_ = std::numeric_limits<unsigned int>::max();
+	glbuf_ = std::numeric_limits<unsigned int>::max();
+	cuda_res_ = nullptr;
+	width_ = 0;
+	height_ = 0;
+	type_ = Type::RGBA;
+}
+
+GLTexture::~GLTexture() {
+	free();  // Note: Do not simply remove this...
+}
+
+void GLTexture::make(int width, int height, Type type) {
+	if (width != width_ || height != height_ || type_ != type) {
+		free();
+	}
+
+	static constexpr int ALIGNMENT = 128;
+
+	width_ = width;
+	height_ = height;
+	stride_ = ((width*4) % ALIGNMENT != 0) ?
+		((width*4) + (ALIGNMENT - ((width*4) % ALIGNMENT))) / 4:
+		width;
+
+	type_ = type;
+
+	if (width == 0 || height == 0) {
+		throw FTL_Error("Invalid texture size");
+	}
+
+	if (glid_ == std::numeric_limits<unsigned int>::max()) {
+		glGenTextures(1, &glid_); log_error();
+		glBindTexture(GL_TEXTURE_2D, glid_); log_error();
+		glPixelStorei(GL_UNPACK_ROW_LENGTH, stride_); log_error();
+
+		if (type_ == Type::BGRA) {
+			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, nullptr);
+		} else if (type_ == Type::Float) {
+			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_BGRA, GL_UNSIGNED_BYTE, nullptr);
+		}
+		log_error();
+
+		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+		log_error();
+
+		glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+
+		glBindTexture(GL_TEXTURE_2D, 0);
+
+		glGenBuffers(1, &glbuf_);
+		// Make this the current UNPACK buffer (OpenGL is state-based)
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER, glbuf_);
+		// Allocate data for the buffer. 4-channel 8-bit image or 1-channel float
+		glBufferData(GL_PIXEL_UNPACK_BUFFER, stride_ * height * 4, NULL, GL_DYNAMIC_COPY);
+
+		cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_res_, glbuf_, cudaGraphicsRegisterFlagsWriteDiscard));
+		glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+		log_error();
+	}
+}
+
+void GLTexture::free() {
+	if (glid_ != std::numeric_limits<unsigned int>::max()) {
+		glDeleteTextures(1, &glid_);
+		glid_ = std::numeric_limits<unsigned int>::max();
+	}
+
+	if (glbuf_ != std::numeric_limits<unsigned int>::max()) {
+		cudaSafeCall(cudaGraphicsUnregisterResource( cuda_res_ ));
+		cuda_res_ = nullptr;
+		glDeleteBuffers(1, &glbuf_);
+		glbuf_ = std::numeric_limits<unsigned int>::max();
+	}
+}
+
+cv::cuda::GpuMat GLTexture::map(cudaStream_t stream) {
+	mtx_.lock();
+	void *devptr;
+	size_t size;
+	cudaSafeCall(cudaGraphicsMapResources(1, &cuda_res_, stream));
+	cudaSafeCall(cudaGraphicsResourceGetMappedPointer(&devptr, &size, cuda_res_));
+	return cv::cuda::GpuMat(height_, width_, (type_ == Type::BGRA) ? CV_8UC4 : CV_32F, devptr, stride_*4);
+}
+
+void GLTexture::unmap(cudaStream_t stream) {
+	// note: code must not throw, otherwise mtx_.unlock() does not happen
+
+	cudaSafeCall(cudaGraphicsUnmapResources(1, &cuda_res_, stream));
+
+	//glActiveTexture(GL_TEXTURE0);
+	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, glbuf_);
+	// Select the appropriate texture
+	glBindTexture(GL_TEXTURE_2D, glid_);
+
+	glPixelStorei(GL_UNPACK_ROW_LENGTH, stride_);
+	// Make a texture from the buffer
+	if (type_ == Type::BGRA) {
+		glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
+	} else {
+		glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width_, height_, GL_RED, GL_FLOAT, NULL);
+	}
+	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+	glBindTexture(GL_TEXTURE_2D, 0);
+	glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+
+	mtx_.unlock();
+}
+
+unsigned int GLTexture::texture() const {
+	if (glbuf_ < std::numeric_limits<unsigned int>::max()) {
+		/*//glActiveTexture(GL_TEXTURE0);
+		glBindBuffer( GL_PIXEL_UNPACK_BUFFER, glbuf_);
+		// Select the appropriate texture
+		glBindTexture( GL_TEXTURE_2D, glid_);
+		// Make a texture from the buffer
+		glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, width_, height_, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
+		glBindBuffer( GL_PIXEL_UNPACK_BUFFER, 0);*/
+
+		return glid_;
+	} else {
+		throw FTL_Error("No OpenGL texture; use make() first");
+	}
+}
+
+void GLTexture::copyFrom(const ftl::cuda::TextureObject<uchar4> &buffer, cudaStream_t stream) {
+	if (buffer.width() == 0 || buffer.height() == 0) {
+		return;
+	}
+
+	make(buffer.width(), buffer.height(), ftl::utility::GLTexture::Type::BGRA);
+	auto dst = map(stream);
+	cudaSafeCall(cudaMemcpy2D(	dst.data, dst.step, buffer.devicePtr(), buffer.pitch(),
+								buffer.width()*4, buffer.height(), cudaMemcpyDeviceToDevice));
+	unmap(stream);
+}
+
+void GLTexture::copyFrom(const cv::Mat &im, cudaStream_t stream) {
+
+	if (im.rows == 0 || im.cols == 0 || im.channels() != 4 || im.type() != CV_8UC4) {
+		LOG(ERROR) << __FILE__ << ":" << __LINE__ << ": " << "bad OpenCV format";
+		return;
+	}
+
+	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+	make(im.cols, im.rows, ftl::utility::GLTexture::Type::BGRA);
+
+	auto dst = map(stream);
+	dst.upload(im, cvstream);  // use the wrapped stream rather than the default
+	unmap(stream);
+}
+
+void GLTexture::copyFrom(const cv::cuda::GpuMat &im, cudaStream_t stream) {
+
+	if (im.rows == 0 || im.cols == 0 || im.channels() != 4 || im.type() != CV_8UC4) {
+		LOG(ERROR) << __FILE__ << ":" << __LINE__ << ": " << "bad OpenCV format";
+		return;
+	}
+
+	auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+	make(im.cols, im.rows, ftl::utility::GLTexture::Type::BGRA);
+	auto dst = map(stream);
+	im.copyTo(dst, cvstream);
+	unmap(stream);
+}
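// Worked example of the stride computation in make() above; not part of the
// patch. Row bytes are padded up to a 128-byte multiple while stride_ is kept in
// pixels; the PBO is then allocated as stride_ * height * 4 bytes and both
// GL_UNPACK_ROW_LENGTH and the mapped GpuMat step are derived from stride_.
// The padded_stride_pixels helper is hypothetical.
constexpr int padded_stride_pixels(int width, int alignment = 128) {
	const int row_bytes = width * 4;
	return (row_bytes % alignment != 0)
		? (row_bytes + (alignment - (row_bytes % alignment))) / 4
		: width;
}
static_assert(padded_stride_pixels(1280) == 1280, "1280*4 = 5120 bytes is already 128-byte aligned");
static_assert(padded_stride_pixels(1000) == 1024, "1000*4 = 4000 bytes pads to 4096, i.e. 1024 pixels");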
diff --git a/components/renderers/cpp/src/overlay.cpp b/components/renderers/cpp/src/overlay.cpp
index 7054c275a..ef755170e 100644
--- a/components/renderers/cpp/src/overlay.cpp
+++ b/components/renderers/cpp/src/overlay.cpp
@@ -1,5 +1,6 @@
 #include <ftl/render/overlay.hpp>
 #include <ftl/utility/matrix_conversion.hpp>
+#include <ftl/cuda_common.hpp>
 
 #include <opencv2/imgproc.hpp>
 
@@ -21,12 +22,12 @@ namespace {
 		uniform float height;
 		uniform float far;
 		uniform float near;
-        uniform mat4 pose;
-        uniform vec3 scale;
+		uniform mat4 pose;
+		uniform vec3 scale;
 
 		void main() {
-            vec4 vert = pose*(vec4(scale*vertex,1.0));
-            vert = vert / vert.w;
+			vec4 vert = pose*(vec4(scale*vertex,1.0));
+			vert = vert / vert.w;
 			//vec4 pos = vec4(-vert.x*focal / -vert.z / (width/2.0),
 			//	vert.y*focal / -vert.z / (height/2.0),
 			//	(vert.z-near) / (far-near) * 2.0 - 1.0, 1.0);
@@ -46,7 +47,7 @@ namespace {
 		R"(#version 330
 		uniform vec4 blockColour;
 		out vec4 color;
-		
+
 		void main() {
 			color = blockColour;
 		})";
@@ -61,201 +62,201 @@ Overlay::~Overlay() {
 }
 
 void Overlay::_createShapes() {
-    shape_verts_ = {
-        // Box
-        {-1.0, -1.0, -1.0},
-        {1.0, -1.0, -1.0},
-        {1.0, 1.0, -1.0},
-        {-1.0, 1.0, -1.0},
-        {-1.0, -1.0, 1.0},
-        {1.0, -1.0, 1.0},
-        {1.0, 1.0, 1.0},
-        {-1.0, 1.0, 1.0},
-
-        // Camera
-        {0.0, 0.0, 0.0},        // 8
-        {0.5, 0.28, 0.5},
-        {0.5, -0.28, 0.5},
-        {-0.5, 0.28, 0.5},
-        {-0.5, -0.28, 0.5},
-
-        // Axis lines
-        {1.0, 0.0, 0.0},     // 13
-        {0.0, -1.0, 0.0},
-        {0.0, 0.0, 1.0},
-
-        // Plane XZ big
-        {-10.0, 0.0, -10.0},  // 16
-        {10.0, 0.0, -10.0},
-        {10.0, 0.0, 10.0},
-        {-10.0, 0.0, 10.0}
-    };
-
-    // Generate a big plane
-    for (int x=-9; x<=9; ++x) {
-        shape_verts_.push_back({float(x), 0.0, -10.0});
-        shape_verts_.push_back({float(x), 0.0, 10.0});
-        shape_verts_.push_back({-10.0, 0.0, float(x)});
-        shape_verts_.push_back({10.0, 0.0, float(x)});
-    }
-
-    shape_tri_indices_ = {
-        // Box
-        0, 1, 2,
-        0, 2, 3,
-        1, 5, 6,
-        1, 6, 2,
-        0, 4, 7,
-        0, 7, 3,
-        3, 2, 6,
-        3, 6, 7,
-        0, 1, 5,
-        0, 5, 4,
-
-        // Box Lines
-        0, 1,       // 30
-        1, 5,
-        5, 6,
-        6, 2,
-        2, 1,
-        2, 3,
-        3, 0,
-        3, 7,
-        7, 4,
-        4, 5,
-        6, 7,
-        0, 4,
-
-        // Camera
-        8, 9, 10,      // 54
-        8, 11, 12,
-        8, 9, 11,
-        8, 10, 12,
-
-        // Camera Lines
-        8, 9,           // 66
-        8, 10,
-        8, 11,
-        8, 12,
-        9, 10,
-        11, 12,
-        9, 11,
-        10, 12,
-
-        // Axis lines
-        8, 13,          // 82
-        8, 14,
-        8, 15,
-
-        // Big XZ Plane
-        16, 17, 18,     // 88
-        18, 19, 16
-    };
-
-    int i = 20;
-    for (int x=-10; x<=10; ++x) {
-        shape_tri_indices_.push_back(i++);
-        shape_tri_indices_.push_back(i++);
-        shape_tri_indices_.push_back(i++);
-        shape_tri_indices_.push_back(i++);
-    }
-
-    shapes_[Shape::BOX] = {0,30, 30, 12*2};
-    shapes_[Shape::CAMERA] = {54, 4*3, 66, 8*2};
-    shapes_[Shape::XZPLANE] = {88, 2*3, 94, 40*2};
-    shapes_[Shape::AXIS] = {0, 0, 82, 2*3};
-
-    oShader.uploadAttrib("vertex", sizeof(float3)*shape_verts_.size(), 3, sizeof(float), GL_FLOAT, false, shape_verts_.data());
-    oShader.uploadAttrib ("indices", sizeof(int)*shape_tri_indices_.size(), 1, sizeof(int), GL_UNSIGNED_INT, true, shape_tri_indices_.data());
+	shape_verts_ = {
+		// Box
+		{-1.0, -1.0, -1.0},
+		{1.0, -1.0, -1.0},
+		{1.0, 1.0, -1.0},
+		{-1.0, 1.0, -1.0},
+		{-1.0, -1.0, 1.0},
+		{1.0, -1.0, 1.0},
+		{1.0, 1.0, 1.0},
+		{-1.0, 1.0, 1.0},
+
+		// Camera
+		{0.0, 0.0, 0.0},        // 8
+		{0.5, 0.28, 0.5},
+		{0.5, -0.28, 0.5},
+		{-0.5, 0.28, 0.5},
+		{-0.5, -0.28, 0.5},
+
+		// Axis lines
+		{1.0, 0.0, 0.0},     // 13
+		{0.0, -1.0, 0.0},
+		{0.0, 0.0, 1.0},
+
+		// Plane XZ big
+		{-10.0, 0.0, -10.0},  // 16
+		{10.0, 0.0, -10.0},
+		{10.0, 0.0, 10.0},
+		{-10.0, 0.0, 10.0}
+	};
+
+	// Generate a big plane
+	for (int x=-9; x<=9; ++x) {
+		shape_verts_.push_back({float(x), 0.0, -10.0});
+		shape_verts_.push_back({float(x), 0.0, 10.0});
+		shape_verts_.push_back({-10.0, 0.0, float(x)});
+		shape_verts_.push_back({10.0, 0.0, float(x)});
+	}
+
+	shape_tri_indices_ = {
+		// Box
+		0, 1, 2,
+		0, 2, 3,
+		1, 5, 6,
+		1, 6, 2,
+		0, 4, 7,
+		0, 7, 3,
+		3, 2, 6,
+		3, 6, 7,
+		0, 1, 5,
+		0, 5, 4,
+
+		// Box Lines
+		0, 1,       // 30
+		1, 5,
+		5, 6,
+		6, 2,
+		2, 1,
+		2, 3,
+		3, 0,
+		3, 7,
+		7, 4,
+		4, 5,
+		6, 7,
+		0, 4,
+
+		// Camera
+		8, 9, 10,      // 54
+		8, 11, 12,
+		8, 9, 11,
+		8, 10, 12,
+
+		// Camera Lines
+		8, 9,           // 66
+		8, 10,
+		8, 11,
+		8, 12,
+		9, 10,
+		11, 12,
+		9, 11,
+		10, 12,
+
+		// Axis lines
+		8, 13,          // 82
+		8, 14,
+		8, 15,
+
+		// Big XZ Plane
+		16, 17, 18,     // 88
+		18, 19, 16
+	};
+
+	int i = 20;
+	for (int x=-10; x<=10; ++x) {
+		shape_tri_indices_.push_back(i++);
+		shape_tri_indices_.push_back(i++);
+		shape_tri_indices_.push_back(i++);
+		shape_tri_indices_.push_back(i++);
+	}
+
+	shapes_[Shape::BOX] = {0,30, 30, 12*2};
+	shapes_[Shape::CAMERA] = {54, 4*3, 66, 8*2};
+	shapes_[Shape::XZPLANE] = {88, 2*3, 94, 40*2};
+	shapes_[Shape::AXIS] = {0, 0, 82, 2*3};
+
+	oShader.uploadAttrib("vertex", 3*shape_verts_.size(), 3, sizeof(float), GL_FLOAT, false, shape_verts_.data());
+	oShader.uploadAttrib ("indices", 1*shape_tri_indices_.size(), 1, sizeof(int), GL_UNSIGNED_INT, true, shape_tri_indices_.data());
 }
 
 void Overlay::_drawFilledShape(Shape shape, const Eigen::Matrix4d &pose, float scale, uchar4 c) {
-    if (shapes_.find(shape) ==shapes_.end()) {
-        return;
-    }
-
-    Eigen::Matrix4f mv = pose.cast<float>();
-
-    auto [offset,count, loffset, lcount] = shapes_[shape];
-    UNUSED(loffset);
-    UNUSED(lcount);
-    oShader.setUniform("scale", scale);
-    oShader.setUniform("pose", mv);
-    oShader.setUniform("blockColour", Eigen::Vector4f(float(c.x)/255.0f,float(c.y)/255.0f,float(c.z)/255.0f,float(c.w)/255.0f));
+	if (shapes_.find(shape) ==shapes_.end()) {
+		return;
+	}
+
+	Eigen::Matrix4f mv = pose.cast<float>();
+
+	auto [offset,count, loffset, lcount] = shapes_[shape];
+	UNUSED(loffset);
+	UNUSED(lcount);
+	oShader.setUniform("scale", scale);
+	oShader.setUniform("pose", mv);
+	oShader.setUniform("blockColour", Eigen::Vector4f(float(c.x)/255.0f,float(c.y)/255.0f,float(c.z)/255.0f,float(c.w)/255.0f));
 	//oShader.drawIndexed(GL_TRIANGLES, offset, count);
-    glDrawElements(GL_TRIANGLES, (GLsizei) count, GL_UNSIGNED_INT,
-                   (const void *)(offset * sizeof(uint32_t)));
+	glDrawElements(GL_TRIANGLES, (GLsizei) count, GL_UNSIGNED_INT,
+				   (const void *)(offset * sizeof(uint32_t)));
 }
 
 void Overlay::_drawOutlinedShape(Shape shape, const Eigen::Matrix4d &pose, const Eigen::Vector3f &scale, uchar4 fill, uchar4 outline) {
-    if (shapes_.find(shape) ==shapes_.end()) {
-        return;
-    }
-
-    Eigen::Matrix4f mv = pose.cast<float>();
-
-    auto [offset,count,loffset,lcount] = shapes_[shape];
-    oShader.setUniform("scale", scale);
-    oShader.setUniform("pose", mv);
-
-    if (count > 0) {
-        oShader.setUniform("blockColour", Eigen::Vector4f(float(fill.x)/255.0f,float(fill.y)/255.0f,float(fill.z)/255.0f,float(fill.w)/255.0f));
-        //oShader.drawIndexed(GL_TRIANGLES, offset, count);
-        glDrawElements(GL_TRIANGLES, (GLsizei) count, GL_UNSIGNED_INT,
-                    (const void *)(offset * sizeof(uint32_t)));
-    }
-
-    if (lcount != 0) {
-        oShader.setUniform("blockColour", Eigen::Vector4f(float(outline.x)/255.0f,float(outline.y)/255.0f,float(outline.z)/255.0f,float(outline.w)/255.0f));
-        //oShader.drawIndexed(GL_LINE_LOOP, offset, count);
-        glDrawElements(GL_LINES, (GLsizei) lcount, GL_UNSIGNED_INT,
-                    (const void *)(loffset * sizeof(uint32_t)));
-    }
+	if (shapes_.find(shape) ==shapes_.end()) {
+		return;
+	}
+
+	Eigen::Matrix4f mv = pose.cast<float>();
+
+	auto [offset,count,loffset,lcount] = shapes_[shape];
+	oShader.setUniform("scale", scale);
+	oShader.setUniform("pose", mv);
+
+	if (count > 0) {
+		oShader.setUniform("blockColour", Eigen::Vector4f(float(fill.x)/255.0f,float(fill.y)/255.0f,float(fill.z)/255.0f,float(fill.w)/255.0f));
+		//oShader.drawIndexed(GL_TRIANGLES, offset, count);
+		glDrawElements(GL_TRIANGLES, (GLsizei) count, GL_UNSIGNED_INT,
+					(const void *)(offset * sizeof(uint32_t)));
+	}
+
+	if (lcount != 0) {
+		oShader.setUniform("blockColour", Eigen::Vector4f(float(outline.x)/255.0f,float(outline.y)/255.0f,float(outline.z)/255.0f,float(outline.w)/255.0f));
+		//oShader.drawIndexed(GL_LINE_LOOP, offset, count);
+		glDrawElements(GL_LINES, (GLsizei) lcount, GL_UNSIGNED_INT,
+					(const void *)(loffset * sizeof(uint32_t)));
+	}
 }
 
 void Overlay::_drawAxis(const Eigen::Matrix4d &pose, const Eigen::Vector3f &scale) {
-    Eigen::Matrix4f mv = pose.cast<float>();
-
-    auto [offset,count,loffset,lcount] = shapes_[Shape::AXIS];
-    UNUSED(offset);
-    UNUSED(count);
-    UNUSED(lcount);
-    oShader.setUniform("scale", scale);
-    oShader.setUniform("pose", mv);
-
-    oShader.setUniform("blockColour", Eigen::Vector4f(1.0f, 0.0f, 0.0f, 1.0f));
-    //oShader.drawIndexed(GL_LINE_LOOP, offset, count);
-    glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
-                (const void *)(loffset * sizeof(uint32_t)));
-
-    loffset += 2;
-    oShader.setUniform("blockColour", Eigen::Vector4f(0.0f, 1.0f, 0.0f, 1.0f));
-    //oShader.drawIndexed(GL_LINE_LOOP, offset, count);
-    glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
-                (const void *)(loffset * sizeof(uint32_t)));
-
-    loffset += 2;
-    oShader.setUniform("blockColour", Eigen::Vector4f(0.0f, 0.0f, 1.0f, 1.0f));
-    //oShader.drawIndexed(GL_LINE_LOOP, offset, count);
-    glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
-                (const void *)(loffset * sizeof(uint32_t)));
+	Eigen::Matrix4f mv = pose.cast<float>();
+
+	auto [offset,count,loffset,lcount] = shapes_[Shape::AXIS];
+	UNUSED(offset);
+	UNUSED(count);
+	UNUSED(lcount);
+	oShader.setUniform("scale", scale);
+	oShader.setUniform("pose", mv);
+
+	oShader.setUniform("blockColour", Eigen::Vector4f(1.0f, 0.0f, 0.0f, 1.0f));
+	//oShader.drawIndexed(GL_LINE_LOOP, offset, count);
+	glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
+				(const void *)(loffset * sizeof(uint32_t)));
+
+	loffset += 2;
+	oShader.setUniform("blockColour", Eigen::Vector4f(0.0f, 1.0f, 0.0f, 1.0f));
+	//oShader.drawIndexed(GL_LINE_LOOP, offset, count);
+	glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
+				(const void *)(loffset * sizeof(uint32_t)));
+
+	loffset += 2;
+	oShader.setUniform("blockColour", Eigen::Vector4f(0.0f, 0.0f, 1.0f, 1.0f));
+	//oShader.drawIndexed(GL_LINE_LOOP, offset, count);
+	glDrawElements(GL_LINES, (GLsizei) 2, GL_UNSIGNED_INT,
+				(const void *)(loffset * sizeof(uint32_t)));
 }
 
-void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const Eigen::Vector2f &screenSize) {
+void Overlay::draw(NVGcontext *ctx, ftl::data::FrameSet &fs, ftl::rgbd::Frame &frame, const Eigen::Vector2f &screenSize) {
 	if (!value("enabled", false)) return;
-	
+
 	double zfar = 8.0f;
-	auto intrin = state.getLeft();
+	auto intrin = frame.getLeft();
 	intrin = intrin.scaled(screenSize[0], screenSize[1]);
 
 	if (!init_) {
 		oShader.init("OverlayShader", overlayVertexShader, overlayFragmentShader);
-        oShader.bind();
-        _createShapes();
+		oShader.bind();
+		_createShapes();
 		init_ = true;
 	} else {
-	    oShader.bind();
-    }
+		oShader.bind();
+	}
 
 	float3 tris[] = {
 		{0.5f, -0.7f, 2.0f},
@@ -263,7 +264,7 @@ void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const
 		{0.8f, -0.4f, 2.0f}
 	};
 
-	auto pose = MatrixConversion::toCUDA(state.getPose().cast<float>().inverse());
+	auto pose = MatrixConversion::toCUDA(frame.getPose().cast<float>().inverse());
 
 	tris[0] = pose * tris[0];
 	tris[1] = pose * tris[1];
@@ -293,30 +294,43 @@ void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const
 
 	//glFinish();
 
-	if (value("show_poses", false)) {
+	if (value("show_poses", true)) {
 		for (size_t i=0; i<fs.frames.size(); ++i) {
-			auto pose = fs.frames[i].getPose(); //.inverse() * state.getPose();
+			auto &f = fs.frames[i].cast<ftl::rgbd::Frame>();
+			if (f.id().id == frame.id().id) continue;
+
+			auto pose = f.getPose(); //.inverse() * state.getPose();
+
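+			// Draw the other camera as an outlined marker with axes, and label it with its
+			// name at its position projected into the current view.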
+			std::string name = fs.frames[i].name();
+
+			auto tpose = frame.getPose().inverse() * pose;
+			_drawOutlinedShape(Shape::CAMERA, tpose, Eigen::Vector3f(0.2f,0.2f,0.2f), make_uchar4(255,0,0,80), make_uchar4(255,0,0,255));
+			_drawAxis(tpose, Eigen::Vector3f(0.2f, 0.2f, 0.2f));
+
+			float3 textpos;
+			textpos.x = tpose(0,3);
+			textpos.y = tpose(1,3);
+			textpos.z = tpose(2,3);
 
-			auto name = fs.frames[i].get<std::string>("name");
-            _drawOutlinedShape(Shape::CAMERA, state.getPose().inverse() * pose, Eigen::Vector3f(0.2f,0.2f,0.2f), make_uchar4(255,0,0,80), make_uchar4(255,0,0,255));
-            _drawAxis(state.getPose().inverse() * pose, Eigen::Vector3f(0.2f, 0.2f, 0.2f));
+			float2 textscreen = f.getLeft().camToScreen<float2>(textpos);
+			nvgText(ctx, textscreen.x, textscreen.y, name.c_str(), nullptr);
 
 			//ftl::overlay::drawCamera(state.getLeft(), out, over_depth_, fs.frames[i].getLeftCamera(), pose, cv::Scalar(0,0,255,255), 0.2,value("show_frustrum", false));
 			//if (name) ftl::overlay::drawText(state.getLeft(), out, over_depth_, *name, pos, 0.5, cv::Scalar(0,0,255,255));
 		}
 	}
 
-    if (value("show_xz_plane", false)) {
-        float gscale = value("grid_scale",0.5f);
-        _drawOutlinedShape(Shape::XZPLANE, state.getPose().inverse(), Eigen::Vector3f(gscale,gscale,gscale), make_uchar4(200,200,200,50), make_uchar4(255,255,255,100));
-    }
+	if (value("show_xz_plane", false)) {
+		float gscale = value("grid_scale",0.5f);
+		_drawOutlinedShape(Shape::XZPLANE, frame.getPose().inverse(), Eigen::Vector3f(gscale,gscale,gscale), make_uchar4(200,200,200,50), make_uchar4(255,255,255,100));
+	}
 
-    if (value("show_axis", true)) {
-        _drawAxis(state.getPose().inverse(), Eigen::Vector3f(0.5f, 0.5f, 0.5f));
-    }
+	if (value("show_axis", true)) {
+		_drawAxis(frame.getPose().inverse(), Eigen::Vector3f(0.5f, 0.5f, 0.5f));
+	}
 
-	if (value("show_shapes", false)) {
-		if (fs.hasChannel(Channel::Shapes3D)) {
+	if (value("show_shapes", true)) {
+		/*if (fs.hasChannel(Channel::Shapes3D)) {
 			std::vector<ftl::codecs::Shape3D> shapes;
 			fs.get(Channel::Shapes3D, shapes);
 
@@ -325,40 +339,52 @@ void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const
 				//Eigen::Vector4d pos = pose.inverse() * Eigen::Vector4d(0,0,0,1);
 				//pos /= pos[3];
 
-                Eigen::Vector3f scale(s.size[0]/2.0f, s.size[1]/2.0f, s.size[2]/2.0f);
+				Eigen::Vector3f scale(s.size[0]/2.0f, s.size[1]/2.0f, s.size[2]/2.0f);
 
-                if (s.type == ftl::codecs::Shape3DType::CAMERA) {
-                        //auto pose = s.pose;
-                    auto name = s.label;
-                    _drawOutlinedShape(Shape::CAMERA, state.getPose().inverse() * pose, Eigen::Vector3f(0.2f,0.2f,0.2f), make_uchar4(255,0,0,80), make_uchar4(255,0,0,255));
-                    _drawAxis(state.getPose().inverse() * pose, Eigen::Vector3f(0.2f, 0.2f, 0.2f));
-                } else {
-                    _drawOutlinedShape(Shape::BOX, state.getPose().inverse() * pose, scale, make_uchar4(255,0,255,80), make_uchar4(255,0,255,255));
-                }
+				if (s.type == ftl::codecs::Shape3DType::CAMERA) {
+						//auto pose = s.pose;
+					auto name = s.label;
+					_drawOutlinedShape(Shape::CAMERA, state.getPose().inverse() * pose, Eigen::Vector3f(0.2f,0.2f,0.2f), make_uchar4(255,0,0,80), make_uchar4(255,0,0,255));
+					_drawAxis(state.getPose().inverse() * pose, Eigen::Vector3f(0.2f, 0.2f, 0.2f));
+				} else {
+					_drawOutlinedShape(Shape::BOX, state.getPose().inverse() * pose, scale, make_uchar4(255,0,255,80), make_uchar4(255,0,255,255));
+				}
 
 				//ftl::overlay::drawFilledBox(state.getLeft(), out, over_depth_, pose, cv::Scalar(0,0,255,50), s.size.cast<double>());
-                //ftl::overlay::drawBox(state.getLeft(), out, over_depth_, pose, cv::Scalar(0,0,255,255), s.size.cast<double>());
+				//ftl::overlay::drawBox(state.getLeft(), out, over_depth_, pose, cv::Scalar(0,0,255,255), s.size.cast<double>());
 				//ftl::overlay::drawText(state.getLeft(), out, over_depth_, s.label, pos, 0.5, cv::Scalar(0,0,255,100));
 			}
-		}
+		}*/
 
 		for (size_t i=0; i<fs.frames.size(); ++i) {
 			if (fs.frames[i].hasChannel(Channel::Shapes3D)) {
-				std::vector<ftl::codecs::Shape3D> shapes;
-				fs.frames[i].get(Channel::Shapes3D, shapes);
+				const auto &shapes = fs.frames[i].get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
 
 				for (auto &s : shapes) {
 					auto pose = s.pose.cast<double>(); //.inverse() * state.getPose();
 					//Eigen::Vector4d pos = pose.inverse() * Eigen::Vector4d(0,0,0,1);
 					//pos /= pos[3];
 
-                    Eigen::Vector3f scale(s.size[0]/2.0f, s.size[1]/2.0f, s.size[2]/2.0f);
+					Eigen::Vector3f scale(s.size[0]/2.0f, s.size[1]/2.0f, s.size[2]/2.0f);
+
+					auto tpose = frame.getPose().inverse() * pose;
+
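+					// Choose an overlay primitive by shape type; unhandled types are skipped.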
+					switch (s.type) {
+					case ftl::codecs::Shape3DType::CAMERA: _drawOutlinedShape(Shape::CAMERA, tpose, scale, make_uchar4(255,0,0,80), make_uchar4(255,0,0,255)); break;
+					case ftl::codecs::Shape3DType::CLIPPING: _drawOutlinedShape(Shape::BOX, tpose, scale, make_uchar4(255,0,255,80), make_uchar4(255,0,255,255)); break;
+					case ftl::codecs::Shape3DType::ARUCO: _drawAxis(tpose, Eigen::Vector3f(0.2f, 0.2f, 0.2f)); break;
+					default: break;
+					}
 
-                    switch (s.type) {
-                    case ftl::codecs::Shape3DType::CLIPPING: _drawOutlinedShape(Shape::BOX, state.getPose().inverse() * pose, scale, make_uchar4(255,0,255,80), make_uchar4(255,0,255,255)); break;
-                    case ftl::codecs::Shape3DType::ARUCO: _drawAxis(state.getPose().inverse() * pose, Eigen::Vector3f(0.2f, 0.2f, 0.2f)); break;
-                    default: break;
-                    }
+					if (s.label.size() > 0) {
+						float3 textpos;
+						textpos.x = tpose(0,3);
+						textpos.y = tpose(1,3);
+						textpos.z = tpose(2,3);
+
+						float2 textscreen = frame.getLeft().camToScreen<float2>(textpos);
+						nvgText(ctx, textscreen.x, textscreen.y, s.label.c_str(), nullptr);
+					}
 
 					//ftl::overlay::drawBox(state.getLeft(), out, over_depth_, pose, cv::Scalar(0,0,255,100), s.size.cast<double>());
 					//ftl::overlay::drawText(state.getLeft(), out, over_depth_, s.label, pos, 0.5, cv::Scalar(0,0,255,100));
@@ -367,384 +393,384 @@ void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const
 		}
 	}
 
-    glDisable(GL_LINE_SMOOTH);
+	glDisable(GL_LINE_SMOOTH);
 	glDisable(GL_BLEND);
 
 	//cv::flip(out, out, 0);
 }
 
 /*void ftl::overlay::draw3DLine(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Vector4d &begin,
-        const Eigen::Vector4d &end,
-        const cv::Scalar &linecolour) {
-
-
-    auto begin_pos = cam.camToScreen<int2>(make_float3(begin[0], begin[1], begin[2]));
-    auto end_pos = cam.camToScreen<int2>(make_float3(end[0], end[1], end[2]));
-
-    cv::LineIterator lineit(colour, cv::Point(begin_pos.x, begin_pos.y), cv::Point(end_pos.x, end_pos.y));
-    double z_grad = (end[2] - begin[2]) / lineit.count;
-    double current_z = begin[2];
-
-    for(int i = 0; i < lineit.count; i++, ++lineit) {
-        colour.at<cv::Vec4b>(lineit.pos()) = linecolour;
-        depth.at<float>(lineit.pos()) = current_z;
-        current_z += z_grad;
-    }
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Vector4d &begin,
+		const Eigen::Vector4d &end,
+		const cv::Scalar &linecolour) {
+
+
+	auto begin_pos = cam.camToScreen<int2>(make_float3(begin[0], begin[1], begin[2]));
+	auto end_pos = cam.camToScreen<int2>(make_float3(end[0], end[1], end[2]));
+
+	cv::LineIterator lineit(colour, cv::Point(begin_pos.x, begin_pos.y), cv::Point(end_pos.x, end_pos.y));
+	double z_grad = (end[2] - begin[2]) / lineit.count;
+	double current_z = begin[2];
+
+	for(int i = 0; i < lineit.count; i++, ++lineit) {
+		colour.at<cv::Vec4b>(lineit.pos()) = linecolour;
+		depth.at<float>(lineit.pos()) = current_z;
+		current_z += z_grad;
+	}
 }
 
 void ftl::overlay::drawPoseBox(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        double size) {
-
-    double size2 = size/2.0;
-
-    Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2,size2,-size2,1);
-    Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2,-size2,-size2,1);
-    Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2,-size2,-size2,1);
-    Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2,size2,-size2,1);
-    Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2,-size2,size2,1);
-    Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2,size2,size2,1);
-    Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2,-size2,size2,1);
-    Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2,size2,size2,1);
-
-    p001 /= p001[3];
-    p011 /= p011[3];
-    p111 /= p111[3];
-    p101 /= p101[3];
-    p110 /= p110[3];
-    p100 /= p100[3];
-    p010 /= p010[3];
-    p000 /= p000[3];
-
-    if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
-
-    draw3DLine(cam, colour, depth, p000, p001, linecolour);
-    draw3DLine(cam, colour, depth, p000, p010, linecolour);
-    draw3DLine(cam, colour, depth, p000, p100, linecolour);
-
-    draw3DLine(cam, colour, depth, p001, p011, linecolour);
-    draw3DLine(cam, colour, depth, p001, p101, linecolour);
-
-    draw3DLine(cam, colour, depth, p010, p011, linecolour);
-    draw3DLine(cam, colour, depth, p010, p110, linecolour);
-
-    draw3DLine(cam, colour, depth, p100, p101, linecolour);
-    draw3DLine(cam, colour, depth, p100, p110, linecolour);
-
-    draw3DLine(cam, colour, depth, p101, p111, linecolour);
-    draw3DLine(cam, colour, depth, p110, p111, linecolour);
-    draw3DLine(cam, colour, depth, p011, p111, linecolour);
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		double size) {
+
+	double size2 = size/2.0;
+
+	Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2,size2,-size2,1);
+	Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2,-size2,-size2,1);
+	Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2,-size2,-size2,1);
+	Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2,size2,-size2,1);
+	Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2,-size2,size2,1);
+	Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2,size2,size2,1);
+	Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2,-size2,size2,1);
+	Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2,size2,size2,1);
+
+	p001 /= p001[3];
+	p011 /= p011[3];
+	p111 /= p111[3];
+	p101 /= p101[3];
+	p110 /= p110[3];
+	p100 /= p100[3];
+	p010 /= p010[3];
+	p000 /= p000[3];
+
+	if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
+
+	draw3DLine(cam, colour, depth, p000, p001, linecolour);
+	draw3DLine(cam, colour, depth, p000, p010, linecolour);
+	draw3DLine(cam, colour, depth, p000, p100, linecolour);
+
+	draw3DLine(cam, colour, depth, p001, p011, linecolour);
+	draw3DLine(cam, colour, depth, p001, p101, linecolour);
+
+	draw3DLine(cam, colour, depth, p010, p011, linecolour);
+	draw3DLine(cam, colour, depth, p010, p110, linecolour);
+
+	draw3DLine(cam, colour, depth, p100, p101, linecolour);
+	draw3DLine(cam, colour, depth, p100, p110, linecolour);
+
+	draw3DLine(cam, colour, depth, p101, p111, linecolour);
+	draw3DLine(cam, colour, depth, p110, p111, linecolour);
+	draw3DLine(cam, colour, depth, p011, p111, linecolour);
 }
 
 void ftl::overlay::drawBox(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        const Eigen::Vector3d &size) {
-
-    double size2x = size[0]/2.0;
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		const Eigen::Vector3d &size) {
+
+	double size2x = size[0]/2.0;
 	double size2y = size[1]/2.0;
 	double size2z = size[2]/2.0;
 
-    Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2x,size2y,-size2z,1);
-    Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,-size2z,1);
-    Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,-size2z,1);
-    Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,-size2z,1);
-    Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,size2z,1);
-    Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,size2z,1);
-    Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,size2z,1);
-    Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2x,size2y,size2z,1);
-
-    p001 /= p001[3];
-    p011 /= p011[3];
-    p111 /= p111[3];
-    p101 /= p101[3];
-    p110 /= p110[3];
-    p100 /= p100[3];
-    p010 /= p010[3];
-    p000 /= p000[3];
-
-    if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
-
-    draw3DLine(cam, colour, depth, p000, p001, linecolour);
-    draw3DLine(cam, colour, depth, p000, p010, linecolour);
-    draw3DLine(cam, colour, depth, p000, p100, linecolour);
-
-    draw3DLine(cam, colour, depth, p001, p011, linecolour);
-    draw3DLine(cam, colour, depth, p001, p101, linecolour);
-
-    draw3DLine(cam, colour, depth, p010, p011, linecolour);
-    draw3DLine(cam, colour, depth, p010, p110, linecolour);
-
-    draw3DLine(cam, colour, depth, p100, p101, linecolour);
-    draw3DLine(cam, colour, depth, p100, p110, linecolour);
-
-    draw3DLine(cam, colour, depth, p101, p111, linecolour);
-    draw3DLine(cam, colour, depth, p110, p111, linecolour);
-    draw3DLine(cam, colour, depth, p011, p111, linecolour);
+	Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2x,size2y,-size2z,1);
+	Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,-size2z,1);
+	Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,-size2z,1);
+	Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,-size2z,1);
+	Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,size2z,1);
+	Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,size2z,1);
+	Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,size2z,1);
+	Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2x,size2y,size2z,1);
+
+	p001 /= p001[3];
+	p011 /= p011[3];
+	p111 /= p111[3];
+	p101 /= p101[3];
+	p110 /= p110[3];
+	p100 /= p100[3];
+	p010 /= p010[3];
+	p000 /= p000[3];
+
+	if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
+
+	draw3DLine(cam, colour, depth, p000, p001, linecolour);
+	draw3DLine(cam, colour, depth, p000, p010, linecolour);
+	draw3DLine(cam, colour, depth, p000, p100, linecolour);
+
+	draw3DLine(cam, colour, depth, p001, p011, linecolour);
+	draw3DLine(cam, colour, depth, p001, p101, linecolour);
+
+	draw3DLine(cam, colour, depth, p010, p011, linecolour);
+	draw3DLine(cam, colour, depth, p010, p110, linecolour);
+
+	draw3DLine(cam, colour, depth, p100, p101, linecolour);
+	draw3DLine(cam, colour, depth, p100, p110, linecolour);
+
+	draw3DLine(cam, colour, depth, p101, p111, linecolour);
+	draw3DLine(cam, colour, depth, p110, p111, linecolour);
+	draw3DLine(cam, colour, depth, p011, p111, linecolour);
 }
 
 void ftl::overlay::drawFilledBox(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        const Eigen::Vector3d &size) {
-
-    double size2x = size[0]/2.0;
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		const Eigen::Vector3d &size) {
+
+	double size2x = size[0]/2.0;
 	double size2y = size[1]/2.0;
 	double size2z = size[2]/2.0;
 
-    Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2x,size2y,-size2z,1);
-    Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,-size2z,1);
-    Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,-size2z,1);
-    Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,-size2z,1);
-    Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,size2z,1);
-    Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,size2z,1);
-    Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,size2z,1);
-    Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2x,size2y,size2z,1);
-
-    p001 /= p001[3];
-    p011 /= p011[3];
-    p111 /= p111[3];
-    p101 /= p101[3];
-    p110 /= p110[3];
-    p100 /= p100[3];
-    p010 /= p010[3];
-    p000 /= p000[3];
-
-    if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
-
-    std::array<cv::Point, 4> pts;
-
-    auto p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
-
-    p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
-
-    p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
-
-    p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
-
-    p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
-
-    p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
-    pts[0] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
-    pts[1] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
-    pts[2] = cv::Point(p.x, p.y);
-    p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
-    pts[3] = cv::Point(p.x, p.y);
-    cv::fillConvexPoly(colour, pts, linecolour);
+	Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(size2x,size2y,-size2z,1);
+	Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,-size2z,1);
+	Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,-size2z,1);
+	Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,-size2z,1);
+	Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2x,-size2y,size2z,1);
+	Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2x,size2y,size2z,1);
+	Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2x,-size2y,size2z,1);
+	Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2x,size2y,size2z,1);
+
+	p001 /= p001[3];
+	p011 /= p011[3];
+	p111 /= p111[3];
+	p101 /= p101[3];
+	p110 /= p110[3];
+	p100 /= p100[3];
+	p010 /= p010[3];
+	p000 /= p000[3];
+
+	if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
+
+	std::array<cv::Point, 4> pts;
+
+	auto p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
+
+	p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
+
+	p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
+
+	p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
+
+	p = cam.camToScreen<int2>(make_float3(p000[0], p000[1], p000[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p001[0], p001[1], p001[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p101[0], p101[1], p101[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p100[0], p100[1], p100[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
+
+	p = cam.camToScreen<int2>(make_float3(p010[0], p010[1], p010[2]));
+	pts[0] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p011[0], p011[1], p011[2]));
+	pts[1] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p111[0], p111[1], p111[2]));
+	pts[2] = cv::Point(p.x, p.y);
+	p = cam.camToScreen<int2>(make_float3(p110[0], p110[1], p110[2]));
+	pts[3] = cv::Point(p.x, p.y);
+	cv::fillConvexPoly(colour, pts, linecolour);
 }
 
 void ftl::overlay::drawRectangle(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        double width, double height) {
-
-    double width2 = width/2.0;
-    double height2 = height/2.0;
-
-    Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(width2,height2,0,1);
-    Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(width2,-height2,0,1);
-    Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-width2,-height2,0,1);
-    Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-width2,height2,0,1);
-
-    p001 /= p001[3];
-    p011 /= p011[3];
-    p111 /= p111[3];
-    p101 /= p101[3];
-
-    if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1) return;
-
-    draw3DLine(cam, colour, depth, p001, p011, linecolour);
-    draw3DLine(cam, colour, depth, p001, p101, linecolour);
-    draw3DLine(cam, colour, depth, p101, p111, linecolour);
-    draw3DLine(cam, colour, depth, p011, p111, linecolour);
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		double width, double height) {
+
+	double width2 = width/2.0;
+	double height2 = height/2.0;
+
+	Eigen::Vector4d p001 = pose.inverse() * Eigen::Vector4d(width2,height2,0,1);
+	Eigen::Vector4d p011 = pose.inverse() * Eigen::Vector4d(width2,-height2,0,1);
+	Eigen::Vector4d p111 = pose.inverse() * Eigen::Vector4d(-width2,-height2,0,1);
+	Eigen::Vector4d p101 = pose.inverse() * Eigen::Vector4d(-width2,height2,0,1);
+
+	p001 /= p001[3];
+	p011 /= p011[3];
+	p111 /= p111[3];
+	p101 /= p101[3];
+
+	if (p001[2] < 0.1 || p011[2] < 0.1 || p111[2] < 0.1 || p101[2] < 0.1) return;
+
+	draw3DLine(cam, colour, depth, p001, p011, linecolour);
+	draw3DLine(cam, colour, depth, p001, p101, linecolour);
+	draw3DLine(cam, colour, depth, p101, p111, linecolour);
+	draw3DLine(cam, colour, depth, p011, p111, linecolour);
 }
 
 void ftl::overlay::drawPoseCone(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        double size) {
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		double size) {
 
-    double size2 = size;
+	double size2 = size;
 
-    Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2,-size2,size2,1);
-    Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2,size2,size2,1);
-    Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2,-size2,size2,1);
-    Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2,size2,size2,1);
-    Eigen::Vector4d origin = pose.inverse() * Eigen::Vector4d(0,0,0,1);
+	Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(-size2,-size2,size2,1);
+	Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(-size2,size2,size2,1);
+	Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(size2,-size2,size2,1);
+	Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(size2,size2,size2,1);
+	Eigen::Vector4d origin = pose.inverse() * Eigen::Vector4d(0,0,0,1);
 
-    p110 /= p110[3];
-    p100 /= p100[3];
-    p010 /= p010[3];
-    p000 /= p000[3];
-    origin /= origin[3];
+	p110 /= p110[3];
+	p100 /= p100[3];
+	p010 /= p010[3];
+	p000 /= p000[3];
+	origin /= origin[3];
 
-    if (origin[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
+	if (origin[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
 
-    draw3DLine(cam, colour, depth, p000, origin, linecolour);
-    draw3DLine(cam, colour, depth, p000, p010, linecolour);
-    draw3DLine(cam, colour, depth, p000, p100, linecolour);
+	draw3DLine(cam, colour, depth, p000, origin, linecolour);
+	draw3DLine(cam, colour, depth, p000, p010, linecolour);
+	draw3DLine(cam, colour, depth, p000, p100, linecolour);
 
-    draw3DLine(cam, colour, depth, p010, origin, linecolour);
-    draw3DLine(cam, colour, depth, p010, p110, linecolour);
+	draw3DLine(cam, colour, depth, p010, origin, linecolour);
+	draw3DLine(cam, colour, depth, p010, p110, linecolour);
 
-    draw3DLine(cam, colour, depth, p100, origin, linecolour);
-    draw3DLine(cam, colour, depth, p100, p110, linecolour);
+	draw3DLine(cam, colour, depth, p100, origin, linecolour);
+	draw3DLine(cam, colour, depth, p100, p110, linecolour);
 
-    draw3DLine(cam, colour, depth, p110, origin, linecolour);
+	draw3DLine(cam, colour, depth, p110, origin, linecolour);
 }
 
 void ftl::overlay::drawCamera(
-        const ftl::rgbd::Camera &vcam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const ftl::rgbd::Camera &camera,
-        const Eigen::Matrix4d &pose,
-        const cv::Scalar &linecolour,
-        double scale, bool frustrum) {
-
-    //double size2 = size;
-
-    const auto &params = camera;
-    double width = (static_cast<double>(params.width) / static_cast<double>(params.fx)) * scale;
-    double height = (static_cast<double>(params.height) / static_cast<double>(params.fx)) * scale;
-    double width2 = width / 2.0;
-    double height2 = height / 2.0;
-
-    double principx = (((static_cast<double>(params.width) / 2.0) + params.cx) / static_cast<double>(params.fx)) * scale;
-    double principy = (((static_cast<double>(params.height) / 2.0) + params.cy) / static_cast<double>(params.fx)) * scale;
-
-    auto ptcoord = params.screenToCam(0,0,scale);
-    Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-    ptcoord = params.screenToCam(0,params.height,scale);
-    Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-    ptcoord = params.screenToCam(params.width,0,scale);
-    Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-    ptcoord = params.screenToCam(params.width,params.height,scale);
-    Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-    Eigen::Vector4d origin = pose.inverse() * Eigen::Vector4d(0,0,0,1);
-
-    p110 /= p110[3];
-    p100 /= p100[3];
-    p010 /= p010[3];
-    p000 /= p000[3];
-    origin /= origin[3];
-
-    if (origin[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
-
-    draw3DLine(vcam, colour, depth, p000, origin, linecolour);
-    draw3DLine(vcam, colour, depth, p000, p010, linecolour);
-    draw3DLine(vcam, colour, depth, p000, p100, linecolour);
-
-    draw3DLine(vcam, colour, depth, p010, origin, linecolour);
-    draw3DLine(vcam, colour, depth, p010, p110, linecolour);
-
-    draw3DLine(vcam, colour, depth, p100, origin, linecolour);
-    draw3DLine(vcam, colour, depth, p100, p110, linecolour);
-
-    draw3DLine(vcam, colour, depth, p110, origin, linecolour);
-
-    if (frustrum) {
-        const double fscale = 16.0;
-        ptcoord = params.screenToCam(0,0,fscale);
-        Eigen::Vector4d f110 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-        ptcoord = params.screenToCam(0,params.height,fscale);
-        Eigen::Vector4d f100 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-        ptcoord = params.screenToCam(params.width,0,fscale);
-        Eigen::Vector4d f010 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-        ptcoord = params.screenToCam(params.width,params.height,fscale);
-        Eigen::Vector4d f000 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
-
-        f110 /= f110[3];
-        f100 /= f100[3];
-        f010 /= f010[3];
-        f000 /= f000[3];
-
-        if (f110[2] < 0.1 || f100[2] < 0.1 || f010[2] < 0.1 || f000[2] < 0.1) return;
-
-        draw3DLine(vcam, colour, depth, f000, p000, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f010, p010, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f100, p100, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f110, p110, cv::Scalar(0,255,0,0));
-
-        draw3DLine(vcam, colour, depth, f000, f010, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f000, f100, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f010, f110, cv::Scalar(0,255,0,0));
-        draw3DLine(vcam, colour, depth, f100, f110, cv::Scalar(0,255,0,0));
-    }
+		const ftl::rgbd::Camera &vcam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+		const ftl::rgbd::Camera &camera,
+		const Eigen::Matrix4d &pose,
+		const cv::Scalar &linecolour,
+		double scale, bool frustrum) {
+
+	//double size2 = size;
+
+	const auto &params = camera;
+	double width = (static_cast<double>(params.width) / static_cast<double>(params.fx)) * scale;
+	double height = (static_cast<double>(params.height) / static_cast<double>(params.fx)) * scale;
+	double width2 = width / 2.0;
+	double height2 = height / 2.0;
+
+	double principx = (((static_cast<double>(params.width) / 2.0) + params.cx) / static_cast<double>(params.fx)) * scale;
+	double principy = (((static_cast<double>(params.height) / 2.0) + params.cy) / static_cast<double>(params.fx)) * scale;
+
+	auto ptcoord = params.screenToCam(0,0,scale);
+	Eigen::Vector4d p110 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+	ptcoord = params.screenToCam(0,params.height,scale);
+	Eigen::Vector4d p100 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+	ptcoord = params.screenToCam(params.width,0,scale);
+	Eigen::Vector4d p010 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+	ptcoord = params.screenToCam(params.width,params.height,scale);
+	Eigen::Vector4d p000 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+	Eigen::Vector4d origin = pose.inverse() * Eigen::Vector4d(0,0,0,1);
+
+	p110 /= p110[3];
+	p100 /= p100[3];
+	p010 /= p010[3];
+	p000 /= p000[3];
+	origin /= origin[3];
+
+	if (origin[2] < 0.1 || p110[2] < 0.1 || p100[2] < 0.1 || p010[2] < 0.1 || p000[2] < 0.1) return;
+
+	draw3DLine(vcam, colour, depth, p000, origin, linecolour);
+	draw3DLine(vcam, colour, depth, p000, p010, linecolour);
+	draw3DLine(vcam, colour, depth, p000, p100, linecolour);
+
+	draw3DLine(vcam, colour, depth, p010, origin, linecolour);
+	draw3DLine(vcam, colour, depth, p010, p110, linecolour);
+
+	draw3DLine(vcam, colour, depth, p100, origin, linecolour);
+	draw3DLine(vcam, colour, depth, p100, p110, linecolour);
+
+	draw3DLine(vcam, colour, depth, p110, origin, linecolour);
+
+	if (frustrum) {
+		const double fscale = 16.0;
+		ptcoord = params.screenToCam(0,0,fscale);
+		Eigen::Vector4d f110 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+		ptcoord = params.screenToCam(0,params.height,fscale);
+		Eigen::Vector4d f100 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+		ptcoord = params.screenToCam(params.width,0,fscale);
+		Eigen::Vector4d f010 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+		ptcoord = params.screenToCam(params.width,params.height,fscale);
+		Eigen::Vector4d f000 = pose.inverse() * Eigen::Vector4d(ptcoord.x,ptcoord.y,ptcoord.z,1);
+
+		f110 /= f110[3];
+		f100 /= f100[3];
+		f010 /= f010[3];
+		f000 /= f000[3];
+
+		if (f110[2] < 0.1 || f100[2] < 0.1 || f010[2] < 0.1 || f000[2] < 0.1) return;
+
+		draw3DLine(vcam, colour, depth, f000, p000, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f010, p010, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f100, p100, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f110, p110, cv::Scalar(0,255,0,0));
+
+		draw3DLine(vcam, colour, depth, f000, f010, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f000, f100, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f010, f110, cv::Scalar(0,255,0,0));
+		draw3DLine(vcam, colour, depth, f100, f110, cv::Scalar(0,255,0,0));
+	}
 }
 
 void ftl::overlay::drawText(
-        const ftl::rgbd::Camera &cam,
-        cv::Mat &colour,
-        cv::Mat &depth,
-        const std::string &text,
-        const Eigen::Vector4d &pos,
-        double size,
-        const cv::Scalar &textcolour) {
-
-    auto pt = cam.camToScreen<int2>(make_float3(pos[0], pos[1], pos[2]));
-    if (pos[2] < 0.1) return;
-    cv::putText(colour, text, cv::Point(pt.x, colour.rows-pt.y), 0, size, textcolour, 1, cv::LINE_8, true);
+		const ftl::rgbd::Camera &cam,
+		cv::Mat &colour,
+		cv::Mat &depth,
+	if (shapes_.find(shape) == shapes_.end()) {
+		const Eigen::Vector4d &pos,
+		double size,
+		const cv::Scalar &textcolour) {
+
+	auto pt = cam.camToScreen<int2>(make_float3(pos[0], pos[1], pos[2]));
+	if (pos[2] < 0.1) return;
+	cv::putText(colour, text, cv::Point(pt.x, colour.rows-pt.y), 0, size, textcolour, 1, cv::LINE_8, true);
 }*/
diff --git a/components/renderers/cpp/src/reprojection.cu b/components/renderers/cpp/src/reprojection.cu
index 358e465c1..e58ba6fa7 100644
--- a/components/renderers/cpp/src/reprojection.cu
+++ b/components/renderers/cpp/src/reprojection.cu
@@ -239,7 +239,7 @@ __global__ void reprojection_kernel(
 	const float d = depth_in.tex2D((int)x, (int)y);
 	if (d > params.camera.minDepth && d < params.camera.maxDepth) {
 		//const float2 rpt = convertScreen<VPMODE>(params, x, y);
-		const float3 camPos = transform * params.camera.project<PROJECT>(make_float3(x, y, d));
+		const float3 camPos = transform * params.camera.unproject<PROJECT>(make_float3(x, y, d));
 		if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
 			const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
 
@@ -250,7 +250,8 @@ __global__ void reprojection_kernel(
 
 				// Boolean match (0 or 1 weight). 1.0 if depths are sufficiently close
 				float weight = depthMatching(params, camPos.z, d2);
-				weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
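+				// Only fold in the weights channel when the render flags enable it;
+				// weight values are normalised by 32767.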
+				if (params.m_flags & ftl::render::kUseWeightsChannel)
+					weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
 
 				const B output = make<B>(input);  // * weight; //weightInput(input, weight);
 
@@ -282,30 +283,35 @@ void ftl::cuda::reproject(
 		if (params.accumulationMode == AccumulationFunction::CloseWeights) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+	if (shapes_.find(shape) == shapes_.end()) {
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::Simple) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 			}
@@ -314,30 +320,35 @@ void ftl::cuda::reproject(
 		if (params.accumulationMode == AccumulationFunction::CloseWeights) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::Simple) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			}
 		} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
 			switch (params.projection) {
 			case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+			case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 			}
diff --git a/components/renderers/cpp/src/screen.cu b/components/renderers/cpp/src/screen.cu
index f2dd97e43..5ebbdd223 100644
--- a/components/renderers/cpp/src/screen.cu
+++ b/components/renderers/cpp/src/screen.cu
@@ -91,6 +91,12 @@ void ftl::cuda::screen_coord(TextureObject<float> &depth, TextureObject<float> &
 		case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
 		case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
 		}
+	} else if (params.projection == Projection::ORTHOGRAPHIC) {
+		switch (params.viewPortMode) {
+		case ViewPortMode::Disabled: screen_coord_kernel<ViewPortMode::Disabled, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
+		case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
+		case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
+		}
 	}
 	cudaSafeCall( cudaGetLastError() );
 }
diff --git a/components/renderers/cpp/src/touch.cu b/components/renderers/cpp/src/touch.cu
new file mode 100644
index 000000000..e81d90180
--- /dev/null
+++ b/components/renderers/cpp/src/touch.cu
@@ -0,0 +1,50 @@
+#include <ftl/cuda/touch.hpp>
+#include <ftl/cuda/warp.hpp>
+
+using ftl::cuda::TextureObject;
+using ftl::cuda::warpSum;
+
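+// Pack one collision event into a single record: lane count in the top 8 bits,
+// screen x in bits 12-23, screen y in the low 12 bits, plus the collision depth.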
+__device__ inline ftl::cuda::Collision pack_collision(int cx, int cy, int num, float cd) {
+	return ftl::cuda::Collision{(num << 24) | (cx << 12) | (cy), cd};
+}
+
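+// One thread per pixel: flag a collision wherever the live depth and the rendered
+// depth are within 'dist' of each other, then reduce across each 32-thread warp to
+// record the average collision position and depth (appended after the counter held
+// in collisions[0]).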
+__global__ void touch_kernel(TextureObject<float> depth_in, TextureObject<float> depth_out, ftl::cuda::Collision *collisions, int max_collisions, float dist) {
+	const int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	bool collision = false;
+	float cd = 0.0f;
+
+	if (x >= 0 && y >= 0 && x < depth_in.width() && y < depth_in.height()) {
+		//uint2 screenPos = make_uint2(30000,30000);
+
+		const float din = depth_in.tex2D(x, y);
+		const float dout = depth_out.tex2D(x, y);
+
+		collision = (din < 1000.0f && fabsf(din-dout) < dist);
+		cd = fminf(din,dout);
+		depth_out(x,y) = cd;
+	}
+
+	int num_collisions = __popc(__ballot_sync(0xFFFFFFFF, collision));
+	float cx = warpSum((collision) ? float(x) : 0.0f) / float(num_collisions);
+	float cy = warpSum((collision) ? float(y) : 0.0f) / float(num_collisions);
+	cd = warpSum((collision) ? float(cd) : 0.0f) / float(num_collisions);
+	if ((threadIdx.x+threadIdx.y*blockDim.x) % 32 == 0) {
+		if (num_collisions > 0) {
+			//printf("Collision: %f,%f [%d]\n", cx, cy, num_collisions);
+			int ix = atomicInc(&collisions[0].screen, max_collisions-1);
+			collisions[ix+1] = pack_collision(cx, cy, num_collisions, cd);
+		}
+	}
+}
+
+#define T_PER_BLOCK 8
+
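+// Host-side launcher: 8x8 thread blocks covering the depth map. collisions[0].screen is
+// used as an atomic write index, so packed results are appended starting at collisions[1].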
+void ftl::cuda::touch_merge(TextureObject<float> &depth_in, TextureObject<float> &depth_out, ftl::cuda::Collision *collisions, int max_collisions, float dist, cudaStream_t stream) {
+	const dim3 gridSize((depth_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	touch_kernel<<<gridSize, blockSize, 0, stream>>>(depth_in, depth_out, collisions, max_collisions, dist);
+	cudaSafeCall( cudaGetLastError() );
+}
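
The kernel above reduces each warp's collision candidates to a single packed entry: pack_collision() stores the warp collision count in bits 24-31 and the averaged x/y screen position in bits 12-23 and 0-11, while collisions[0].screen is reused as an atomic write counter and the averaged depth travels in the second Collision member. Below is a minimal host-side decoding sketch that simply mirrors that packing; the member name `depth` for the second field and reading the counter via `buf[0].screen` are assumptions, not part of this patch.

    // Sketch: unpack Collision entries written by touch_kernel (see assumptions above).
    #include <ftl/cuda/touch.hpp>
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct DecodedCollision { int x; int y; int count; float depth; };

    inline std::vector<DecodedCollision> decodeCollisions(const ftl::cuda::Collision *buf, int max_collisions) {
        std::vector<DecodedCollision> out;
        // Entry 0 holds the atomicInc() counter; valid entries follow at indices 1..n.
        const uint32_t n = std::min<uint32_t>(buf[0].screen, uint32_t(max_collisions - 1));
        for (uint32_t i = 1; i <= n; ++i) {
            const uint32_t v = buf[i].screen;
            out.push_back({ int((v >> 12) & 0xFFF), int(v & 0xFFF), int(v >> 24), buf[i].depth });
        }
        return out;
    }
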
diff --git a/components/rgbd-sources/CMakeLists.txt b/components/rgbd-sources/CMakeLists.txt
index 7e47d77e4..726e9960a 100644
--- a/components/rgbd-sources/CMakeLists.txt
+++ b/components/rgbd-sources/CMakeLists.txt
@@ -1,16 +1,17 @@
 set(RGBDSRC
-	src/sources/stereovideo/calibrate.cpp
+	src/sources/stereovideo/rectification.cpp
 	src/sources/stereovideo/opencv.cpp
 	src/source.cpp
 	src/frame.cpp
-	src/frameset.cpp
+	#src/frameset.cpp
 	src/sources/stereovideo/stereovideo.cpp
 	#src/colour.cpp
-	src/group.cpp
-	src/cb_segmentation.cpp
+	#src/group.cpp
+	#src/cb_segmentation.cpp
 	#src/abr.cpp
 	src/sources/screencapture/screencapture.cpp
 	src/camera.cpp
+	#src/init.cpp
 )
 
 if (HAVE_REALSENSE)
@@ -35,7 +36,9 @@ if (CUDA_FOUND)
 set_property(TARGET ftlrgbd PROPERTY CUDA_SEPARABLE_COMPILATION OFF)
 endif()
 
-target_link_libraries(ftlrgbd ftlcalibration ftlcommon ${OpenCV_LIBS} ${LIBSGM_LIBRARIES} ${CUDA_LIBRARIES} Eigen3::Eigen realsense ftlnet ${LibArchive_LIBRARIES} ftlcodecs ftloperators ftldata ${X11_X11_LIB} ${X11_Xext_LIB} Pylon)
+target_link_libraries(ftlrgbd ftlcalibration ftlcommon ${OpenCV_LIBS} ${CUDA_LIBRARIES} Eigen3::Eigen realsense ftlnet ${LibArchive_LIBRARIES} ftlcodecs ftloperators ftldata ${X11_X11_LIB} ${X11_Xext_LIB} ${X11_Xtst_LIB} ${X11_XTest_LIB} Pylon)
+
+target_precompile_headers(ftlrgbd REUSE_FROM ftldata)
 
 if (BUILD_TESTS)
 add_subdirectory(test)
diff --git a/components/rgbd-sources/include/ftl/cb_segmentation.hpp b/components/rgbd-sources/include/ftl/cb_segmentation.hpp
deleted file mode 100644
index 4563e35c1..000000000
--- a/components/rgbd-sources/include/ftl/cb_segmentation.hpp
+++ /dev/null
@@ -1,117 +0,0 @@
-#pragma once
-
-#include <opencv2/core.hpp>
-
-namespace ftl {
-
-/**
- * @brief	Codebook segmentation and depthmap filling.
- * @param	Input image width
- * @param	Input image height
- * 
- * Codebook segmentation based on
- *
- * Kim, K., Chalidabhongse, T. H., Harwood, D., & Davis, L. (2005).
- * Real-time foreground-background segmentation using codebook model.
- * Real-Time Imaging. https://doi.org/10.1016/j.rti.2004.12.004
- * 
- * and fixed size codebook optimization in
- * 
- * Rodriguez-Gomez, R., Fernandez-Sanchez, E. J., Diaz, J., & Ros, E.
- * (2015). Codebook hardware implementation on FPGA for background
- * subtraction. Journal of Real-Time Image Processing.
- * https://doi.org/10.1007/s11554-012-0249-6
- * 
- * Additional modifications to include depth maps as part of the
- * background model.
- */
-class CBSegmentation {
-public:
-	CBSegmentation(char codebook_size, size_t width, size_t height, float alpha, float beta, float epsilon, float sigma, int T_add, int T_del, int T_h);
-
-	/**
-	 * @brief	Segment image.
-	 * @param	Input image (3-channels)
-	 * @param	Output Mat. Background pixels set to 0, foreground pixels > 0.
-	 *
-	 * @todo	Template method on OpenCV type
-	 */
-	void apply(cv::Mat &in, cv::Mat &out, cv::Mat &depth, bool fill=false);
-	void apply(cv::Mat &in, cv::Mat &out);
-	
-protected:
-	class Pixel {
-	public:
-		int idx;
-		float r;
-		float g;
-		float b;
-		float i;
-		int d;
-		long t;
-		Pixel(const int &index, const uchar *bgr, const int &depth, const long &time);
-	};
-
-	class Codeword {
-	public:
-		float r;
-		float g;
-		float b;
-		float i_min, i_max;
-		long f, lambda, p, q;
-
-		float d_m;
-		float d_f;
-		float d_S;
-		
-		void set(CBSegmentation::Pixel &pixel);
-		void update(CBSegmentation::Pixel &pixel);
-
-		bool colordiff(CBSegmentation::Pixel &pixel, float epsilon);
-		bool brightness(CBSegmentation::Pixel &pixel, float alpha, float beta);
-		bool depthdiff(CBSegmentation::Pixel &pixel, float sigma);
-
-		inline int freq() { return f; }
-		inline long getLambda() { return lambda; }
-		inline long ctime() { return p; }
-		inline long atime() { return q; }
-	};
-
-	enum EntryType { H, M };
-
-	union Entry {
-		char size;
-		struct Data {
-			EntryType type;
-			CBSegmentation::Codeword cw;
-		} data ;
-	};
-
-	struct CompareEntry{
-		bool operator()(const Entry &a,const Entry &b) const{
-			return 	!((a.data.type == M && b.data.type == H) ||
-					(a.data.cw.f < b.data.cw.f));
-		}
-	};
-
-	bool processPixel(Pixel &px, Codeword *codeword=nullptr);
-	
-	size_t size_;
-	size_t width_;
-	size_t height_;
-
-	float alpha_;
-	float beta_;
-	float epsilon_;
-	float sigma_;
-
-	int T_add_;
-	int T_del_;
-	int T_h_;
-
-private:
-	long t_ = 1;
-	std::vector<Entry> cb_;
-};
-
-}
\ No newline at end of file
diff --git a/components/rgbd-sources/include/ftl/rgbd/camera.hpp b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
index 804a0c2d3..f68de82d0 100644
--- a/components/rgbd-sources/include/ftl/rgbd/camera.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
@@ -142,6 +142,22 @@ inline float3 ftl::rgbd::Camera::unproject<ftl::rgbd::Projection::PERSPECTIVE>(c
 	return make_float3(pos.z*x, pos.z*y, pos.z);
 }
 
+template <> __device__ __host__
+inline float3 ftl::rgbd::Camera::project<ftl::rgbd::Projection::ORTHOGRAPHIC>(const float3 &pos) const {
+	return make_float3(
+		static_cast<float>(pos.x*fx - cx),			
+		static_cast<float>(pos.y*fy - cy),
+		pos.z
+	);
+}
+
+template <> __device__ __host__
+inline float3 ftl::rgbd::Camera::unproject<ftl::rgbd::Projection::ORTHOGRAPHIC>(const float3 &pos) const {
+	const float x = static_cast<float>((pos.x+cx) / fx);
+	const float y = static_cast<float>((pos.y+cy) / fy);
+	return make_float3(x, y, pos.z);
+}
+
 template <> __device__ __host__
 inline float2 ftl::rgbd::Camera::camToScreen<float2>(const float3 &pos) const {
 	return make_float2(
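
The new ORTHOGRAPHIC specialisations above drop the perspective divide entirely: project maps (x, y, z) to (x*fx - cx, y*fy - cy, z) and unproject applies the exact inverse, so depth passes through unchanged. A small host-side round-trip sketch, assuming Camera is the plain default-constructible struct used elsewhere in this patch and that the CUDA vector types are available:

    // Sketch: orthographic project/unproject are exact inverses of each other.
    void orthoRoundTrip() {
        ftl::rgbd::Camera cam;                  // plain struct; only the fields used here are set
        cam.fx = 700.0f; cam.fy = 700.0f;
        cam.cx = -640.0f; cam.cy = -360.0f;     // same sign convention as the perspective variants

        const float3 p = make_float3(0.25f, -0.10f, 3.0f);
        const float3 s = cam.project<ftl::rgbd::Projection::ORTHOGRAPHIC>(p);
        const float3 q = cam.unproject<ftl::rgbd::Projection::ORTHOGRAPHIC>(s);
        // q equals p up to float rounding; s.z == p.z since depth is not scaled.
        (void)q;
    }
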
diff --git a/components/rgbd-sources/include/ftl/rgbd/capabilities.hpp b/components/rgbd-sources/include/ftl/rgbd/capabilities.hpp
new file mode 100644
index 000000000..dacab1e52
--- /dev/null
+++ b/components/rgbd-sources/include/ftl/rgbd/capabilities.hpp
@@ -0,0 +1,37 @@
+#ifndef _FTL_RGBD_CAPABILITIES_HPP_
+#define _FTL_RGBD_CAPABILITIES_HPP_
+
+#include <ftl/utility/msgpack.hpp>
+
+namespace ftl {
+namespace rgbd {
+
+/**
+ * To be added to the capabilities channel to indicate what the source device
+ * is capable of. These properties should be features of the source that
+ * cannot be determined by simply checking for channels, and may include
+ * status information about processing that has been performed.
+ */
+enum class Capability : int {
+	MOVABLE=0,	// Is a pose controllable camera
+	ACTIVE,		// An active depth sensor
+	VIDEO,		// Is video and not just static
+	ADJUSTABLE,	// Camera properties can be changed (exposure etc)
+	VIRTUAL,	// Is not a physical camera
+	TOUCH,		// Touch related feedback supported
+	VR,			// Is a VR device, so provides own active pose etc
+	LIVE,		// Live, not recorded (removed from ftl file sources)
+	FUSED,		// Reconstruction has been performed
+	STREAMED,	// Means it came from a stream and not a device
+	EQUI_RECT,	// 360 rendered (Equirectangular Render)
+	STEREO		// Side-by-side stereo render
+};
+
+std::string capabilityName(Capability);
+
+}
+}
+
+MSGPACK_ADD_ENUM(ftl::rgbd::Capability);
+
+#endif
\ No newline at end of file
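
Capability is intended to be carried per frame as a set in the Capabilities channel, and MSGPACK_ADD_ENUM makes the values serialisable alongside the rest of the frame data. A minimal consumer-side sketch using only this header plus the standard library (the channel plumbing itself is defined elsewhere):

    #include <ftl/rgbd/capabilities.hpp>
    #include <unordered_set>
    #include <iostream>

    inline void reportCapabilities(const std::unordered_set<ftl::rgbd::Capability> &caps) {
        using ftl::rgbd::Capability;
        // Membership checks are how consumers are expected to query the set.
        const bool live = caps.count(Capability::LIVE) > 0;
        std::cout << "source is " << (live ? "live" : "recorded") << std::endl;
        for (auto c : caps) std::cout << " - " << ftl::rgbd::capabilityName(c) << std::endl;
    }
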
diff --git a/components/rgbd-sources/include/ftl/rgbd/format.hpp b/components/rgbd-sources/include/ftl/rgbd/format.hpp
index 032e2948d..11390f14c 100644
--- a/components/rgbd-sources/include/ftl/rgbd/format.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/format.hpp
@@ -18,7 +18,7 @@ struct FormatBase {
 	int cvType;			// OpenCV Mat type
 
 	inline bool empty() const { return width == 0 || height == 0; }
-	inline cv::Size size() const { return cv::Size(width, height); }
+	inline cv::Size size() const { return cv::Size(static_cast<int>(width), static_cast<int>(height)); }
 };
 
 template <typename T>
diff --git a/components/rgbd-sources/include/ftl/rgbd/frame.hpp b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
index 8fc747cd7..509c4bec8 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frame.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frame.hpp
@@ -8,7 +8,7 @@
 #include <opencv2/core/cuda.hpp>
 #include <opencv2/core/cuda_stream_accessor.hpp>
 
-#include <ftl/data/frame.hpp>
+#include <ftl/data/new_frame.hpp>
 
 #include <ftl/codecs/channels.hpp>
 #include <ftl/rgbd/format.hpp>
@@ -16,8 +16,8 @@
 #include <ftl/codecs/codecs.hpp>
 #include <ftl/codecs/packet.hpp>
 #include <ftl/utility/vectorbuffer.hpp>
-#include <ftl/data/framestate.hpp>
 #include <ftl/cuda_common.hpp>
+#include <ftl/rgbd/capabilities.hpp>
 
 #include <type_traits>
 #include <array>
@@ -28,264 +28,201 @@
 namespace ftl {
 namespace rgbd {
 
-typedef ftl::data::FrameState<ftl::rgbd::Camera,2> FrameState;
+//typedef ftl::data::Frame Frame;
 
-struct VideoData {
-	ftl::cuda::TextureObjectBase tex;
-	cv::cuda::GpuMat gpu;
-	cv::Mat host;
-	bool isgpu;
-	bool validhost;
-	std::list<ftl::codecs::Packet> encoded;
+/*inline const ftl::rgbd::Camera &getLeftCamera(const Frame &f) { return f.get<ftl::rgbd::Camera>(ftl::codecs::Channel::Calibration); }
+inline const ftl::rgbd::Camera &getRightCamera(const Frame &f) { return f.get<ftl::rgbd::Camera>(ftl::codecs::Channel::Calibration2); }
+inline const ftl::rgbd::Camera &getLeft(const Frame &f) { return f.get<ftl::rgbd::Camera>(ftl::codecs::Channel::Calibration); }
+inline const ftl::rgbd::Camera &getRight(const Frame &f) { return f.get<ftl::rgbd::Camera>(ftl::codecs::Channel::Calibration2); }
+inline const Eigen::Matrix4d &getPose(const Frame &f) { return f.get<Eigen::Matrix4d>(ftl::codecs::Channel::Pose); }*/
 
-	template <typename T>
-	T &as() {
-		throw FTL_Error("Unsupported type for Video data channel");
-	};
+class VideoFrame {
+	public:
+	VideoFrame() {}
+
+	// Manually add copy constructor since default is removed
+	VideoFrame(const VideoFrame &);
+	VideoFrame &operator=(const VideoFrame &);
 
 	template <typename T>
-	const T &as() const {
-		throw FTL_Error("Unsupported type for Video data channel");
-	};
+	ftl::cuda::TextureObject<T> &createTexture(const ftl::rgbd::Format<T> &f, bool interpolated);
 
 	template <typename T>
-	T &make() {
-		throw FTL_Error("Unsupported type for Video data channel");
-	};
+	ftl::cuda::TextureObject<T> &createTexture(bool interpolated=false) const;
 
-	inline void reset() {
-		validhost = false;
-		encoded.clear();
-	}
-};
+	cv::cuda::GpuMat &createGPU();
+	cv::cuda::GpuMat &createGPU(const ftl::rgbd::FormatBase &f);
 
-// Specialisations for cv mat types
-template <> cv::Mat &VideoData::as<cv::Mat>();
-template <> const cv::Mat &VideoData::as<cv::Mat>() const;
-template <> cv::cuda::GpuMat &VideoData::as<cv::cuda::GpuMat>();
-template <> const cv::cuda::GpuMat &VideoData::as<cv::cuda::GpuMat>() const;
-
-template <> cv::Mat &VideoData::make<cv::Mat>();
-template <> cv::cuda::GpuMat &VideoData::make<cv::cuda::GpuMat>();
-
-/**
- * Manage a set of image channels corresponding to a single camera frame.
- */
-class Frame : public ftl::data::Frame<0,32,ftl::rgbd::FrameState,VideoData> {
-//class Frame {
-public:
-	using ftl::data::Frame<0,32,ftl::rgbd::FrameState,VideoData>::create;
-
-	Frame();
-	Frame(Frame &&f);
-	~Frame();
-
-	Frame &operator=(Frame &&f);
-
-	// Prevent frame copy, instead use a move.
-	//Frame(const Frame &)=delete;
-	//Frame &operator=(const Frame &)=delete;
-
-	void download(ftl::codecs::Channel c, cv::cuda::Stream stream);
-	void upload(ftl::codecs::Channel c, cv::cuda::Stream stream);
-	void download(ftl::codecs::Channels<0> c, cv::cuda::Stream stream);
-	void upload(ftl::codecs::Channels<0> c, cv::cuda::Stream stream);
-
-	inline void download(ftl::codecs::Channel c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void upload(ftl::codecs::Channel c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void download(const ftl::codecs::Channels<0> &c, cudaStream_t stream=0) { download(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-	inline void upload(const ftl::codecs::Channels<0> &c, cudaStream_t stream=0) { upload(c, cv::cuda::StreamAccessor::wrapStream(stream)); };
-
-	/**
-	 * Special case optional download. If a host memory version still exists,
-	 * use that. Only download if no host version exists. This assumes that
-	 * the GPU version has not been modified since the host version was created,
-	 * in otherwords that both version are still the same. It also does not
-	 * actually mark the channel as downloaded.
-	 */
-	cv::Mat &fastDownload(ftl::codecs::Channel c, cv::cuda::Stream stream);
-
-	/**
-	 * Get an existing CUDA texture object.
-	 */
-	template <typename T> const ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel) const;
-
-	/**
-	 * Get an existing CUDA texture object.
-	 */
-	template <typename T> ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel);
-
-	/**
-	 * Create a channel with a given format. This will discard any existing
-	 * data associated with the channel and ensure all data structures and
-	 * memory allocations match the new format.
-	 */
-	template <typename T> T &create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f);
-
-	/**
-	 * Create a CUDA texture object for a channel. This version takes a format
-	 * argument to also create (or recreate) the associated GpuMat.
-	 */
-	template <typename T>
-	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c, const ftl::rgbd::Format<T> &f, bool interpolated=false);
+	template <typename T> ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel) const;
 
-	/**
-	 * Create a CUDA texture object for a channel. With this version the GpuMat
-	 * must already exist and be of the correct type.
-	 */
-	template <typename T>
-	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c, bool interpolated=false);
-
-	/**
-	 * Append encoded data for a channel. This will move the data, invalidating
-	 * the original packet structure. It is to be used to allow data that is
-	 * already encoded to be transmitted or saved again without re-encoding.
-	 * A called to `create` will clear all encoded data for that channel.
-	 */
-	void pushPacket(ftl::codecs::Channel c, ftl::codecs::Packet &pkt);
-
-	/**
-	 * Obtain a list of any existing encodings for this channel.
-	 */
-	const std::list<ftl::codecs::Packet> &getPackets(ftl::codecs::Channel c) const;
-
-	/**
-	 * Clear any existing encoded packets. Used when the channel data is
-	 * modified and the encodings are therefore out-of-date.
-	 */
-	void clearPackets(ftl::codecs::Channel c);
-
-	/**
-	 * Packets from multiple frames are merged together in sequence. An example
-	 * case is if a frame gets dropped but the original encoding is inter-frame
-	 * and hence still requires the dropped frames encoding data.
-	 */
-	void mergeEncoding(ftl::rgbd::Frame &f);
-
-	void resetTexture(ftl::codecs::Channel c);
-
-	/**
-	 * Check if any specified channels are empty or missing.
-	 */
-	bool empty(ftl::codecs::Channels<0> c);
-
-	/**
-	 * Check if a specific channel is missing or has no memory allocated.
-	 */
-	inline bool empty(ftl::codecs::Channel c) {
-		auto &m = getData(c);
-		return !hasChannel(c) || (m.host.empty() && m.gpu.empty());
-	}
+	cv::Mat &createCPU();
+	cv::Mat &createCPU(const ftl::rgbd::FormatBase &f);
 
-	/**
-	 * Obtain a mask of all available channels in the frame.
-	 */
-	inline ftl::codecs::Channels<0> getVideoChannels() const { return getChannels(); }
+	const cv::Mat &getCPU() const;
+	const cv::cuda::GpuMat &getGPU() const;
 
-	inline const ftl::rgbd::Camera &getLeftCamera() const { return getLeft(); }
-	inline const ftl::rgbd::Camera &getRightCamera() const { return getRight(); }
+	cv::Mat &setCPU();
+	cv::cuda::GpuMat &setGPU();
 
-	/**
-	 * Is the channel data currently located on GPU. This also returns false if
-	 * the channel does not exist.
-	 */
-	inline bool isGPU(ftl::codecs::Channel channel) const {
-		return hasChannel(channel) && getData(channel).isgpu;
-	}
+	inline bool isGPU() const { return isgpu; };
 
-	/**
-	 * Is the channel data currently located on CPU memory. This also returns
-	 * false if the channel does not exist.
-	 */
-	inline bool isCPU(ftl::codecs::Channel channel) const {
-		return hasChannel(channel) && !getData(channel).isgpu;
-	}
+	inline bool hasOpenGL() const { return opengl_id != 0; }
+	inline void setOpenGL(unsigned int id) { opengl_id = id; }
+	inline unsigned int getOpenGL() const { return opengl_id; }
+
+
+	private:
+	mutable ftl::cuda::TextureObjectBase tex;
+	cv::cuda::GpuMat gpu;
+	mutable cv::Mat host;
+	unsigned int opengl_id=0;
+	bool isgpu=false;
+	mutable bool validhost=false;
 };
 
-// Specialisations
+class Frame : public ftl::data::Frame {
+	public:
+	const ftl::rgbd::Camera &getLeftCamera() const;
+	const ftl::rgbd::Camera &getRightCamera() const;
+	inline const ftl::rgbd::Camera &getLeft() const { return getLeftCamera(); }
+	inline const ftl::rgbd::Camera &getRight() const { return getRightCamera(); }
+	const Eigen::Matrix4d &getPose() const;
+	ftl::rgbd::Camera &setLeft();
+	ftl::rgbd::Camera &setRight();
+	Eigen::Matrix4d &setPose();
 
-template <> cv::Mat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &);
-template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &);
+	std::string serial() const;
+	std::string device() const;
 
-template <typename T>
-ftl::cuda::TextureObject<T> &Frame::getTexture(ftl::codecs::Channel c) {
-	if (!hasChannel(c)) throw FTL_Error("Texture channel does not exist: " << (int)c);
+	/** Note: this throws an exception if the channel is missing. */
+	const std::unordered_set<ftl::rgbd::Capability> &capabilities() const;
+
+	/** Does not throw an exception. */
+	bool hasCapability(ftl::rgbd::Capability) const;
+
+	inline bool isLive() const { return hasCapability(ftl::rgbd::Capability::LIVE); }
+	inline bool isVirtual() const { return hasCapability(ftl::rgbd::Capability::VIRTUAL); }
+	inline bool isMovable() const { return hasCapability(ftl::rgbd::Capability::MOVABLE); }
+	inline bool isTouchable() const { return hasCapability(ftl::rgbd::Capability::TOUCH); }
+	inline bool isVR() const { return hasCapability(ftl::rgbd::Capability::VR); }
+	inline bool is360() const { return hasCapability(ftl::rgbd::Capability::EQUI_RECT); }
+	inline bool isSideBySideStereo() const { return hasCapability(ftl::rgbd::Capability::STEREO); }
 
-	auto &m = getData(c);
-	if (!m.isgpu) throw FTL_Error("Texture channel is not on GPU");
+	void upload(ftl::codecs::Channel c);
+
+	bool isGPU(ftl::codecs::Channel c) const;
+	bool hasOpenGL(ftl::codecs::Channel c) const;
+	unsigned int getOpenGL(ftl::codecs::Channel c) const;
+
+	template <typename T>
+	ftl::cuda::TextureObject<T> &getTexture(ftl::codecs::Channel c) { return this->get<VideoFrame>(c).getTexture<T>(c); }
+
+	template <typename T>
+	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c, bool interpolated=false) { return this->get<VideoFrame>(c).createTexture<T>(interpolated); }
 
-	if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != static_cast<size_t>(m.gpu.cols) || m.tex.height() != static_cast<size_t>(m.gpu.rows) || m.gpu.type() != m.tex.cvType()) {
-		throw FTL_Error("Texture has not been created properly for this channel: " << (int)c);
+	template <typename T>
+	ftl::cuda::TextureObject<T> &createTexture(ftl::codecs::Channel c, const ftl::rgbd::Format<T> &fmt, bool interpolated=false) { return this->create<VideoFrame>(c).createTexture<T>(fmt, interpolated); }
+
+};
+
+
+template <typename T>
+ftl::cuda::TextureObject<T> &VideoFrame::getTexture(ftl::codecs::Channel c) const {
+	if (!isgpu) throw FTL_Error("Texture channel is not on GPU");
+
+	if (tex.cvType() != ftl::traits::OpenCVType<T>::value || tex.width() != static_cast<size_t>(gpu.cols) || tex.height() != static_cast<size_t>(gpu.rows) || gpu.type() != tex.cvType()) {
+		throw FTL_Error("Texture has not been created properly " << int(c));
 	}
 
-	return ftl::cuda::TextureObject<T>::cast(m.tex);
+	return ftl::cuda::TextureObject<T>::cast(tex);
 }
 
 template <typename T>
-ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, const ftl::rgbd::Format<T> &f, bool interpolated) {
-	//if (!hasChannel(c)) channels_ += c;
-	//using ftl::data::Frame<0,32,ftl::rgbd::FrameState,VideoData>::create;
-
-	create<cv::cuda::GpuMat>(c);
-	auto &m = getData(c);
+ftl::cuda::TextureObject<T> &VideoFrame::createTexture(const ftl::rgbd::Format<T> &f, bool interpolated) {
+	createGPU();
 
 	if (f.empty()) {
 		throw FTL_Error("createTexture needs a non-empty format");
 	} else {
-		m.gpu.create(f.size(), f.cvType);
+		gpu.create(f.size(), f.cvType);
 	}
 
-	if (m.gpu.type() != ftl::traits::OpenCVType<T>::value) {
-		throw FTL_Error("Texture type mismatch: " << (int)c << " " << m.gpu.type() << " != " << ftl::traits::OpenCVType<T>::value);
+	if (gpu.type() != ftl::traits::OpenCVType<T>::value) {
+		throw FTL_Error("Texture type mismatch: " << gpu.type() << " != " << ftl::traits::OpenCVType<T>::value);
 	}
 
 	// TODO: Check tex cvType
 
-	if (m.tex.devicePtr() == nullptr) {
+	if (tex.devicePtr() == nullptr) {
 		//LOG(INFO) << "Creating texture object";
-		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
-	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != static_cast<size_t>(m.gpu.cols) || m.tex.height() != static_cast<size_t>(m.gpu.rows)) {
+		tex = ftl::cuda::TextureObject<T>(gpu, interpolated);
+	} else if (tex.cvType() != ftl::traits::OpenCVType<T>::value || tex.width() != static_cast<size_t>(gpu.cols) || tex.height() != static_cast<size_t>(gpu.rows)) {
 		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
-		m.tex.free();
-		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
+		tex.free();
+		tex = ftl::cuda::TextureObject<T>(gpu, interpolated);
 	}
 
-	return ftl::cuda::TextureObject<T>::cast(m.tex);
+	return ftl::cuda::TextureObject<T>::cast(tex);
 }
 
 template <typename T>
-ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c, bool interpolated) {
-	if (!hasChannel(c)) throw FTL_Error("createTexture needs a format if the channel does not exist: " << (int)c);
-
-	auto &m = getData(c);
-
-	if (!m.isgpu && !m.host.empty()) {
-		m.gpu.create(m.host.size(), m.host.type());
+ftl::cuda::TextureObject<T> &VideoFrame::createTexture(bool interpolated) const {
+	if (!isgpu && !host.empty()) {
+		//gpu.create(host.size(), host.type());
 		// TODO: Should this upload to GPU or not?
 		//gpu_ += c;
-	} else if (!m.isgpu || (m.isgpu && m.gpu.empty())) {
+		throw FTL_Error("Cannot create a texture on a host frame");
+	} else if (!isgpu || (isgpu && gpu.empty())) {
 		throw FTL_Error("createTexture needs a format if no memory is allocated");
 	}
 
-	if (m.gpu.type() != ftl::traits::OpenCVType<T>::value) {
-		throw FTL_Error("Texture type mismatch: " << (int)c << " " << m.gpu.type() << " != " << ftl::traits::OpenCVType<T>::value);
+	if (gpu.type() != ftl::traits::OpenCVType<T>::value) {
+		throw FTL_Error("Texture type mismatch: " << gpu.type() << " != " << ftl::traits::OpenCVType<T>::value);
 	}
 
 	// TODO: Check tex cvType
 
-	if (m.tex.devicePtr() == nullptr) {
+	if (tex.devicePtr() == nullptr) {
 		//LOG(INFO) << "Creating texture object";
-		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
-	} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != static_cast<size_t>(m.gpu.cols) || m.tex.height() != static_cast<size_t>(m.gpu.rows) || m.tex.devicePtr() != m.gpu.data) {
+		tex = ftl::cuda::TextureObject<T>(gpu, interpolated);
+	} else if (tex.cvType() != ftl::traits::OpenCVType<T>::value || tex.width() != static_cast<size_t>(gpu.cols) || tex.height() != static_cast<size_t>(gpu.rows) || tex.devicePtr() != gpu.data) {
 		//LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
-		m.tex.free();
-		m.tex = ftl::cuda::TextureObject<T>(m.gpu, interpolated);
+		tex.free();
+		tex = ftl::cuda::TextureObject<T>(gpu, interpolated);
 	}
 
-	return ftl::cuda::TextureObject<T>::cast(m.tex);
+	return ftl::cuda::TextureObject<T>::cast(tex);
 }
 
 }
 }
 
-#endif // _FTL_RGBD_FRAME_HPP_
\ No newline at end of file
+template <>
+cv::Mat &ftl::data::Frame::create<cv::Mat, 0>(ftl::codecs::Channel c);
+
+template <>
+cv::cuda::GpuMat &ftl::data::Frame::create<cv::cuda::GpuMat, 0>(ftl::codecs::Channel c);
+
+template <>
+const cv::Mat &ftl::data::Frame::get<cv::Mat>(ftl::codecs::Channel c) const;
+
+template <>
+const cv::cuda::GpuMat &ftl::data::Frame::get<cv::cuda::GpuMat>(ftl::codecs::Channel c) const;
+
+template <>
+cv::Mat &ftl::data::Frame::set<cv::Mat, 0>(ftl::codecs::Channel c);
+
+template <>
+cv::cuda::GpuMat &ftl::data::Frame::set<cv::cuda::GpuMat, 0>(ftl::codecs::Channel c);
+
+template <>
+inline bool ftl::data::make_type<ftl::rgbd::VideoFrame>() {
+	return false;
+}
+
+template <>
+inline bool ftl::data::decode_type<ftl::rgbd::VideoFrame>(std::any &a, const std::vector<uint8_t> &data) {
+	return false;
+}
+
+#endif // _FTL_RGBD_FRAME_HPP_
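
The replacement header moves per-channel image storage into VideoFrame and turns the old Frame members into thin wrappers plus create/get/set specialisations on the generic ftl::data::Frame. A short sketch of the intended call pattern, using only members declared above; Channel::Colour and the Format<T>(width, height) constructor are assumed from the wider codebase rather than from this hunk:

    // Sketch: allocate a GPU-backed colour channel with a bound texture, then
    // reach the same data through the generic GpuMat accessor.
    using ftl::codecs::Channel;

    inline void exampleVideoChannel(ftl::rgbd::Frame &frame) {
        auto &tex = frame.createTexture<uchar4>(
            Channel::Colour, ftl::rgbd::Format<uchar4>(1280, 720), true);

        const cv::cuda::GpuMat &gpu = frame.get<cv::cuda::GpuMat>(Channel::Colour);
        (void)tex; (void)gpu;
    }
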
diff --git a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
index 1c4f7da20..02f638e7b 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
@@ -4,7 +4,7 @@
 #include <ftl/threads.hpp>
 #include <ftl/timer.hpp>
 #include <ftl/rgbd/frame.hpp>
-#include <ftl/data/frameset.hpp>
+#include <ftl/data/new_frameset.hpp>
 
 //#include <opencv2/core.hpp>
 #include <vector>
@@ -16,176 +16,7 @@ namespace rgbd {
 static const size_t kMaxFramesets = 15;
 static const size_t kMaxFramesInSet = 32;
 
-class Source;
-
-typedef ftl::data::FrameSet<ftl::rgbd::Frame> FrameSet;
-
-/**
- * Represents a set of synchronised frames, each with two channels. This is
- * used to collect all frames from multiple computers that have the same
- * timestamp.
- */
-//struct FrameSet {
-//	int id=0;
-//	int64_t timestamp;				// Millisecond timestamp of all frames
-//	std::vector<ftl::rgbd::Frame> frames;
-//	std::atomic<int> count;				// Number of valid frames
-//	std::atomic<unsigned int> mask;		// Mask of all sources that contributed
-//	bool stale;						// True if buffers have been invalidated
-//	SHARED_MUTEX mtx;
-
-	/**
-	 * Upload all specified host memory channels to GPU memory.
-	 */
-//	void upload(ftl::codecs::Channels<0>, cudaStream_t stream=0);
-
-	/**
-	 * Download all specified GPU memory channels to host memory.
-	 */
-//	void download(ftl::codecs::Channels<0>, cudaStream_t stream=0);
-
-	/**
-	 * Move the entire frameset to another frameset object. This will
-	 * invalidate the current frameset object as all memory buffers will be
-	 * moved.
-	 */
-//	void swapTo(ftl::rgbd::FrameSet &);
-
-	/**
-	 * Clear all channels and all memory allocations within those channels.
-	 * This will perform a resetFull on all frames in the frameset.
-	 */
-//	void resetFull();
-//};
-
-/**
- * Callback type for receiving video frames.
- */
-typedef std::function<bool(ftl::rgbd::FrameSet &)> VideoCallback;
-
-/**
- * Abstract class for any generator of FrameSet structures. A generator
- * produces (decoded) frame sets at regular frame intervals depending on the
- * global timer settings. The `onFrameSet` callback may be triggered from any
- * thread and also may drop frames and not be called for a given timestamp.
- */
-class Generator {
-	public:
-	Generator() {}
-	virtual ~Generator() {}
-
-	/** Number of frames in last frameset. This can change over time. */
-	virtual size_t size()=0;
-
-	/**
-	 * Get the persistent state object for a frame. An exception is thrown
-	 * for a bad index.
-	 */
-	virtual ftl::rgbd::FrameState &state(size_t ix)=0;
-
-	inline ftl::rgbd::FrameState &operator[](int ix) { return state(ix); }
-
-	/** Register a callback to receive new frame sets. */
-	virtual void onFrameSet(const ftl::rgbd::VideoCallback &)=0;
-};
-
-/**
- * Accept frames and generate framesets as they become completed. This can
- * directly act as a generator of framesets, each frameset being generated
- * by the global timer. Once the expected number of frames have been received,
- * a frameset is marked as complete and can then be passed to the callback at
- * the appropriate time. If frames are generated faster than consumed then they
- * are buffered and merged into a single frameset. The buffer has a limited size
- * so a longer delay in a callback will cause buffer failures. If frames are
- * generated below framerate then the on frameset callback is just not called.
- */
-class Builder : public Generator {
-	public:
-	Builder();
-	~Builder();
-
-	size_t size() override;
-
-	ftl::rgbd::FrameState &state(size_t ix) override;
-
-	inline void setID(int id) { id_ = id; }
-
-	void onFrameSet(const ftl::rgbd::VideoCallback &) override;
-
-	/**
-	 * Add a new frame at a given timestamp.
-	 */
-	//void push(int64_t timestamp, size_t ix, ftl::rgbd::Frame &f);
-
-	/**
-	 * Instead of pushing a frame, find or create a direct reference to one.
-	 */
-	ftl::rgbd::Frame &get(int64_t timestamp, size_t ix);
-
-	/**
-	 * Get the entire frameset for a given timestamp.
-	 */
-	ftl::rgbd::FrameSet *get(int64_t timestamp);
-
-	/**
-	 * Mark a frame as completed.
-	 */
-	void completed(int64_t ts, size_t ix);
-
-	void markPartial(int64_t ts);
-
-	void setName(const std::string &name);
-
-	void setBufferSize(size_t n) { bufferSize_ = n; }
-
-	/**
-	 * Retrieve an fps + latency pair, averaged since last call to this
-	 * function.
-	 */
-	static std::pair<float,float> getStatistics();
-
-	private:
-	std::list<FrameSet*> framesets_;  // Active framesets
-	std::list<FrameSet*> allocated_;  // Keep memory allocations
-
-	size_t head_;
-	ftl::rgbd::VideoCallback cb_;
-	MUTEX mutex_;
-	int mspf_;
-	int64_t last_ts_;
-	int64_t last_frame_;
-	int id_;
-	std::atomic<int> jobs_;
-	volatile bool skip_;
-	ftl::timer::TimerHandle main_id_;
-	size_t size_;
-	size_t bufferSize_;
-	std::vector<ftl::rgbd::FrameState*> states_;
-
-	std::string name_;
-
-	static MUTEX msg_mutex__;
-	static float latency__;
-	static float fps__;
-	static int stats_count__;
-
-	/* Insert a new frameset into the buffer, along with all intermediate
-	 * framesets between the last in buffer and the new one.
-	 */
-	ftl::rgbd::FrameSet *_addFrameset(int64_t timestamp);
-
-	/* Find a frameset with given latency in frames. */
-	ftl::rgbd::FrameSet *_getFrameset();
-	ftl::rgbd::FrameSet *_get(int64_t timestamp);
-
-	/* Search for a matching frameset. */
-	ftl::rgbd::FrameSet *_findFrameset(int64_t ts);
-	void _freeFrameset(ftl::rgbd::FrameSet *);
-
-	void _schedule();
-
-	void _recordStats(float fps, float latency);
-};
+typedef ftl::data::FrameSet FrameSet;
 
 }
 }
diff --git a/components/rgbd-sources/include/ftl/rgbd/group.hpp b/components/rgbd-sources/include/ftl/rgbd/group.hpp
index eed75bbc5..1b227ead0 100644
--- a/components/rgbd-sources/include/ftl/rgbd/group.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/group.hpp
@@ -105,9 +105,9 @@ class Group : public ftl::rgbd::Generator {
 	std::atomic<int> jobs_;
 	std::atomic<int> cjobs_;
 	volatile bool skip_;
-	ftl::timer::TimerHandle cap_id_;
-	ftl::timer::TimerHandle swap_id_;
-	ftl::timer::TimerHandle main_id_;
+	ftl::Handle cap_id_;
+	ftl::Handle swap_id_;
+	ftl::Handle main_id_;
 	std::string name_;
 	MUTEX mutex_;
 
diff --git a/components/rgbd-sources/include/ftl/rgbd/source.hpp b/components/rgbd-sources/include/ftl/rgbd/source.hpp
index 868e1a9a0..c7cc7d574 100644
--- a/components/rgbd-sources/include/ftl/rgbd/source.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/source.hpp
@@ -13,7 +13,9 @@
 #include <map>
 
 #include <ftl/cuda_common.hpp>
-#include <ftl/rgbd/frame.hpp>
+#include <ftl/data/new_frame.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/data/creators.hpp>
 
 namespace ftl {
 
@@ -24,7 +26,6 @@ class Universe;
 namespace rgbd {
 
 class BaseSourceImpl;
-typedef std::function<void(int64_t,ftl::rgbd::Frame&)> FrameCallback;
 
 static inline bool isValidDepth(float d) { return (d > 0.01f) && (d < 39.99f); }
 
@@ -33,15 +34,14 @@ static inline bool isValidDepth(float d) { return (d > 0.01f) && (d < 39.99f); }
  * internal implementation of an RGBD source by providing accessor functions
  * and by automatically changing the implementation in response to any URI
  * changes.
- * 
+ *
  * Cannot be constructed directly, use ftl::create<Source>(...).
  * @see ftl::create
  */
-class Source : public ftl::Configurable {
+class Source : public ftl::Configurable, public ftl::data::DiscreteSource {
 	public:
 	template <typename T, typename... ARGS>
 	friend T *ftl::config::create(ftl::config::json_t &, ARGS ...);
-	friend class VirtualSource;
 
 	// This class cannot be constructed directly, use ftl::create
 	Source()=delete;
@@ -52,126 +52,47 @@ class Source : public ftl::Configurable {
 
 	protected:
 	explicit Source(ftl::config::json_t &cfg);
-	Source(ftl::config::json_t &cfg, ftl::net::Universe *net);
-	virtual ~Source();
 
 	public:
+	virtual ~Source();
+
 	/**
 	 * Is this source valid and ready to grab?.
 	 */
 	bool isReady();
 
-	/**
-	 * Change the second channel source.
-	 */
-	bool setChannel(ftl::codecs::Channel c);
-
-	/**
-	 * Get the channel allocated to the second source.
-	 */
-	ftl::codecs::Channel getChannel() const { return channel_; }
-
 	/**
 	 * Perform the hardware or virtual frame grab operation. This should be
-	 * fast and non-blocking. 
+	 * fast and non-blocking.
 	 */
-	bool capture(int64_t ts);
+	bool capture(int64_t ts) override;
 
 	/**
 	 * Download captured frame. This could be a blocking IO operation.
 	 */
-	bool retrieve();
-
-	/**
-	 * Generate a thread job using the provided callback for the most recently
-	 * retrieved frame (matching the provided timestamp). If already busy
-	 * dispatching, returns false.
-	 */
-	bool dispatch(int64_t ts);
-
-	/**
-	 * Between frames, do any required buffer swaps.
-	 */
-	//void swap() { if (impl_) impl_->swap(); }
-
-	/**
-	 * Do any post-grab processing. This function
-	 * may take considerable time to return, especially for sources requiring
-	 * software stereo correspondance.
-	 */
-	//bool compute(int64_t ts);
-
-	//bool isVirtual() const { return impl_ == nullptr; }
-
-	/**
-	 * Get the source's camera intrinsics.
-	 */
-	const Camera &parameters() const;
-
-	/**
-	 * Get camera intrinsics for another channel. For example the right camera
-	 * in a stereo pair.
-	 */
-	const Camera parameters(ftl::codecs::Channel) const;
-
-	cv::Mat cameraMatrix() const;
-
-	/**
-	 * Change the camera extrinsics by providing a new pose matrix. For virtual
-	 * cameras this will move the camera, for physical cameras it is set by the
-	 * registration process as it attempts to work out a cameras relative pose.
-	 */
-	virtual void setPose(const Eigen::Matrix4d &pose);
-
-	/**
-	 * Check what features this source has available.
-	 */
-	[[deprecated]] bool hasCapabilities(capability_t);
-
-	[[deprecated]] capability_t getCapabilities() const;
+	bool retrieve(ftl::data::Frame &) override;
 
 	/**
 	 * Force the internal implementation to be reconstructed.
 	 */
 	void reset();
 
-	[[deprecated]] ftl::net::Universe *getNet() const { return net_; }
-
 	std::string getURI() { return value("uri", std::string("")); }
 
-	ftl::rgbd::FrameState &state();
-
 	SHARED_MUTEX &mutex() { return mutex_; }
 
-	const FrameCallback &callback() { return callback_; }
-
-	/**
-	 * Set the callback that receives decoded frames as they are generated.
-	 * There can be only a single such callback as the buffers can be swapped
-	 * by the callback.
-	 */
-	void setCallback(const FrameCallback &cb);
-	void removeCallback() { callback_ = nullptr; }
-
 	/**
-	 * Notify of a decoded or available pair of frames. This calls the source
-	 * callback after having verified the correct resolution of the frames.
+	 * Check whether a given device URI is supported. This will check hardware
+	 * for physical availability of devices.
 	 */
-	//void notify(int64_t ts, cv::cuda::GpuMat &c1, cv::cuda::GpuMat &c2);
-	//void notify(int64_t ts, ftl::rgbd::Frame &f);
+	static bool supports(const std::string &uri);
 
 
 	private:
 	BaseSourceImpl *impl_;
-	Eigen::Matrix4d pose_;
-	ftl::net::Universe *net_;
 	SHARED_MUTEX mutex_;
-	ftl::codecs::Channel channel_;
 	cudaStream_t stream_;
-	FrameCallback callback_;
-	ftl::rgbd::Frame frames_[2];
-	bool is_dispatching;
-	bool is_retrieving;
+	std::atomic_bool is_retrieving;
 
 	void _swap();
 };
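
With Source now an ftl::data::DiscreteSource, the per-frame contract reduces to capture(ts) followed by retrieve(frame); everything previously read through parameters(), setPose() or the frame callback lives in the frame object instead. A hedged sketch of the driving step (in practice this loop belongs to the feed/pipeline layer, and the frame is allocated there rather than by the caller shown here):

    #define LOGURU_REPLACE_GLOG 1
    #include <loguru.hpp>
    #include <ftl/rgbd/source.hpp>

    // Sketch only: one capture/retrieve step for a single source.
    inline void pollSourceOnce(ftl::rgbd::Source &src, ftl::data::Frame &frame, int64_t timestamp) {
        if (!src.isReady()) return;
        if (!src.capture(timestamp)) return;       // fast, non-blocking grab
        if (!src.retrieve(frame)) {                // possibly blocking IO or decode
            LOG(WARNING) << "retrieve failed for " << src.getURI();
        }
    }
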
diff --git a/components/rgbd-sources/src/basesource.hpp b/components/rgbd-sources/src/basesource.hpp
index e3996d8af..294154cdb 100644
--- a/components/rgbd-sources/src/basesource.hpp
+++ b/components/rgbd-sources/src/basesource.hpp
@@ -28,7 +28,7 @@ class BaseSourceImpl {
 	friend class ftl::rgbd::Source;
 
 	public:
-	explicit BaseSourceImpl(ftl::rgbd::Source *host) : capabilities_(0), host_(host), params_(state_.getLeft()) { }
+	explicit BaseSourceImpl(ftl::rgbd::Source *host) : capabilities_(0), host_(host) { }
 	virtual ~BaseSourceImpl() {}
 
 	/**
@@ -49,18 +49,11 @@ class BaseSourceImpl {
 	 */
 	virtual bool isReady() { return false; };
 
-	// These should be accessed via the state object in a frame.
-	[[deprecated]] virtual void setPose(const Eigen::Matrix4d &pose) { state_.setPose(pose); };
-	[[deprecated]] virtual Camera parameters(ftl::codecs::Channel) { return params_; };
-
 	ftl::rgbd::Source *host() { return host_; }
-	ftl::rgbd::FrameState &state() { return state_; }
 
 	protected:
-	ftl::rgbd::FrameState state_;
 	capability_t capabilities_;    // To be deprecated
 	ftl::rgbd::Source *host_;
-	ftl::rgbd::Camera &params_;
 };
 
 }
diff --git a/components/rgbd-sources/src/camera.cpp b/components/rgbd-sources/src/camera.cpp
index b4631a69d..3876fa1ed 100644
--- a/components/rgbd-sources/src/camera.cpp
+++ b/components/rgbd-sources/src/camera.cpp
@@ -1,6 +1,27 @@
 #include <ftl/rgbd/camera.hpp>
+#include <ftl/rgbd/capabilities.hpp>
 
 using ftl::rgbd::Camera;
+using ftl::rgbd::Capability;
+
+// TODO: Put in a better place?
+std::string ftl::rgbd::capabilityName(Capability c) {
+	switch (c) {
+	case Capability::MOVABLE		: return "movable";
+	case Capability::ACTIVE			: return "active";
+	case Capability::VIDEO			: return "video";
+	case Capability::ADJUSTABLE		: return "adjustable";
+	case Capability::VIRTUAL		: return "virtual";
+	case Capability::TOUCH			: return "touch";
+	case Capability::VR				: return "vr";
+	case Capability::LIVE			: return "live";
+	case Capability::FUSED			: return "fused";
+	default: return "Unknown";
+	}
+}
 
 Camera Camera::from(ftl::Configurable *cfg) {
 	Camera r;
diff --git a/components/rgbd-sources/src/cb_segmentation.cpp b/components/rgbd-sources/src/cb_segmentation.cpp
deleted file mode 100644
index 5fd0f7571..000000000
--- a/components/rgbd-sources/src/cb_segmentation.cpp
+++ /dev/null
@@ -1,216 +0,0 @@
-#include "ftl/cb_segmentation.hpp"
-
-#include<algorithm>
-#include <math.h>
-
-using cv::Mat;
-using cv::Vec3f, cv::Vec4f;
-
-using std::vector;
-using std::min;
-using std::max;
-using std::pair;
-
-using namespace ftl;
-
-CBSegmentation::Pixel::Pixel(const int &index, const uchar *bgr, const int &depth, const long &time) {
-    idx = index;
-	r = bgr[2];
-	g = bgr[1];
-	b = bgr[0];
-	i = sqrt(r*r + g*g + b*b);
-	d = depth;
-	t = time;
-}
-
-
-void CBSegmentation::Codeword::set(CBSegmentation::Pixel &px) {
-	r = px.r;
-	g = px.g;
-	b = px.b;
-	i_min = px.i;
-	i_max = px.i;
-	f = px.t;
-	lambda = px.t - 1;
-	p = px.t;
-	q = px.t;
-	
-	d_m = px.d;
-	d_S = 0.0;
-	d_f = 1.0;
-}
-
-void CBSegmentation::Codeword::update(CBSegmentation::Pixel &px) {
-	r = (f * r + px.r) / (f + 1);
-	g = (f * g + px.g) / (f + 1);
-	b = (f * b + px.b) / (f + 1);
-	i_min = min(px.i, i_min);
-	i_max = max(px.i, i_max);
-	f = f + 1;
-	lambda = max(lambda, px.t - q);
-	q = px.t;
-
-	if (false /*isValidDepth(px.d)*/) { // check value valid
-		float d_prev = d_m;
-		d_f = d_f + 1;
-		d_m = d_m + (px.d - d_m) / d_f;
-		d_S = d_S + (px.d - d_m) * (px.d - d_prev);
-	}
-}
-
-// eq (2) and BSG
-//
-bool CBSegmentation::Codeword::colordiff(CBSegmentation::Pixel &px, float epsilon) {
-	float x_2 = px.r * px.r + px.g * px.g + px.b * px.b;
-	float v_2 = r*r + g*g + b*b;
-	float xv_2 = pow(px.r * r + px.g * g + px.b * b, 2);
-	float p_2 = xv_2 / v_2;
-	return sqrt(x_2 - p_2) < epsilon;
-}
-
-// eq (3)
-// note: ||x_t|| in the article is equal to I defined in
-//       "Algorithm for codebook construction"
-//
-bool CBSegmentation::Codeword::brightness(CBSegmentation::Pixel &px, float alpha, float beta) {
-	return true;
-	/*float i_low = alpha * i_max;
-	float i_hi = min(beta * i_max, i_min / alpha);
-	return (i_low <= px.i) && (px.i <= i_hi);*/
-}
-
-CBSegmentation::CBSegmentation(
-		char codebook_size, size_t width, size_t height,
-		float alpha, float beta, float epsilon, float sigma,
-		int T_add, int T_del, int T_h) :
-		size_(codebook_size + 1), width_(width), height_(height),
-		alpha_(alpha), beta_(beta), epsilon_(epsilon), sigma_(sigma),
-		T_add_(T_add), T_del_(T_del), T_h_(T_h) {
-	
-	cb_ = vector<Entry>(width * height * size_);
-	for (size_t i = 0; i < cb_.size(); i += size_) {
-		cb_[i].size = 0;
-	}
-}
-
-bool CBSegmentation::processPixel(CBSegmentation::Pixel &px, CBSegmentation::Codeword *codeword) {
-	char &size = cb_[size_ * px.idx].size;
-	size_t idx_begin = size_ * px.idx + 1;	
-
-	CBSegmentation::Entry::Data *start = &(cb_[idx_begin].data);
-	CBSegmentation::Entry::Data *entry = start;
-
-	CBSegmentation::Entry::Data *lru = nullptr;
-	CBSegmentation::Entry::Data *lfu = nullptr;
-	
-	// TODO: benchmark sorting
-
-	// if value is found (in M or H), loop exits early and no further maintenance
-	// is done. Maintenance may happen when codeword is not found and all entries
-	// are evaluated.
-	
-	for (int i = 0; i < size; i++) {
-		if (entry->type == M) {
-			// matching codeword, update and return
-			if (entry->cw.brightness(px, alpha_, beta_) && entry->cw.colordiff(px, epsilon_)) {
-				entry->cw.update(px);
-				codeword = &(entry->cw);
-				return true;
-			}
-
-			// delete (move last to here) if not accessed for longer time than T_del
-			if ((px.t - entry->cw.atime()) > T_del_) {
-				size--;
-				*entry = *(start + size);
-				//std::sort(	cb_.begin() + idx_begin,
-				//				cb_.begin() + idx_begin + size,
-				//				CompareEntry());
-				continue;
-			}
-			
-			// update LFU
-			if (!lfu || lfu->cw.freq() > entry->cw.freq()) {
-				lfu = entry;
-			}
-		}
-		else if (entry->type == H) {
-			// matching codeword, update and return
-			if (entry->cw.brightness(px, alpha_, beta_) && entry->cw.colordiff(px, epsilon_)) {
-				entry->cw.update(px);
-
-				// should be moved to M? if so, move and return true
-				if ((px.t - entry->cw.ctime()) > T_add_) {
-					entry->type = M;
-					//std::sort(	cb_.begin() + idx_begin,
-					//				cb_.begin() + idx_begin + size,
-					//				CompareEntry());
-					return true;
-				}
-				else {
-					return false;
-				}
-			}
-			
-			// delete if lambda lt T_h
-			if (entry->cw.getLambda() < T_h_) {
-				size--;
-				*entry = *(start + size);
-				continue;
-			}
-
-			// update LRU
-			if (!lru || lru->cw.atime() > entry->cw.atime()) {
-				lru = entry;
-			}
-		}
-	}
-
-	// not found, create new codeword (to empty position or lru h or lfu m)
-	// TODO: Should not prefer H codewords over M codewords?
-	if ((size_t)size < (size_ - 1)) {
-		entry = start + size;
-		// size++;  FIXME: This doesn't do anything (nick)
-		entry->type = H;
-		entry->cw.set(px);
-	}
-	else if (lru) {
-		lru->type = H;
-		lru->cw.set(px);
-	}
-	else {
-		lfu->type = H;
-		lfu->cw.set(px);
-	}
-
-	// sort anyways (frequencies may have changed during earlier iterations)
-	//std::sort(cb_.begin() + idx_begin, cb_.begin() + idx_begin + size, CompareEntry());
-
-	return false;
-}
-
-void CBSegmentation::apply(Mat &in, Mat &out) {
-	if (((size_t)out.rows != height_) || ((size_t)out.cols != width_) 
-		|| (out.type() != CV_8UC1) || !out.isContinuous()) {
-		out = Mat(height_, width_, CV_8UC1, cv::Scalar(0));
-	}
-	
-	// TODO: thread pool, queue N rows
-	// #pragma omp parallel for   -  FIXME: Use thread pool. (nick)
-	for (size_t y = 0; y < height_; ++y) {
-		size_t idx = y * width_;
-		uchar *ptr_in = in.ptr<uchar>(y);
-		uchar *ptr_out = out.ptr<uchar>(y);
-		
-		for (size_t x = 0; x < width_; ++x, ++idx, ptr_in += 3) {
-			auto px = Pixel(idx, ptr_in, 0, t_);
-			if(processPixel(px)) {
-				ptr_out[x] = 0;
-			}
-			else {
-				ptr_out[x] = 255;
-			}
-		}
-	}
-
-	t_++;
-}
diff --git a/components/rgbd-sources/src/frame.cpp b/components/rgbd-sources/src/frame.cpp
index 8c809c026..10bacd8f0 100644
--- a/components/rgbd-sources/src/frame.cpp
+++ b/components/rgbd-sources/src/frame.cpp
@@ -4,218 +4,199 @@
 #define LOGURU_REPLACE_GLOG 1
 #include <loguru.hpp>
 
-using ftl::rgbd::Frame;
-using ftl::rgbd::FrameState;
 using ftl::codecs::Channels;
 using ftl::codecs::Channel;
-using ftl::rgbd::VideoData;
+using ftl::rgbd::VideoFrame;
 
-static cv::Mat none;
-static cv::cuda::GpuMat noneGPU;
-static std::atomic<int> frame_count = 0;
-
-template <>
-cv::Mat &VideoData::as<cv::Mat>() {
-	if (isgpu) throw FTL_Error("Host request for GPU data without download");
-	return host;
+VideoFrame::VideoFrame(const VideoFrame &f) {
+	gpu = f.gpu;
+	host = f.host;
+	isgpu = f.isgpu;
+	validhost = f.validhost;
 }
 
-template <>
-const cv::Mat &VideoData::as<cv::Mat>() const {
-	if (isgpu) throw FTL_Error("Host request for GPU data without download");
-	return host;
+VideoFrame &VideoFrame::operator=(const VideoFrame &f) {
+	gpu = f.gpu;
+	host = f.host;
+	isgpu = f.isgpu;
+	validhost = f.validhost;
+	return *this;
 }
 
-template <>
-cv::cuda::GpuMat &VideoData::as<cv::cuda::GpuMat>() {
-	if (!isgpu) throw FTL_Error("GPU request for Host data without upload");
-	return gpu;
-}
 
-template <>
-const cv::cuda::GpuMat &VideoData::as<cv::cuda::GpuMat>() const {
-	if (!isgpu) throw FTL_Error("GPU request for Host data without upload");
-	return gpu;
-}
+/*cv::Mat &Frame::fastDownload(ftl::codecs::Channel c, cv::cuda::Stream stream) {
+	if (hasChannel(c)) {
+		auto &data = getData(static_cast<Channel>(c));
+		if (!data.isgpu) return data.host;
 
-template <>
-cv::Mat &VideoData::make<cv::Mat>() {
-	validhost = true;
+		if (data.validhost && !data.host.empty()) return data.host;
+
+		// TODO: Perhaps allocated page locked here?
+		data.gpu.download(data.host, stream);
+		data.validhost = true;
+		return data.host;
+	}
+	throw FTL_Error("Fast download channel does not exist: " << (int)c);
+}*/
+
+cv::Mat &VideoFrame::createCPU() {
 	isgpu = false;
-	encoded.clear();
 	return host;
 }
 
-template <>
-cv::cuda::GpuMat &VideoData::make<cv::cuda::GpuMat>() {
+cv::cuda::GpuMat &VideoFrame::createGPU() {
 	isgpu = true;
-	encoded.clear();
+	validhost = false;
 	return gpu;
 }
 
-// =============================================================================
-
-/*void Frame::reset() {
-	origin_ = nullptr;
-	channels_.clear();
-	gpu_.clear();
-	data_channels_.clear();
-	for (size_t i=0u; i<Channels<0>::kMax; ++i) {
-		data_[i].encoded.clear();
+cv::Mat &VideoFrame::createCPU(const ftl::rgbd::FormatBase &f) {
+	if (!f.empty()) {
+		host.create(f.size(), f.cvType);
 	}
-}*/
+	isgpu = false;
 
-/*void Frame::resetFull() {
-	origin_ = nullptr;
-	channels_.clear();
-	gpu_.clear();
-	for (size_t i=0u; i<Channels<0>::kMax; ++i) {
-		data_[i].gpu = cv::cuda::GpuMat();
-		data_[i].host = cv::Mat();
-		data_[i].encoded.clear();
+	return host;
+}
+
+cv::cuda::GpuMat &VideoFrame::createGPU(const ftl::rgbd::FormatBase &f) {
+	if (!f.empty()) {
+		gpu.create(f.size(), f.cvType);
 	}
-}*/
+	isgpu = true;
+	validhost = false;
 
-Frame::Frame() {
-	++frame_count;
-	//LOG(INFO) << "Frames: " << frame_count;
+	return gpu;
 }
 
-Frame::Frame(Frame &&f) : ftl::data::Frame<0,32,ftl::rgbd::FrameState,VideoData>(std::move(f)) {
+const cv::Mat &VideoFrame::getCPU() const {
+	if (isgpu && !validhost) {
+		// TODO: Use stream and page locked mem.
+		gpu.download(host);
+		validhost = true;
+	}
+	return host;
+}
 
+const cv::cuda::GpuMat &VideoFrame::getGPU() const {
+	// TODO: Upload?
+	return gpu;
 }
 
-Frame &Frame::operator=(Frame &&f) {
-	ftl::data::Frame<0,32,ftl::rgbd::FrameState,VideoData>::operator=(std::move(f));
-	return *this;
+cv::Mat &VideoFrame::setCPU() {
+	return host;
 }
 
-Frame::~Frame() {
-	--frame_count;
+cv::cuda::GpuMat &VideoFrame::setGPU() {
+	validhost = false;
+	return gpu;
 }
 
-void Frame::download(Channel c, cv::cuda::Stream stream) {
-	download(Channels(c), stream);
+void ftl::rgbd::Frame::upload(ftl::codecs::Channel c) {
+	auto &vframe = set<VideoFrame>(c);
+	const auto &cpumat = vframe.getCPU();
+	vframe.createGPU().upload(cpumat);
 }
 
-void Frame::upload(Channel c, cv::cuda::Stream stream) {
-	upload(Channels(c), stream);
+bool ftl::rgbd::Frame::isGPU(ftl::codecs::Channel c) const {
+	const auto &vframe = get<VideoFrame>(c);
+	return vframe.isGPU();
 }
 
-void Frame::download(Channels<0> c, cv::cuda::Stream stream) {
-	for (size_t i=0u; i<Channels<0>::kMax; ++i) {
-		if (c.has(i) && hasChannel(static_cast<Channel>(i)) && isGPU(static_cast<Channel>(i))) {
-			auto &data = getData(static_cast<Channel>(i));
-			data.validhost = true;
-			data.gpu.download(data.host, stream);
-			data.isgpu = false;
-		}
-	}
+bool ftl::rgbd::Frame::hasOpenGL(ftl::codecs::Channel c) const {
+	const auto &vframe = get<VideoFrame>(c);
+	return vframe.hasOpenGL();
 }
 
-void Frame::upload(Channels<0> c, cv::cuda::Stream stream) {
-	for (size_t i=0u; i<Channels<0>::kMax; ++i) {
-		if (c.has(i) && hasChannel(static_cast<Channel>(i)) && !isGPU(static_cast<Channel>(i))) {
-			auto &data = getData(static_cast<Channel>(i));
-			data.gpu.upload(data.host, stream);
-			data.isgpu = true;
-		}
-	}
+unsigned int ftl::rgbd::Frame::getOpenGL(ftl::codecs::Channel c) const {
+	const auto &vframe = get<VideoFrame>(c);
+	return vframe.getOpenGL();
 }
 
-cv::Mat &Frame::fastDownload(ftl::codecs::Channel c, cv::cuda::Stream stream) {
-	if (hasChannel(c)) {
-		auto &data = getData(static_cast<Channel>(c));
-		if (!data.isgpu) return data.host;
 
-		if (data.validhost && !data.host.empty()) return data.host;
+const ftl::rgbd::Camera &ftl::rgbd::Frame::getLeftCamera() const {
+	return std::get<0>(this->get<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(ftl::codecs::Channel::Calibration));
+}
 
-		// TODO: Perhaps allocated page locked here?
-		data.gpu.download(data.host, stream);
-		data.validhost = true;
-		return data.host;
-	}
-	throw FTL_Error("Fast download channel does not exist: " << (int)c);
+const ftl::rgbd::Camera &ftl::rgbd::Frame::getRightCamera() const {
+	return std::get<0>(this->get<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(ftl::codecs::Channel::Calibration2));
 }
 
-void Frame::pushPacket(ftl::codecs::Channel c, ftl::codecs::Packet &pkt) {
-	if (hasChannel(c)) {
-		auto &m1 = getData(c);
-		m1.encoded.emplace_back() = std::move(pkt);
-	} else {
-		throw FTL_Error("Channel " << (int)c << " doesn't exist for packet push");
-	}
+const Eigen::Matrix4d &ftl::rgbd::Frame::getPose() const {
+	return this->get<Eigen::Matrix4d>(ftl::codecs::Channel::Pose);
 }
 
-const std::list<ftl::codecs::Packet> &Frame::getPackets(ftl::codecs::Channel c) const {
-	if (!hasChannel(c)) {
-		throw FTL_Error("Frame channel does not exist: " << (int)c);
-	}
+ftl::rgbd::Camera &ftl::rgbd::Frame::setLeft() {
+	return std::get<0>(this->create<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(ftl::codecs::Channel::Calibration));
+}
 
-	auto &m1 = getData(c);
-	return m1.encoded;
+ftl::rgbd::Camera &ftl::rgbd::Frame::setRight() {
+	return std::get<0>(this->create<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(ftl::codecs::Channel::Calibration2));
 }
 
-void Frame::mergeEncoding(ftl::rgbd::Frame &f) {
-	//LOG(INFO) << "MERGE " << (unsigned int)f.channels_;
-	for (auto c : getChannels()) {
-		//if (!f.hasChannel(c)) f.create<cv::cuda::GpuMat>(c);
-		if (f.hasChannel(c)) {
-			auto &m1 = getData(c);
-			auto &m2 = f.getData(c);
-			m1.encoded.splice(m1.encoded.begin(), m2.encoded);
-			//LOG(INFO) << "SPLICED: " << m1.encoded.size();
-		}
-	}
+Eigen::Matrix4d &ftl::rgbd::Frame::setPose() {
+	return this->create<Eigen::Matrix4d>(ftl::codecs::Channel::Pose);
 }
 
-bool Frame::empty(ftl::codecs::Channels<0> channels) {
-	for (auto c : channels) {
-		if (empty(c)) return true;
+std::string ftl::rgbd::Frame::serial() const {
+	if (hasChannel(Channel::MetaData)) {
+		const auto &meta = get<std::map<std::string,std::string>>(Channel::MetaData);
+		auto i = meta.find("serial");
+		if (i != meta.end()) return i->second;
 	}
-	return false;
+	return "";
 }
 
-template <> cv::Mat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f) {
-	if (c == Channel::None) {
-		throw FTL_Error("Cannot create a None channel");
+std::string ftl::rgbd::Frame::device() const {
+	if (hasChannel(Channel::MetaData)) {
+		const auto &meta = get<std::map<std::string,std::string>>(Channel::MetaData);
+		auto i = meta.find("device");
+		if (i != meta.end()) return i->second;
 	}
-	
-	create<cv::Mat>(c);
-	auto &m = getData(c);
+	return "";
+}
 
-	m.encoded.clear();  // Remove all old encoded data
+const std::unordered_set<ftl::rgbd::Capability> &ftl::rgbd::Frame::capabilities() const {
+	return get<std::unordered_set<ftl::rgbd::Capability>>(Channel::Capabilities);
+}
 
-	if (!f.empty()) {
-		m.host.create(f.size(), f.cvType);
+bool ftl::rgbd::Frame::hasCapability(ftl::rgbd::Capability c) const {
+	if (hasChannel(Channel::Capabilities)) {
+		const auto &cap = get<std::unordered_set<ftl::rgbd::Capability>>(Channel::Capabilities);
+		return cap.count(c) > 0;
 	}
-
-	return m.host;
+	return false;
 }
 
-template <> cv::cuda::GpuMat &Frame::create(ftl::codecs::Channel c, const ftl::rgbd::FormatBase &f) {
-	if (c == Channel::None) {
-		throw FTL_Error("Cannot create a None channel");
-	}
 
-	create<cv::cuda::GpuMat>(c);
-	auto &m = getData(c);
+template <>
+cv::Mat &ftl::data::Frame::create<cv::Mat, 0>(ftl::codecs::Channel c) {
+	return create<ftl::rgbd::VideoFrame>(c).createCPU();
+}
 
-	m.encoded.clear();  // Remove all old encoded data
+template <>
+cv::cuda::GpuMat &ftl::data::Frame::create<cv::cuda::GpuMat, 0>(ftl::codecs::Channel c) {
+	return create<ftl::rgbd::VideoFrame>(c).createGPU();
+}
 
-	if (!f.empty()) {
-		m.gpu.create(f.size(), f.cvType);
-	}
+template <>
+const cv::Mat &ftl::data::Frame::get<cv::Mat>(ftl::codecs::Channel c) const {
+	return get<ftl::rgbd::VideoFrame>(c).getCPU();
+}
 
-	return m.gpu;
+template <>
+const cv::cuda::GpuMat &ftl::data::Frame::get<cv::cuda::GpuMat>(ftl::codecs::Channel c) const {
+	return get<ftl::rgbd::VideoFrame>(c).getGPU();
 }
 
-void Frame::clearPackets(ftl::codecs::Channel c) {
-	auto &m = getData(c);
-	m.encoded.clear();
+template <>
+cv::Mat &ftl::data::Frame::set<cv::Mat, 0>(ftl::codecs::Channel c) {
+	return set<ftl::rgbd::VideoFrame>(c).setCPU();
 }
 
-void Frame::resetTexture(ftl::codecs::Channel c) {
-	auto &m = getData(c);
-	m.tex.free();
+template <>
+cv::cuda::GpuMat &ftl::data::Frame::set<cv::cuda::GpuMat, 0>(ftl::codecs::Channel c) {
+	return set<ftl::rgbd::VideoFrame>(c).setGPU();
 }
+
+
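
The new accessors above make the CPU/GPU location of a channel explicit: createCPU()/createGPU() decide where the data lives, getCPU() lazily mirrors GPU data onto the host, and Frame::upload() pushes the current host copy to the device. A round-trip sketch using only the specialisations defined in this file (the channel and image size are illustrative):

    using ftl::codecs::Channel;

    inline void uploadRoundTrip(ftl::rgbd::Frame &frame) {
        // Write a host image into the channel.
        cv::Mat &m = frame.create<cv::Mat>(Channel::Colour);
        m = cv::Mat(720, 1280, CV_8UC4, cv::Scalar(0, 0, 0, 255));

        // Push it to the GPU; the channel becomes GPU-backed.
        frame.upload(Channel::Colour);
        const cv::cuda::GpuMat &gpu = frame.get<cv::cuda::GpuMat>(Channel::Colour);

        // Reading it back as cv::Mat triggers the lazy download in getCPU().
        const cv::Mat &again = frame.get<cv::Mat>(Channel::Colour);
        (void)gpu; (void)again;
    }
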
diff --git a/components/rgbd-sources/src/init.cpp b/components/rgbd-sources/src/init.cpp
new file mode 100644
index 000000000..712b933ca
--- /dev/null
+++ b/components/rgbd-sources/src/init.cpp
@@ -0,0 +1,16 @@
+#include <ftl/data/new_frame.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/codecs/channels.hpp>
+
+using ftl::codecs::Channel;
+using ftl::data::StorageMode;
+
+bool ftl_video_init =
+	ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT) &&
+	ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Pose, "pose", ftl::data::StorageMode::PERSISTENT) &&
+	ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration2, "calibration_right", ftl::data::StorageMode::PERSISTENT); // &&
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Name, "name", ftl::data::StorageMode::PERSISTENT);
+
+bool ftl_video_initialised() {
+	return ftl_video_init;
+}
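
init.cpp is the registration point for the typed channels this component owns; the && chain exists only to force the make_channel() calls to run when the translation unit is linked, which ftl_video_initialised() can be used to verify. A hypothetical additional registration would follow the same pattern (the channel/type pairing below is illustrative only and not part of this patch):

    // Hypothetical example: register the Capabilities channel with its set type,
    // mirroring the make_channel() pattern used above.
    #include <ftl/data/new_frame.hpp>
    #include <ftl/rgbd/capabilities.hpp>
    #include <ftl/codecs/channels.hpp>
    #include <unordered_set>

    static bool extra_channels_registered =
        ftl::data::make_channel<std::unordered_set<ftl::rgbd::Capability>>(
            ftl::codecs::Channel::Capabilities, "capabilities",
            ftl::data::StorageMode::PERSISTENT);
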
diff --git a/components/rgbd-sources/src/source.cpp b/components/rgbd-sources/src/source.cpp
index 2a9af5889..4a4d5e925 100644
--- a/components/rgbd-sources/src/source.cpp
+++ b/components/rgbd-sources/src/source.cpp
@@ -26,36 +26,16 @@ using ftl::rgbd::detail::StereoVideoSource;
 using ftl::rgbd::detail::ImageSource;
 using ftl::rgbd::detail::MiddleburySource;
 using ftl::rgbd::detail::ScreenCapture;
-using ftl::rgbd::capability_t;
 using ftl::codecs::Channel;
-//using ftl::rgbd::detail::FileSource;
 using ftl::rgbd::Camera;
-using ftl::rgbd::FrameCallback;
 
-
-Source::Source(ftl::config::json_t &cfg) : Configurable(cfg), pose_(Eigen::Matrix4d::Identity()), net_(nullptr) {
+Source::Source(ftl::config::json_t &cfg) : Configurable(cfg) {
 	impl_ = nullptr;
-	//params_ = {};
 	stream_ = 0;
-	is_dispatching = false;
 	is_retrieving = false;
 	reset();
 
-	on("uri", [this](const ftl::config::Event &e) {
-		LOG(INFO) << "URI change for source: " << getURI();
-		reset();
-	});
-}
-
-Source::Source(ftl::config::json_t &cfg, ftl::net::Universe *net) : Configurable(cfg), pose_(Eigen::Matrix4d::Identity()), net_(net) {
-	impl_ = nullptr;
-	//params_ = {};
-	stream_ = 0;
-	is_dispatching = false;
-	is_retrieving = false;
-	reset();
-
-	on("uri", [this](const ftl::config::Event &e) {
+	on("uri", [this]() {
 		LOG(INFO) << "URI change for source: " << getURI();
 		reset();
 	});
@@ -67,18 +47,6 @@ Source::~Source() {
 
 bool Source::isReady() { return (impl_) ? impl_->isReady() : false; }
 
-const Camera &Source::parameters() const {
-	if (impl_) return impl_->params_;
-	else throw FTL_Error("Cannot get parameters for bad source");
-}
-
-ftl::rgbd::FrameState &Source::state() { return impl_->state_; }
-
-cv::Mat Source::cameraMatrix() const {
-	cv::Mat m = (cv::Mat_<float>(3,3) << parameters().fx, 0.0, -parameters().cx, 0.0, parameters().fy, -parameters().cy, 0.0, 0.0, 1.0);
-	return m;
-}
-
 static ftl::rgbd::BaseSourceImpl *createFileImpl(const ftl::URI &uri, Source *host) {
 	std::string path = uri.getPath();
 	// Note: This is non standard
@@ -123,7 +91,7 @@ static ftl::rgbd::BaseSourceImpl *createFileImpl(const ftl::URI &uri, Source *ho
 }
 
 static ftl::rgbd::BaseSourceImpl *createDeviceImpl(const ftl::URI &uri, Source *host) {
-	if (uri.getPathSegment(0) == "video" || uri.getPathSegment(0) == "camera" || uri.getPathSegment(0) == "pylon") {
+	if (uri.getPathSegment(0) == "stereo" || uri.getPathSegment(0) == "video" || uri.getPathSegment(0) == "camera" || uri.getPathSegment(0) == "pylon") {
 		return new StereoVideoSource(host);
 	} else if (uri.getPathSegment(0) == "realsense") {
 #ifdef HAVE_REALSENSE
@@ -154,79 +122,68 @@ static ftl::rgbd::BaseSourceImpl *createImplementation(const std::string &uristr
 	return nullptr;
 }
 
-void Source::setPose(const Eigen::Matrix4d &pose) {
-	pose_ = pose;
-	if (impl_) impl_->setPose(pose);
-}
-
-bool Source::hasCapabilities(capability_t c) {
-	return (getCapabilities() & c) == c;
-}
-
-capability_t Source::getCapabilities() const {
-	if (impl_) return impl_->capabilities_;
-	else return kCapMovable | kCapVideo | kCapStereo;  // FIXME: Don't assume these
-}
-
 void Source::reset() {
 	UNIQUE_LOCK(mutex_,lk);
-	channel_ = Channel::None;
 	if (impl_) delete impl_;
 	impl_ = nullptr;
 
 	auto uristr = get<string>("uri");
 	if (!uristr) return;
+
+	ftl::URI uri(*uristr);
+
+	restore(uri.getBaseURI(), {
+		"min_depth",
+		"max_depth",
+		"name",
+		"offset_z",
+		"size",
+		"focal",
+		"device_left",
+		"enable_touch",
+		"feed",
+		"pipeline"
+	});
+
+	uri.to_json(getConfig());
+
 	impl_ = createImplementation(*uristr, this);
 }
 
 bool Source::capture(int64_t ts) {
-	//timestamp_ = ts;
 	if (impl_) return impl_->capture(ts);
 	else return true;
 }
 
-bool Source::retrieve() {
+bool Source::retrieve(ftl::data::Frame &f) {
+	if (is_retrieving) return false;
 	is_retrieving = true;
 	bool status = false;
-	if (impl_) status = impl_->retrieve(frames_[0]);
+	if (impl_) status = impl_->retrieve(f.cast<ftl::rgbd::Frame>());
 	is_retrieving = false;
 	return status;
 }
 
-bool Source::dispatch(int64_t ts) {
-	if (!callback_) return false;
-	if (is_dispatching || is_retrieving) {
-		LOG(WARNING) << "Previous distance not completed";
-		return false;
-	}
-	is_dispatching = true;
-	_swap();
-	ftl::pool.push([this,ts](int id) {
-		callback_(ts, frames_[1]);
-		is_dispatching = false;
-	});
-	return true;
-}
-
-void Source::_swap() {
-	auto tmp = std::move(frames_[0]);
-	frames_[0] = std::move(frames_[1]);
-	frames_[1] = std::move(tmp);
-}
-
-bool Source::setChannel(ftl::codecs::Channel c) {
-	channel_ = c;
-	// FIXME:(Nick) Verify channel is supported by this source...
-	return true;
-}
+bool Source::supports(const std::string &puri) {
+	ftl::URI uri(puri);
 
-const ftl::rgbd::Camera Source::parameters(ftl::codecs::Channel chan) const {
-	return (impl_) ? impl_->parameters(chan) : parameters();
-}
+	if (uri.getPathSegment(0) == "video") {
+		return StereoVideoSource::supported(uri.getPathSegment(0));
+	} else if (uri.getPathSegment(0) == "camera" || uri.getPathSegment(0) == "stereo") {
+		return StereoVideoSource::supported(uri.getPathSegment(0));
+	} else if (uri.getPathSegment(0) == "pylon") {
+		return StereoVideoSource::supported(uri.getPathSegment(0));
+	} else if (uri.getPathSegment(0) == "realsense") {
+		#ifdef HAVE_REALSENSE
+		return RealsenseSource::supported();
+		#endif
+	} else if (uri.getPathSegment(0) == "screen") {
+		#ifndef WIN32
+		return true;
+		#endif
+	}
 
-void Source::setCallback(const FrameCallback &cb) {
-	if (bool(callback_)) LOG(ERROR) << "Source already has a callback: " << getURI();
-	callback_ = cb;
+	return false;
 }
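To illustrate the reworked API, a sketch of a caller (assumed to run on the timer thread; the frame is taken to come from whatever pool the feed uses):

	void pollSource(ftl::rgbd::Source &src, ftl::data::Frame &f, int64_t ts) {
		// capture() triggers the device for this tick; retrieve() fills the
		// frame and is guarded against re-entry by is_retrieving above.
		if (!src.capture(ts)) return;
		if (!src.retrieve(f)) return;
		auto &rgbd = f.cast<ftl::rgbd::Frame>();
		(void)rgbd;  // Colour/Depth/Calibration etc. are now populated
	}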
 
 
diff --git a/components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp b/components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp
index 656310c26..eee884d2f 100644
--- a/components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp
+++ b/components/rgbd-sources/src/sources/middlebury/middlebury_source.cpp
@@ -91,20 +91,20 @@ MiddleburySource::MiddleburySource(ftl::rgbd::Source *host, const string &dir)
 	host_->getConfig()["doffs"] = params_.doffs;
 
 	// Add event handlers to allow calibration changes...
-	host_->on("baseline", [this](const ftl::config::Event &e) {
+	host_->on("baseline", [this]() {
 		params_.baseline = host_->value("baseline", params_.baseline);
 	});
 
-	host_->on("focal", [this](const ftl::config::Event &e) {
+	host_->on("focal", [this]() {
 		params_.fx = host_->value("focal", params_.fx);
 		params_.fy = params_.fx;
 	});
 
-	host_->on("doffs", [this](const ftl::config::Event &e) {
+	host_->on("doffs", [this]() {
 		params_.doffs = host_->value("doffs", params_.doffs);
 	});
 
-	host_->on("centre_x", [this](const ftl::config::Event &e) {
+	host_->on("centre_x", [this]() {
 		params_.cx = host_->value("centre_x", params_.cx);
 	});
 
diff --git a/components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp b/components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp
index d102dbeba..051df8d2c 100644
--- a/components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp
+++ b/components/rgbd-sources/src/sources/middlebury/middlebury_source.hpp
@@ -2,7 +2,7 @@
 #ifndef _FTL_RGBD_MIDDLEBURY_SOURCE_HPP_
 #define _FTL_RGBD_MIDDLEBURY_SOURCE_HPP_
 
-#include <loguru.hpp>
+//#include <loguru.hpp>
 
 #include "../../basesource.hpp"
 #include <ftl/cuda_common.hpp>
diff --git a/components/rgbd-sources/src/sources/realsense/realsense_source.cpp b/components/rgbd-sources/src/sources/realsense/realsense_source.cpp
index 57e212027..5b3ee5b6d 100644
--- a/components/rgbd-sources/src/sources/realsense/realsense_source.cpp
+++ b/components/rgbd-sources/src/sources/realsense/realsense_source.cpp
@@ -2,11 +2,31 @@
 #include <loguru.hpp>
 #include <ftl/threads.hpp>
 #include <ftl/rgbd/source.hpp>
+#include <ftl/rgbd/capabilities.hpp>
 
 using ftl::rgbd::detail::RealsenseSource;
 using std::string;
 using ftl::codecs::Channel;
 using cv::cuda::GpuMat;
+using ftl::rgbd::Capability;
+
+static std::string get_device_name(const rs2::device& dev) {
+	// Each device provides some information on itself, such as name:
+	std::string name = "Unknown Device";
+	if (dev.supports(RS2_CAMERA_INFO_NAME))
+		name = dev.get_info(RS2_CAMERA_INFO_NAME);
+
+	return name;
+}
+
+static std::string get_device_serial(const rs2::device& dev) {
+	// The serial number of the device, if available:
+	std::string sn = "########";
+	if (dev.supports(RS2_CAMERA_INFO_SERIAL_NUMBER))
+		sn = dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER);
+
+	return sn;
+}
 
 RealsenseSource::RealsenseSource(ftl::rgbd::Source *host)
         : ftl::rgbd::BaseSourceImpl(host), align_to_depth_(RS2_STREAM_COLOR) {
@@ -20,11 +40,14 @@ RealsenseSource::RealsenseSource(ftl::rgbd::Source *host)
     //pipe_.start(cfg);
     rs2::pipeline_profile profile = pipe_.start(cfg);
     rs2::device dev = profile.get_device();
+	name_ = get_device_name(dev);
+	serial_ = get_device_serial(dev);
     rs2_intrinsics intrin = profile.get_stream(rs2_stream::RS2_STREAM_DEPTH).as<rs2::video_stream_profile>().get_intrinsics();
 
     rs2::depth_sensor ds = dev.query_sensors().front().as<rs2::depth_sensor>();
     scale_ = ds.get_depth_scale();
-    LOG(INFO) << "RS Scale = " << scale_;
+
+	LOG(INFO) << "Realsense device: " << name_ << " #" << serial_;
 
     params_.width = intrin.width;
     params_.height = intrin.height;
@@ -35,22 +58,65 @@ RealsenseSource::RealsenseSource(ftl::rgbd::Source *host)
     params_.maxDepth = 3.0;
     params_.minDepth = 0.1;
 	params_.doffs = 0.0;
+	params_.baseline = 0.055f;  // TODO: Get from device extrinsics
 
-    state_.getLeft() = params_;
+    do_update_params_ = true;
 
     LOG(INFO) << "Realsense Intrinsics: " << params_.fx << "," << params_.fy << " - " << params_.cx << "," << params_.cy << " - " << params_.width;
 }
 
 RealsenseSource::~RealsenseSource() {
+	
+}
+
+static bool rs_supported = false;
+static bool rs_init = false;
+
+bool RealsenseSource::supported() {
+	if (rs_init) return rs_supported;
+	rs_init = true;
 
+	rs2::context ctx;
+	auto devs = ctx.query_devices();
+	rs_supported = devs.size() > 0;
+	return rs_supported;
+}
+
+bool RealsenseSource::capture(int64_t ts) {
+	return true;
 }
 
 bool RealsenseSource::retrieve(ftl::rgbd::Frame &frame) {
-    frame.reset();
-	frame.setOrigin(&state_);
+    if (do_update_params_) {
+		do_update_params_ = false;
+		frame.setLeft() = params_;
+		frame.setPose() = Eigen::Matrix4d::Identity();
+
+		auto &meta = frame.create<std::map<std::string,std::string>>(Channel::MetaData);
+		meta["name"] = host_->value("name", host_->getID());
+		meta["id"] = host_->getID();
+		meta["uri"] = host_->value("uri", std::string(""));
+		meta["device"] = name_;
+		meta["serial"] = serial_;
+
+		if (!frame.has(Channel::Capabilities)) {
+			auto &cap = frame.create<std::unordered_set<Capability>>(Channel::Capabilities);
+			cap.emplace(Capability::VIDEO);
+			cap.emplace(Capability::LIVE);
+			cap.emplace(Capability::ACTIVE);
+			cap.emplace(Capability::ADJUSTABLE);
+		}
+	}
 
     rs2::frameset frames;
-	if (!pipe_.poll_for_frames(&frames)) return false;  //wait_for_frames();
+	//if (!pipe_.poll_for_frames(&frames)) return false;  //wait_for_frames();
+
+	// TODO: Move to capture function
+	try {
+		frames = pipe_.wait_for_frames(10);
+	} catch (const std::exception &e) {
+		return false;
+	}
 
 	//std::this_thread::sleep_for(std::chrono::milliseconds(10000));
 
@@ -62,24 +128,29 @@ bool RealsenseSource::retrieve(ftl::rgbd::Frame &frame) {
         if (params_.width != w) {
             params_.width = w;
             params_.height = h;
-            state_.getLeft() = params_;
+            //state_.getLeft() = params_;
         }
 
         cv::Mat tmp_rgb(cv::Size(w, h), CV_8UC4, (void*)cframe.get_data(), cv::Mat::AUTO_STEP);
         frame.create<GpuMat>(Channel::Colour).upload(tmp_rgb);
     } else {
+        auto cframe = frames.get_color_frame(); //first(RS2_STREAM_COLOR);
+        size_t w = cframe.get_width();
+        size_t h = cframe.get_height();
+        cv::Mat wrap_rgb(cv::Size(w, h), CV_8UC4, (void*)cframe.get_data(), cv::Mat::AUTO_STEP);
+        frame.create<GpuMat>(Channel::Colour).upload(wrap_rgb, stream_);
+
         frames = align_to_depth_.process(frames);
 
         rs2::depth_frame depth = frames.get_depth_frame();
-        float w = depth.get_width();
-        float h = depth.get_height();
-        rscolour_ = frames.first(RS2_STREAM_COLOR); //.get_color_frame();
-
-        cv::Mat tmp_depth(cv::Size((int)w, (int)h), CV_16UC1, (void*)depth.get_data(), depth.get_stride_in_bytes());
-        tmp_depth.convertTo(tmp_depth, CV_32FC1, scale_);
-        frame.create<GpuMat>(Channel::Depth).upload(tmp_depth);
-        cv::Mat tmp_rgb(cv::Size(w, h), CV_8UC4, (void*)rscolour_.get_data(), cv::Mat::AUTO_STEP);
-        frame.create<GpuMat>(Channel::Colour).upload(tmp_rgb);
+        w = depth.get_width();
+        h = depth.get_height();
+
+        cv::Mat wrap_depth(cv::Size((int)w, (int)h), CV_16UC1, (void*)depth.get_data(), depth.get_stride_in_bytes());
+        tmp_depth_.upload(wrap_depth, stream_);
+        tmp_depth_.convertTo(frame.create<GpuMat>(Channel::Depth), CV_32FC1, scale_, stream_);
+
+        stream_.waitForCompletion();
     }
 
 	return true;
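The depth path above follows a wrap/upload/convert pattern; the same step as a standalone sketch (parameter names are illustrative; scale is metres per 16-bit unit):

	void uploadDepth(ftl::rgbd::Frame &frame, const rs2::depth_frame &depth,
			cv::cuda::GpuMat &tmp, cv::cuda::Stream &stream, float scale) {
		// Wrap the SDK buffer without copying, upload asynchronously, then
		// convert 16-bit units to 32-bit metres directly into the channel.
		cv::Mat wrap(cv::Size(depth.get_width(), depth.get_height()), CV_16UC1,
			(void*)depth.get_data(), depth.get_stride_in_bytes());
		tmp.upload(wrap, stream);
		tmp.convertTo(frame.create<cv::cuda::GpuMat>(Channel::Depth), CV_32FC1, scale, stream);
		stream.waitForCompletion();  // wrap references SDK memory; synchronise before it is released
	}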
diff --git a/components/rgbd-sources/src/sources/realsense/realsense_source.hpp b/components/rgbd-sources/src/sources/realsense/realsense_source.hpp
index 83274aecc..37f543664 100644
--- a/components/rgbd-sources/src/sources/realsense/realsense_source.hpp
+++ b/components/rgbd-sources/src/sources/realsense/realsense_source.hpp
@@ -17,16 +17,24 @@ class RealsenseSource : public ftl::rgbd::BaseSourceImpl {
 	explicit RealsenseSource(ftl::rgbd::Source *host);
 	~RealsenseSource();
 
-	bool capture(int64_t ts) { return true; }
-	bool retrieve(ftl::rgbd::Frame &frame);
-	bool isReady();
+	bool capture(int64_t ts) override;
+	bool retrieve(ftl::rgbd::Frame &frame) override;
+	bool isReady() override;
+
+	static bool supported();
 
 	private:
 	bool ready_;
+	bool do_update_params_ = false;
     float scale_;
     rs2::pipeline pipe_;
     rs2::align align_to_depth_;
 	rs2::frame rscolour_;
+	ftl::rgbd::Camera params_;
+	std::string name_;
+	std::string serial_;
+	cv::cuda::GpuMat tmp_depth_;
+	cv::cuda::Stream stream_;
 };
 
 }
diff --git a/components/rgbd-sources/src/sources/screencapture/screencapture.cpp b/components/rgbd-sources/src/sources/screencapture/screencapture.cpp
index 39ec56c09..87b34497b 100644
--- a/components/rgbd-sources/src/sources/screencapture/screencapture.cpp
+++ b/components/rgbd-sources/src/sources/screencapture/screencapture.cpp
@@ -7,16 +7,24 @@
 #include <opencv2/calib3d.hpp>
 #include <Eigen/Eigen>
 #include <opencv2/core/eigen.hpp>
+#include <ftl/rgbd/capabilities.hpp>
+#include <ftl/codecs/touch.hpp>
+
+#include <opencv2/imgproc.hpp>
 
 using ftl::rgbd::detail::ScreenCapture;
 using ftl::codecs::Channel;
 using cv::cuda::GpuMat;
+using ftl::rgbd::Capability;
+using ftl::codecs::Touch;
+using ftl::codecs::TouchType;
 
 #ifdef HAVE_X11
 #include <X11/Xlib.h>
 #include <X11/Xutil.h>
 
 #include <X11/extensions/XShm.h>
+#include <X11/extensions/XTest.h>
 #include <sys/ipc.h>
 #include <sys/shm.h>
 
@@ -59,6 +67,7 @@ ScreenCapture::ScreenCapture(ftl::rgbd::Source *host)
 	capabilities_ = kCapVideo;
 
 	ready_ = false;
+	primary_touch_.id = -1;
 
     #ifdef HAVE_X11
 
@@ -162,26 +171,42 @@ ScreenCapture::ScreenCapture(ftl::rgbd::Source *host)
 	params_.doffs = 0.0;
 	params_.baseline = 0.1f;
 
-	state_.getLeft() = params_;
-	state_.set("name", std::string("[ScreenCapture] ") + host_->value("name", host_->getID()));
+	do_update_params_ = true;
+
+	//state_.getLeft() = params_;
+	//state_.set("name", std::string("[ScreenCapture] ") + host_->value("name", host_->getID()));
 
 	float offsetz = host_->value("offset_z",0.0f);
-	state_.setPose(matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz)));
+	//state_.setPose(matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz)));
+	pose_ = matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz));
+
+	host_->on("size", [this]() {
+		float offsetz = host_->value("offset_z",0.0f);
+		params_.maxDepth = host_->value("size", 1.0f);
+		//state_.getLeft() = params_;
+		pose_ = matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz));
+		do_update_params_ = true;
+	});
 
-	host_->on("size", [this](const ftl::config::Event &e) {
+	host_->on("offset_z", [this]() {
 		float offsetz = host_->value("offset_z",0.0f);
 		params_.maxDepth = host_->value("size", 1.0f);
-		state_.getLeft() = params_;
-		state_.setPose(matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz)));
+		//state_.getLeft() = params_;
+		pose_ = matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz));
+		do_update_params_ = true;
 	});
 
-	host_->on("offset_x", [this](const ftl::config::Event &e) {
+	host_->on("offset_x", [this]() {
 		offset_x_ = host_->value("offset_x", 0);
 	});
 
-	host_->on("offset_y", [this](const ftl::config::Event &e) {
+	host_->on("offset_y", [this]() {
 		offset_y_ = host_->value("offset_y", 0);
 	});
+
+	host_->on("enable_touch", [this]() {
+		do_update_params_ = true;
+	});
 }
 
 ScreenCapture::~ScreenCapture() {
@@ -190,20 +215,152 @@ ScreenCapture::~ScreenCapture() {
 	#endif
 }
 
+/*void ScreenCapture::_mouseClick(int button, int x, int y) {
+	#ifdef HAVE_X11
+
+	auto &s = *impl_state_;
+
+	XTestFakeMotionEvent (s.display, 0, x, y, CurrentTime);
+	XSync(s.display, 0);
+
+	XTestFakeButtonEvent (s.display, Button1, True,  CurrentTime);
+	XTestFakeButtonEvent (s.display, Button1, False, CurrentTime);
+
+	#endif
+}*/
+
+void ScreenCapture::_release() {
+	pressed_ = false;
+	#ifdef HAVE_X11
+	auto &s = *impl_state_;
+	//XTestFakeButtonEvent (s.display, Button1, False, CurrentTime);
+	#endif
+}
+
+void ScreenCapture::_press() {
+	pressed_ = true;
+	#ifdef HAVE_X11
+	auto &s = *impl_state_;
+	//XTestFakeButtonEvent (s.display, Button1, True, CurrentTime);
+	#endif
+
+	LOG(INFO) << "PRESS";
+}
+
+void ScreenCapture::_move(int x, int y) {
+	#ifdef HAVE_X11
+
+	auto &s = *impl_state_;
+	XTestFakeMotionEvent (s.display, 0, x, y, CurrentTime);
+	XSync(s.display, 0);
+	#endif
+}
+
+void ScreenCapture::_noTouch() {
+	if (primary_touch_.id >= 0 && primary_touch_.strength > 0) {
+		// RELEASE BUTTON
+		_release();
+	}
+	primary_touch_.id = -1;
+}
+
+void ScreenCapture::_singleTouch(const ftl::codecs::Touch &t) {
+	// Ignore right clicks currently
+	if (t.type != TouchType::MOUSE_LEFT && t.type != TouchType::COLLISION) return;
+
+	if ((primary_touch_.id >= 0 && primary_touch_.id != t.id) || (primary_touch_.id == t.id && primary_touch_.strength > 0 && t.strength == 0)) {
+		// RELEASE BUTTON
+		_release();
+	}
+
+	// Move mouse if no primary or ID is the same.
+	if (primary_touch_.id == -1 || t.id == primary_touch_.id) {
+		// TODO: Possibly only move when the position has actually changed.
+		// MOVE MOUSE
+		_move(t.x, t.y);
+	}
+
+	// If no primary or same and intensity is > 0, then press
+	if ((primary_touch_.id == -1 && t.strength > 0) || (primary_touch_.id == t.id && primary_touch_.strength == 0 && t.strength > 0)) {
+		// PRESS EVENT
+		_press();
+	}
+
+	primary_touch_ = t;
+}
+
+void ScreenCapture::_multiTouch(const std::vector<ftl::codecs::Touch> &touches) {
+
+}
+
 bool ScreenCapture::retrieve(ftl::rgbd::Frame &frame) {
 	if (!ready_) return false;
 	cv::Mat img;
 
+	// TODO: Proper press, release and motion behaviour
+	// Also, render the cursor location
+
 	#ifdef HAVE_X11
 	XShmGetImage(impl_state_->display, impl_state_->root, impl_state_->ximg, getOffsetX(), getOffsetY(), 0x00ffffff);
     img = cv::Mat(params_.height, params_.width, CV_8UC4, impl_state_->ximg->data);
 	#endif
 
-	frame.reset();
-	frame.setOrigin(&state_);
+	if (host_->value("enable_touch", false)) {
+		if (frame.changed(Channel::Touch)) {
+			const auto &touches = frame.get<std::vector<ftl::codecs::Touch>>(Channel::Touch);
+			//LOG(INFO) << "GOT TOUCH DATA " << touches.size();
+
+			/*for (const auto &t : touches) {
+				LOG(INFO) << " -- " << t.x << "," << t.y;
+			}*/
+
+			if (touches.size() == 0) {
+				_noTouch();
+			} else if (touches.size() == 1) {
+				//_mouseClick(1, touches[0].x, touches[0].y);
+				_singleTouch(touches[0]);
+			} else if (touches.size() == 2) {
+				_multiTouch(touches);
+			} else {
+				// Too many touches, not supported
+			}
+		} else {
+			_noTouch();
+		}
+
+		// If there is a touch, render it.
+		if (primary_touch_.id >= 0) {
+			if (pressed_) {
+				cv::circle(img, cv::Point(primary_touch_.x, primary_touch_.y), 10, cv::Scalar(0,0,255), 5);
+			} else {
+				cv::circle(img, cv::Point(primary_touch_.x, primary_touch_.y), 10, cv::Scalar(0,0,255), 3);
+			}
+		}
+	}
+
+	if (do_update_params_) {
+		frame.setPose() = pose_;
+		frame.setLeft() = params_;
+
+		auto &meta = frame.create<std::map<std::string,std::string>>(Channel::MetaData);
+		meta["name"] = host_->value("name", host_->getID());
+		meta["id"] = host_->getID();
+		meta["uri"] = host_->value("uri", std::string(""));
+		meta["device"] = std::string("X11 Screen Capture");
+
+		//if (!frame.has(Channel::Capabilities)) {
+			auto &cap = frame.create<std::unordered_set<Capability>>(Channel::Capabilities);
+			cap.clear();
+			cap.emplace(Capability::VIDEO);
+			cap.emplace(Capability::LIVE);
+			if (host_->value("enable_touch", false)) cap.emplace(Capability::TOUCH);
+		//}
+
+		do_update_params_ = false;
+	}
 
 	if (!img.empty()) {
-		frame.create<cv::Mat>(Channel::Colour) = img;
+		frame.create<cv::cuda::GpuMat>(Channel::Colour).upload(img);
 	}
 
 	return true;
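The Capabilities set written above is what consumers are expected to check; a sketch of the receiving side (assumed to live in the GUI/render path):

	bool wantsTouch(const ftl::rgbd::Frame &frame) {
		// Only sources that advertised TOUCH should receive pointer events.
		return frame.hasCapability(ftl::rgbd::Capability::TOUCH);
	}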
diff --git a/components/rgbd-sources/src/sources/screencapture/screencapture.hpp b/components/rgbd-sources/src/sources/screencapture/screencapture.hpp
index 163ab106c..3bcd84eba 100644
--- a/components/rgbd-sources/src/sources/screencapture/screencapture.hpp
+++ b/components/rgbd-sources/src/sources/screencapture/screencapture.hpp
@@ -3,6 +3,7 @@
 
 #include "../../basesource.hpp"
 #include <ftl/config.h>
+#include <ftl/codecs/touch.hpp>
 
 namespace ftl {
 
@@ -33,14 +34,27 @@ class ScreenCapture : public ftl::rgbd::BaseSourceImpl {
 	bool ready_;
 	int64_t cap_ts_;
 	int64_t cur_ts_;
-	ftl::rgbd::Frame sframe_;
+	//ftl::rgbd::Frame sframe_;
 
 	size_t full_width_;
 	size_t full_height_;
 	size_t offset_x_;
 	size_t offset_y_;
+	Eigen::Matrix4d pose_;
+	bool do_update_params_ = false;
+	bool pressed_ = false;
+	ftl::codecs::Touch primary_touch_;
 
 	ImplState *impl_state_;
+	ftl::rgbd::Camera params_;
+	//void _mouseClick(int button, int x, int y);
+
+	void _singleTouch(const ftl::codecs::Touch &t);
+	void _press();
+	void _release();
+	void _move(int x, int y);
+	void _noTouch();
+	void _multiTouch(const std::vector<ftl::codecs::Touch> &);
 };
 
 }
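For context, a sketch of what a sender might place in Channel::Touch to drive the handlers declared above (field names taken from their use in screencapture.cpp; the strength range is an assumption):

	void sendTouch(ftl::data::Frame &frame, int x, int y, bool pressed) {
		ftl::codecs::Touch t;
		t.id = 0;
		t.x = x;
		t.y = y;
		t.type = ftl::codecs::TouchType::MOUSE_LEFT;
		t.strength = pressed ? 255 : 0;  // _singleTouch() treats >0 as pressed
		frame.create<std::vector<ftl::codecs::Touch>>(ftl::codecs::Channel::Touch) = {t};
	}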
diff --git a/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
deleted file mode 100644
index 07115839c..000000000
--- a/components/rgbd-sources/src/sources/stereovideo/calibrate.cpp
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- * Copyright 2019 Nicolas Pope
- */
-
-#include <loguru.hpp>
-#include <ftl/config.h>
-#include <ftl/configuration.hpp>
-#include <ftl/threads.hpp>
-#include <ftl/calibration/parameters.hpp>
-
-#include "calibrate.hpp"
-#include "ftl/exception.hpp"
-
-#include <opencv2/core.hpp>
-#include <opencv2/core/utility.hpp>
-#include <opencv2/imgproc.hpp>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/cudawarping.hpp>
-
-using ftl::rgbd::detail::Calibrate;
-
-using cv::FileStorage;
-
-using cv::INTER_LINEAR;
-
-using cv::FileNode;
-using cv::FileNodeIterator;
-
-using cv::Mat;
-using cv::cuda::GpuMat;
-using cv::cuda::Stream;
-
-using cv::Size;
-
-using cv::Point2f;
-using cv::Point3f;
-using cv::Matx33d;
-using cv::Scalar;
-
-using std::string;
-using std::vector;
-
-Calibrate::Calibrate(nlohmann::json &config, Size image_size, cv::cuda::Stream &stream) :
-		ftl::Configurable(config) {
-	
-	img_size_ = image_size;
-	calib_size_ = image_size;
-
-	K_ = vector<Mat>(2);
-	K_[0] = Mat::eye(Size(3, 3), CV_64FC1);
-	K_[1] = Mat::eye(Size(3, 3), CV_64FC1);
-	D_ = vector<Mat>(2);
-	D_[0] = Mat::zeros(Size(5, 1), CV_64FC1);
-	D_[1] = Mat::zeros(Size(5, 1), CV_64FC1);
-	pose_ = Mat::eye(Size(4, 4), CV_64FC1);
-	pose_adjustment_ = Mat::eye(Size(4, 4), CV_64FC1);
-	Q_ = Mat::eye(Size(4, 4), CV_64FC1);
-	Q_.at<double>(3, 2) = -1;
-	Q_.at<double>(2, 3) = 1;
-
-	setRectify(true);
-}
-
-Mat Calibrate::_getK(size_t idx, Size size) {
-	CHECK(idx < K_.size());
-	CHECK(!size.empty());
-	return ftl::calibration::scaleCameraMatrix(K_[idx], size, calib_size_);
-}
-
-Mat Calibrate::_getK(size_t idx) {
-	return _getK(idx, img_size_);
-}
-
-double Calibrate::getBaseline() const {
-	if (t_.empty()) { return 0.0; }
-	return cv::norm(t_);
-}
-
-double Calibrate::getDoff() const {
-	return -(Q_.at<double>(3,3) * getBaseline());
-}
-
-double Calibrate::getDoff(const Size& size) const {
-	return getDoff() * ((double) size.width / (double) img_size_.width);
-}
-
-Mat Calibrate::getCameraMatrixLeft(const cv::Size res) {
-	if (rectify_) {
-		return ftl::calibration::scaleCameraMatrix(Mat(P1_, cv::Rect(0, 0, 3, 3)), res, img_size_);
-	} else {
-		return ftl::calibration::scaleCameraMatrix(K_[0], res, calib_size_);
-	}
-}
-
-Mat Calibrate::getCameraMatrixRight(const cv::Size res) {
-	if (rectify_) {
-		return ftl::calibration::scaleCameraMatrix(Mat(P2_, cv::Rect(0, 0, 3, 3)), res, img_size_);
-	} else {
-		return ftl::calibration::scaleCameraMatrix(K_[1], res, calib_size_);
-	}
-}
-
-Mat Calibrate::getCameraDistortionLeft() {
-	if (rectify_) {	return Mat::zeros(Size(5, 1), CV_64FC1); }
-	else { return D_[0]; }
-}
-
-Mat Calibrate::getCameraDistortionRight() {
-	if (rectify_) {	return Mat::zeros(Size(5, 1), CV_64FC1); }
-	else { return D_[1]; }
-}
-
-Mat Calibrate::getPose() const {
-	Mat T;
-	if (rectify_) {
-		Mat R1 = Mat::eye(4, 4, CV_64FC1);
-		R1_.copyTo(R1(cv::Rect(0, 0, 3, 3)));
-		T = pose_ * R1.inv();
-	}
-	else {
-		pose_.copyTo(T);
-	}
-	return pose_adjustment_ * T;
-}
-
-bool Calibrate::setRectify(bool enabled) {
-	if (t_.empty() || R_.empty()) { enabled = false; }
-	if (enabled) { 
-		rectify_ = calculateRectificationParameters(); 
-	}
-	else {
-		rectify_ = false;
-	}
-	return rectify_;
-}
-
-bool Calibrate::setDistortion(const vector<Mat> &D) {
-	if (D.size() != 2) { return false; }
-	for (const auto d : D) { if (d.size() != Size(5, 1)) { return false; }}
-	D[0].copyTo(D_[0]);
-	D[1].copyTo(D_[1]);
-	return true;
-}
-
-bool Calibrate::setIntrinsics(const Size &size, const vector<Mat> &K) {
-	if (K.size() != 2) { return false; }
-	if (size.empty() || size.width <= 0 || size.height <= 0) { return false; }
-	for (const auto k : K) {
-		if (!ftl::calibration::validate::cameraMatrix(k)) {
-			return false;
-		}
-	}
-
-	calib_size_ = Size(size);
-	K[0].copyTo(K_[0]);
-	K[1].copyTo(K_[1]);
-	return true;
-}
-
-bool Calibrate::setExtrinsics(const Mat &R, const Mat &t) {
-	if (!ftl::calibration::validate::rotationMatrix(R) ||
-		!ftl::calibration::validate::translationStereo(t)) { return false; }
-	
-	R.copyTo(R_);
-	t.copyTo(t_);
-	return true;
-}
-
-bool Calibrate::setPose(const Mat &P) {
-	if (!ftl::calibration::validate::pose(P)) { return false; }
-	P.copyTo(pose_);
-	return true;
-}
-
-bool Calibrate::setPoseAdjustment(const Mat &T) {
-	if (!ftl::calibration::validate::pose(T)) { return false; }
-	pose_adjustment_ = T * pose_adjustment_;
-	return true;
-}
-
-bool Calibrate::loadCalibration(const string &fname) {
-	FileStorage fs;
-
-	fs.open((fname).c_str(), FileStorage::READ);
-	if (!fs.isOpened()) {
-		LOG(WARNING) << "Could not open calibration file";
-		return false;
-	}
-
-	Size calib_size;
-	vector<Mat> K;
-	vector<Mat> D;
-	Mat R;
-	Mat t;
-	Mat pose;
-	Mat pose_adjustment;
-
-	fs["resolution"] >> calib_size;
-	fs["K"] >> K;
-	fs["D"] >> D;
-	fs["R"] >> R;
-	fs["t"] >> t;
-	fs["P"] >> pose;
-	fs["adjustment"] >> pose_adjustment;
-	fs.release();
-
-	bool retval = true;
-	if (calib_size.empty()) {
-		LOG(ERROR) << "calibration resolution missing in calibration file";
-		retval = false;
-	}
-	if (!setIntrinsics(calib_size, K)) {
-		LOG(ERROR) << "invalid intrinsics in calibration file";
-		retval = false;
-	}
-	if (!setDistortion(D)) {
-		LOG(ERROR) << "invalid distortion parameters in calibration file";
-		retval = false;
-	}
-	if (!setExtrinsics(R, t)) {
-		LOG(ERROR) << "invalid extrinsics in calibration file";
-		retval = false;
-	}
-	if (!setPose(pose)) {
-		LOG(ERROR) << "invalid pose in calibration file";
-		retval = false;
-	}
-	if (!setPoseAdjustment(pose_adjustment)) {
-		LOG(WARNING) << "invalid pose adjustment in calibration file (using identity)";
-	}
-
-	LOG(INFO) << "calibration loaded from: " << fname;
-	return retval;
-}
-
-bool Calibrate::writeCalibration(	const string &fname, const Size &size,
-									const vector<Mat> &K, const vector<Mat> &D, 
-									const Mat &R, const Mat &t, const Mat &pose,
-									const Mat &pose_adjustment) {
-	
-	cv::FileStorage fs(fname, cv::FileStorage::WRITE);
-	if (!fs.isOpened()) { return false; }
-
-	fs	<< "resolution" << size
-		<< "K" << K
-		<< "D" << D
-		<< "R" << R
-		<< "t" << t
-		<< "P" << pose
-		<< "adjustment" << pose_adjustment;
-	;
-	
-	fs.release();
-	return true;
-}
-
-bool Calibrate::saveCalibration(const string &fname) {
-	// note: never write rectified parameters!
-
-	// TODO: make a backup of old file
-	//if (std::filesystem::is_regular_file(fname)) {
-	//	// copy to fname + ".bak"
-	//}
-
-	return writeCalibration(fname, calib_size_, K_, D_, R_, t_, pose_, pose_adjustment_);
-}
-
-bool Calibrate::calculateRectificationParameters() {
-	
-	Mat K1 = _getK(0, img_size_);
-	Mat D1 = D_[0];
-	Mat K2 = _getK(1, img_size_);
-	Mat D2 = D_[1];
-	double alpha = value("alpha", 0.0);
-
-	try {
-		cv::stereoRectify(	K1, D1, K2, D2,
-							img_size_, R_, t_,
-							R1_, R2_, P1_, P2_, Q_, 0, alpha);
-		
-		initUndistortRectifyMap(K1, D1, R1_, P1_, img_size_, CV_32FC1, map1_.first, map2_.first);
-		initUndistortRectifyMap(K2, D2, R2_, P2_, img_size_, CV_32FC1, map1_.second, map2_.second);
-		
-		// CHECK Is this thread safe!!!!
-		map1_gpu_.first.upload(map1_.first);
-		map1_gpu_.second.upload(map1_.second);
-		map2_gpu_.first.upload(map2_.first);
-		map2_gpu_.second.upload(map2_.second);
-
-		Mat map0 = map1_.first.clone();
-		Mat map1 = map2_.first.clone();
-		cv::convertMaps(map0, map1, map1_.first, map2_.first, CV_16SC2);
-
-		map0 = map1_.second.clone();
-		map1 = map2_.second.clone();
-		cv::convertMaps(map0, map1, map1_.second, map2_.second, CV_16SC2);
-	}
-	catch (cv::Exception &ex) {
-		LOG(ERROR) << ex.what();
-		return false;
-	}
-
-	return true;
-}
-
-void Calibrate::rectifyStereo(GpuMat &l, GpuMat &r, Stream &stream) {
-	if (!rectify_) { return; }
-	// cv::cuda::remap() can not use same Mat for input and output
-	// TODO: create tmp buffers only once
-	GpuMat l_tmp(l.size(), l.type());
-	GpuMat r_tmp(r.size(), r.type());
-	cv::cuda::remap(l, l_tmp, map1_gpu_.first, map2_gpu_.first, cv::INTER_LINEAR, 0, cv::Scalar(), stream);
-	cv::cuda::remap(r, r_tmp, map1_gpu_.second, map2_gpu_.second, cv::INTER_LINEAR, 0, cv::Scalar(), stream);
-	stream.waitForCompletion();
-	l = l_tmp;
-	r = r_tmp;
-}
-
-void Calibrate::rectifyStereo(cv::Mat &l, cv::Mat &r) {
-	if (!rectify_) { return; }
-	// cv::cuda::remap() can not use same Mat for input and output
-	cv::remap(l, l, map1_.first, map2_.first, cv::INTER_LINEAR, 0, cv::Scalar());
-	cv::remap(r, r, map1_.second, map2_.second, cv::INTER_LINEAR, 0, cv::Scalar());
-}
-
-void Calibrate::rectifyLeft(cv::Mat &l) {
-	if (!rectify_) { return; }
-	cv::remap(l, l, map1_.first, map2_.first, cv::INTER_LINEAR, 0, cv::Scalar());
-}
-
-void Calibrate::rectifyRight(cv::Mat &r) {
-	if (!rectify_) { return; }
-	cv::remap(r, r, map1_.second, map2_.second, cv::INTER_LINEAR, 0, cv::Scalar());
-}
diff --git a/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp b/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
deleted file mode 100644
index 3eaa1bc2d..000000000
--- a/components/rgbd-sources/src/sources/stereovideo/calibrate.hpp
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright 2019 Nicolas Pope
- */
-
-#ifndef _FTL_CALIBRATION_HPP_
-#define _FTL_CALIBRATION_HPP_
-
-#include <opencv2/core.hpp>
-#include <opencv2/core/cuda.hpp>
-#include <string>
-#include <vector>
-#include <ftl/rgbd/camera.hpp>
-
-namespace cv {
-class FileStorage;
-class FileNode;
-};
-
-namespace ftl {
-namespace rgbd {
-namespace detail {
-
-/**
- * Manage local calibration details: undistortion, rectification and camera
- * parameters.
- */
-class Calibrate : public ftl::Configurable {
-	public:
-	Calibrate(nlohmann::json &config, cv::Size image_size, cv::cuda::Stream &stream);
-
-	/**
-	 * @brief	Rectify and undistort stereo pair images (GPU)
-	 */
-	void rectifyStereo(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::Stream &stream);
-
-	/**
-	 * @brief	Rectify and undistort stereo pair images (CPU)
-	 */
-	void rectifyStereo(cv::Mat &l, cv::Mat &r);
-
-	/**
-	 * @brief	Rectify and undistort left image (CPU)
-	 */
-	void rectifyLeft(cv::Mat &l);
-
-	/**
-	 * @brief	Rectify and undistort right image (CPU)
-	 */
-	void rectifyRight(cv::Mat &r);
-
-	void updateCalibration(const ftl::rgbd::Camera &p);
-	
-	/**
-	 * @brief Get disparity to depth matrix
-	 * @note Disparity offset is in image_size (scale)
-	 * 
-	 * 2020/01/15:	StereoVideoSource creates a Camera object which is used to
-	 * 				calculate depth from disparity (disp2depth.cu). Seems to be
-	 * 				used only in StereoVideoSource to get doff and baseline
-	 * 				parameter values in updateParameters()
-	 */
-	[[deprecated]]
-	const cv::Mat &getQ() const { return Q_; }
-
-	/**
-	 * @brief Get camera pair baseline
-	 */
-	double getBaseline() const;
-
-	/**
-	 * @brief Get camera pair disparity offset
-	 * @param size (optional) scale to given resolution.
-	 * 
-	 * Returns disparity offset for image_size resolution if size not provided.
-	 */
-	double getDoff() const;
-	double getDoff(const cv::Size& size) const;
-
-	/**
-	 * @brief	Get intrinsic paramters. If rectification is enabled, returns
-	 *			rectified intrinsic parameters, otherwise returns values from
-	 *			calibration. Parameters are scaled for given resolution.
-	 * @param	res		camera resolution
-	 */
-	cv::Mat getCameraMatrixLeft(const cv::Size res);
-	/** @brief	Same as getCameraMatrixLeft() for right camera */
-	cv::Mat getCameraMatrixRight(const cv::Size res);
-
-	/** @brief	Get camera distortion parameters. If rectification is enabled,
-	 * 			returns zeros. Otherwise returns calibrated distortion 
-	 * 			parameters values.
-	 */
-	cv::Mat getCameraDistortionLeft();
-	/** @brief	Same as getCameraDistortionLeft() for right camera */
-	cv::Mat getCameraDistortionRight();
-
-	/**
-	 * @brief	Get camera pose from calibration. Returns pose to rectified
-	 * 			camera if rectification is enabled.
-	 */
-	cv::Mat getPose() const;
-	
-	/**
-	 * @brief	Enable/disable recitification. If disabled, instance returns
-	 *			original camera intrinsic parameters (getCameraMatrixLeft() and
-	 *			getCameraMatrixRight() methods). When enabled (default), those
-	 *			methods return camera parameters for rectified images. Does not
-	 *			enable rectification, if valid parameters are missing.
-	 * @param	Rectification on/off
-	 * @returns	Status after call
-	 */
-	bool setRectify(bool enabled);
-
-	/**
-	 * @brief	Set intrinsic parameters for both cameras.
-	 * 
-	 * @param	size	calibration size
-	 * @param	K		2 camera matricies (3x3)
-	 * @returns	true if valid parameters
-	 */
-	bool setIntrinsics(const cv::Size &size, const std::vector<cv::Mat> &K);
-
-	/**
-	 * @brief	Set lens distortion parameters
-	 * @param	D 		2 distortion parameters (5x1)
-	 */
-	bool setDistortion(const std::vector<cv::Mat> &D);
-
-	/**
-	 * @brief	Set extrinsic parameters.
-	 * 
-	 * @param	R	Rotation matrix (3x3) from left to right camera
-	 * @param	t	Translation vector (1x3) from left to right camera
-	 * @returns	true if valid parameters
-	 */
-	bool setExtrinsics(const cv::Mat &R, const cv::Mat &t);
-
-	/**
-	 * @brief	Set pose
-	 * @param	pose	Pose for left camera
-	 * @returns	true if valid pose
-	 */
-	bool setPose(const cv::Mat &P);
-
-	/**
-	 * @brief	Set adjustment, which is applied to pose: T_update*T_pose 
-	 */
-	bool setPoseAdjustment(const cv::Mat &T);
-
-	/**
-	 * @brief	Calculate rectification parameters and maps. Can fail if
-	 * 			calibration parameters are invalid.
-	 * @returns	true if successful
-	 */
-	bool calculateRectificationParameters();
-
-	/**
-	 * @brief	Load calibration from file
-	 * @param	fname	File name
-	 */
-	bool loadCalibration(const std::string &fname);
-
-	/**
-	 * @brief	Write calibration parameters to file
-	 * 
-	 * Assumes two cameras and intrinsic calibration parameters have the same
-	 * resolution.
-	 * 
-	 * @todo	Validate loaded values
-	 * 
-	 * @param	fname file name
-	 * @param	size calibration resolution (intrinsic parameters)
-	 * @param	K intrinsic matrices
-	 * @param	D distortion coefficients
-	 * @param	R rotation from first camera to second
-	 * @param	t translation from first camera to second
-	 * @param	pose first camera's pose 
-	 */
-	static bool writeCalibration(const std::string &fname,
-								const cv::Size &size,
-								const std::vector<cv::Mat> &K,
-								const std::vector<cv::Mat> &D,
-								const cv::Mat &R, const cv::Mat &t,
-								const cv::Mat &pose,
-								const cv::Mat &pose_adjustment);
-
-	/*	@brief	Save current calibration to file
-	 *	@param	File name
-	 */
-	bool saveCalibration(const std::string &fname);
-
-private:
-	// rectification enabled/disabled
-	volatile bool rectify_;
-
-	/**
-	 * @brief	Get intrinsic matrix saved in calibration.
-	 * @param	Camera index (0 left, 1 right)
-	 * @param	Resolution
-	 */
-	cv::Mat _getK(size_t idx, cv::Size size);
-	cv::Mat _getK(size_t idx);
-
-	// calibration resolution (loaded from file by loadCalibration)
-	cv::Size calib_size_;
-	// camera resolution
-	cv::Size img_size_;
-
-	// rectification maps
-	std::pair<cv::Mat, cv::Mat> map1_;
-	std::pair<cv::Mat, cv::Mat> map2_;
-	std::pair<cv::cuda::GpuMat, cv::cuda::GpuMat> map1_gpu_;
-	std::pair<cv::cuda::GpuMat, cv::cuda::GpuMat> map2_gpu_;
-
-	// parameters for rectification, see cv::stereoRectify() documentation
-	cv::Mat R1_;
-	cv::Mat P1_;
-	cv::Mat R2_;
-	cv::Mat P2_;
-
-	// disparity to depth matrix
-	cv::Mat Q_;
-	
-	// intrinsic parameters and distortion coefficients
-	std::vector<cv::Mat> K_;
-	std::vector<cv::Mat> D_;
-
-	// transformation from left to right camera: R_ and T_
-	cv::Mat R_;
-	cv::Mat t_;
-	// pose for left camera
-	cv::Mat pose_;
-	cv::Mat pose_adjustment_;
-};
-
-}
-}
-}
-
-#endif // _FTL_CALIBRATION_HPP_
-
diff --git a/components/rgbd-sources/src/sources/stereovideo/device.hpp b/components/rgbd-sources/src/sources/stereovideo/device.hpp
index 7a7fcb45b..f575ea3ae 100644
--- a/components/rgbd-sources/src/sources/stereovideo/device.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/device.hpp
@@ -7,9 +7,11 @@
 
 namespace ftl {
 namespace rgbd {
+class Frame;
+
 namespace detail {
 
-class Calibrate;
+class StereoRectification;
 
 struct DeviceDetails {
 	std::string name;
@@ -21,7 +23,7 @@ struct DeviceDetails {
 /**
  * Abstract base class for camera or stereo camera sources. Just wraps the
  * basic grab and retrieve functionality with rectification.
- * 
+ *
  * @see OpenCVDevice
  * @see PylonDevice
  */
@@ -33,7 +35,7 @@ class Device : public Configurable {
 	//virtual const std::vector<DeviceDetails> &listDevices()=0;
 
 	virtual bool grab()=0;
-	virtual bool get(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, Calibrate *c, cv::cuda::Stream &stream)=0;
+	virtual bool get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, StereoRectification *c, cv::cuda::Stream &stream)=0;
 
 	virtual unsigned int width() const =0;
 	virtual unsigned int height() const =0;
@@ -42,14 +44,16 @@ class Device : public Configurable {
 	virtual unsigned int fullHeight() const =0;
 
 	inline bool hasHigherRes() const { return fullWidth() != width(); }
-	
+
 	virtual double getTimestamp() const =0;
-	
+
 	virtual bool isStereo() const =0;
+
+	virtual void populateMeta(std::map<std::string,std::string> &meta) const {}
 };
 
 }
 }
 }
 
-#endif
\ No newline at end of file
+#endif
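The new populateMeta() hook defaults to a no-op; a sketch of a typical override (hypothetical subclass name, mirroring the OpenCV and Pylon implementations later in this patch):

	void MyDevice::populateMeta(std::map<std::string,std::string> &meta) const {
		meta["device"] = "Example Stereo Rig";  // human-readable model name
		meta["serial"] = "0000";                // optional, if the backend exposes one
	}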
diff --git a/components/rgbd-sources/src/sources/stereovideo/opencv.cpp b/components/rgbd-sources/src/sources/stereovideo/opencv.cpp
index 23c5165a8..b3c2ba810 100644
--- a/components/rgbd-sources/src/sources/stereovideo/opencv.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/opencv.cpp
@@ -8,12 +8,14 @@
 #include <chrono>
 #include <ftl/threads.hpp>
 #include <ftl/profiler.hpp>
+#include <ftl/rgbd/frame.hpp>
 
 #include "opencv.hpp"
-#include "calibrate.hpp"
+#include "rectification.hpp"
 #include <opencv2/core.hpp>
 #include <opencv2/opencv.hpp>
 #include <opencv2/xphoto.hpp>
+#include <opencv2/imgcodecs.hpp>
 
 #include <ftl/timer.hpp>
 
@@ -31,7 +33,8 @@
 #endif
 
 using ftl::rgbd::detail::OpenCVDevice;
-using ftl::rgbd::detail::Calibrate;
+using ftl::rgbd::detail::StereoRectification;
+using ftl::codecs::Channel;
 using cv::Mat;
 using cv::VideoCapture;
 using cv::Rect;
@@ -42,18 +45,18 @@ using std::chrono::high_resolution_clock;
 using std::chrono::milliseconds;
 using std::this_thread::sleep_for;
 
-OpenCVDevice::OpenCVDevice(nlohmann::json &config)
+OpenCVDevice::OpenCVDevice(nlohmann::json &config, bool stereo)
 		: ftl::rgbd::detail::Device(config), timestamp_(0.0) {
 
-	std::vector<ftl::rgbd::detail::DeviceDetails> devices = _selectDevices();
+	std::vector<ftl::rgbd::detail::DeviceDetails> devices_ = getDevices();
 
 	int device_left = 0;
 	int device_right = -1;
 
-	LOG(INFO) << "Found " << devices.size() << " cameras";
+	LOG(INFO) << "Found " << devices_.size() << " cameras";
 
 	if (Configurable::get<std::string>("device_left")) {
-		for (auto &d : devices) {
+		for (auto &d : devices_) {
 			if (d.name.find(*Configurable::get<std::string>("device_left")) != std::string::npos) {
 				device_left = d.id;
 				LOG(INFO) << "Device left = " << device_left;
@@ -61,11 +64,11 @@ OpenCVDevice::OpenCVDevice(nlohmann::json &config)
 			}
 		}
 	} else {
-		device_left = value("device_left", (devices.size() > 0) ? devices[0].id : 0);
+		device_left = value("device_left", (devices_.size() > 0) ? devices_[0].id : 0);
 	}
 
 	if (Configurable::get<std::string>("device_right")) {
-		for (auto &d : devices) {
+		for (auto &d : devices_) {
 			if (d.name.find(*Configurable::get<std::string>("device_right")) != std::string::npos) {
 				if (d.id == device_left) continue;
 				device_right = d.id;
@@ -73,16 +76,19 @@ OpenCVDevice::OpenCVDevice(nlohmann::json &config)
 			}
 		}
 	} else {
-		device_right = value("device_right", (devices.size() > 1) ? devices[1].id : 1);
+		device_right = value("device_right", (devices_.size() > 1) ? devices_[1].id : 1);
 	}
 
-	nostereo_ = value("nostereo", false);
+	nostereo_ = value("nostereo", !stereo);
 
 	if (device_left < 0) {
 		LOG(ERROR) << "No available cameras";
 		return;
 	}
 
+	dev_ix_left_ = device_left;
+	dev_ix_right_ = device_right;
+
 	// Use cameras
 	camera_a_ = new VideoCapture;
 	LOG(INFO) << "Cameras check... ";
@@ -125,7 +131,7 @@ OpenCVDevice::OpenCVDevice(nlohmann::json &config)
 	camera_a_->set(cv::CAP_PROP_FRAME_HEIGHT, value("height", 720));
 	camera_a_->set(cv::CAP_PROP_FPS, 1000 / ftl::timer::getInterval());
 	//camera_a_->set(cv::CAP_PROP_BUFFERSIZE, 0);  // Has no effect
-	
+
 	Mat frame;
 	if (!camera_a_->grab()) LOG(ERROR) << "Could not grab a video frame";
 	camera_a_->retrieve(frame);
@@ -134,7 +140,8 @@ OpenCVDevice::OpenCVDevice(nlohmann::json &config)
 	height_ = frame.rows;
 
 	dwidth_ = value("depth_width", width_);
-	dheight_ = value("depth_height", height_);
+	float aspect = float(height_) / float(width_);
+	dheight_ = value("depth_height", std::min(uint32_t(aspect*float(dwidth_)), height_)) & 0xFFFe;
 
 	// Allocate page locked host memory for fast GPU transfer
 	left_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC4);
@@ -146,7 +153,13 @@ OpenCVDevice::~OpenCVDevice() {
 
 }
 
-std::vector<ftl::rgbd::detail::DeviceDetails> OpenCVDevice::_selectDevices() {
+static std::vector<ftl::rgbd::detail::DeviceDetails> opencv_devices;
+static bool opencv_dev_init = false;
+
+std::vector<ftl::rgbd::detail::DeviceDetails> OpenCVDevice::getDevices() {
+	if (opencv_dev_init) return opencv_devices;
+	opencv_dev_init = true;
+
 	std::vector<ftl::rgbd::detail::DeviceDetails> devices;
 
 #ifdef WIN32
@@ -207,7 +220,7 @@ std::vector<ftl::rgbd::detail::DeviceDetails> OpenCVDevice::_selectDevices() {
 		}
 		else
 		{
-			
+
 		}
 	}
 
@@ -219,7 +232,7 @@ std::vector<ftl::rgbd::detail::DeviceDetails> OpenCVDevice::_selectDevices() {
 #else
 
 	int fd;
-    v4l2_capability video_cap;
+	v4l2_capability video_cap;
 	v4l2_frmsizeenum video_fsize;
 
 	LOG(INFO) << "Video Devices:";
@@ -290,6 +303,7 @@ std::vector<ftl::rgbd::detail::DeviceDetails> OpenCVDevice::_selectDevices() {
 
 #endif
 
+	opencv_devices = devices;
 	return devices;
 }
 
@@ -311,9 +325,9 @@ bool OpenCVDevice::grab() {
 	return true;
 }
 
-bool OpenCVDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out,
-	cv::cuda::GpuMat &l_hres_out, cv::Mat &r_hres_out, Calibrate *c, cv::cuda::Stream &stream) {
-	
+bool OpenCVDevice::get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out,
+	cv::cuda::GpuMat &l_hres_out, cv::Mat &r_hres_out, StereoRectification *c, cv::cuda::Stream &stream) {
+
 	Mat l, r ,hres;
 
 	// Use page locked memory
@@ -337,7 +351,7 @@ bool OpenCVDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out,
 			cv::cvtColor(frame_r_, rfull, cv::COLOR_BGR2BGRA);
 
 			if (stereo_) {
-				c->rectifyRight(rfull);
+				c->rectify(rfull, Channel::Right);
 
 				if (hasHigherRes()) {
 					// TODO: Use threads?
@@ -378,8 +392,8 @@ bool OpenCVDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out,
 	if (stereo_) {
 		//FTL_Profile("Rectification", 0.01);
 		//c->rectifyStereo(lfull, rfull);
-		c->rectifyLeft(lfull);
-		
+		c->rectify(lfull, Channel::Left);
+
 		// Need to resize
 		//if (hasHigherRes()) {
 			// TODO: Use threads?
@@ -401,6 +415,14 @@ bool OpenCVDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out,
 	}
 	//r_out.upload(r, stream);
 
+	if (!frame.hasChannel(Channel::Thumbnail)) {
+		cv::Mat thumb;
+		cv::resize(l, thumb, cv::Size(320,240));
+		auto &thumbdata = frame.create<std::vector<uint8_t>>(Channel::Thumbnail);
+		std::vector<int> params = {cv::IMWRITE_JPEG_QUALITY, 70};
+		cv::imencode(".jpg", thumb, thumbdata, params);
+	}
+
 	if (camera_b_) {
 		//FTL_Profile("WaitCamB", 0.05);
 		future_b.wait();
@@ -417,3 +439,9 @@ bool OpenCVDevice::isStereo() const {
 	return stereo_ && !nostereo_;
 }
 
+void OpenCVDevice::populateMeta(std::map<std::string,std::string> &meta) const {
+	if (dev_ix_left_ >= 0 && dev_ix_left_ < static_cast<int>(opencv_devices.size())) {
+		meta["device"] = opencv_devices[dev_ix_left_].name;
+	}
+}
+
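The thumbnail block above runs once per stream; the same step as a standalone sketch (resolution and JPEG quality as in the patch):

	void makeThumbnail(ftl::rgbd::Frame &frame, const cv::Mat &left) {
		if (frame.hasChannel(Channel::Thumbnail)) return;  // encode only once
		cv::Mat thumb;
		cv::resize(left, thumb, cv::Size(320, 240));
		auto &jpg = frame.create<std::vector<uint8_t>>(Channel::Thumbnail);
		cv::imencode(".jpg", thumb, jpg, {cv::IMWRITE_JPEG_QUALITY, 70});
	}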
diff --git a/components/rgbd-sources/src/sources/stereovideo/opencv.hpp b/components/rgbd-sources/src/sources/stereovideo/opencv.hpp
index 1db067b8d..e0f10ca1e 100644
--- a/components/rgbd-sources/src/sources/stereovideo/opencv.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/opencv.hpp
@@ -14,25 +14,32 @@ namespace detail {
 
 class OpenCVDevice : public ftl::rgbd::detail::Device {
 	public:
-	explicit OpenCVDevice(nlohmann::json &config);
+	explicit OpenCVDevice(nlohmann::json &config, bool stereo);
 	~OpenCVDevice();
 
 	static std::vector<DeviceDetails> listDevices();
-	
+
 	bool grab() override;
-	bool get(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, Calibrate *c, cv::cuda::Stream &stream) override;
+	bool get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, StereoRectification *c, cv::cuda::Stream &stream) override;
 
 	unsigned int width() const override { return dwidth_; }
 	unsigned int height() const override { return dheight_; }
 
 	unsigned int fullWidth() const override { return width_; }
 	unsigned int fullHeight() const override { return height_; }
-	
+
 	double getTimestamp() const override;
-	
+
 	bool isStereo() const override;
-	
+
+	void populateMeta(std::map<std::string,std::string> &meta) const override;
+
+	static std::vector<DeviceDetails> getDevices();
+
 	private:
+	std::vector<ftl::rgbd::detail::DeviceDetails> devices_;
+	int dev_ix_left_ = -1;
+	int dev_ix_right_ = -1;
 	double timestamp_;
 	//double tps_;
 	bool stereo_;
@@ -55,8 +62,6 @@ class OpenCVDevice : public ftl::rgbd::detail::Device {
 
 	cv::Mat frame_l_;
 	cv::Mat frame_r_;
-
-	std::vector<DeviceDetails> _selectDevices();
 };
 
 }
diff --git a/components/rgbd-sources/src/sources/stereovideo/pylon.cpp b/components/rgbd-sources/src/sources/stereovideo/pylon.cpp
index b8155489e..98e86714b 100644
--- a/components/rgbd-sources/src/sources/stereovideo/pylon.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/pylon.cpp
@@ -1,16 +1,21 @@
 #include "pylon.hpp"
 
-#include "calibrate.hpp"
+#include "rectification.hpp"
+
 #include <loguru.hpp>
 #include <ftl/threads.hpp>
 #include <ftl/rgbd/source.hpp>
 #include <ftl/profiler.hpp>
+#include <ftl/rgbd/frame.hpp>
 
 #include <pylon/PylonIncludes.h>
 #include <pylon/BaslerUniversalInstantCamera.h>
 
 #include <opencv2/imgproc.hpp>
 
+#include <nlohmann/json.hpp>
+
+using ftl::rgbd::detail::StereoRectification;
 using ftl::rgbd::detail::PylonDevice;
 using std::string;
 using ftl::codecs::Channel;
@@ -19,29 +24,55 @@ using cv::Mat;
 using namespace Pylon;
 
 PylonDevice::PylonDevice(nlohmann::json &config)
-        : ftl::rgbd::detail::Device(config), ready_(false), lcam_(nullptr), rcam_(nullptr) {
+		: ftl::rgbd::detail::Device(config), ready_(false), lcam_(nullptr), rcam_(nullptr) {
 
 	auto &inst = CTlFactory::GetInstance();
 
 	Pylon::DeviceInfoList_t devices;
 	inst.EnumerateDevices(devices);
 
+	int dev_left_num = -1;
+	std::string dev_left;
+
+	if (getConfig()["device_left"].is_number()) {
+		dev_left = std::to_string(value("device_left",0));
+	} else {
+		dev_left = value("device_left", std::string("default"));
+	}
+
 	if (devices.size() == 0) {
 		LOG(ERROR) << "No Pylon devices attached";
 		return;
 	} else {
+		int i=0;
 		for (auto d : devices) {
-			LOG(INFO) << " - found Pylon device - " << d.GetFullName() << "(" << d.GetModelName() << ")";
+			if (std::string(d.GetSerialNumber()) == dev_left) {
+				dev_left_num = i;
+			}
+
+			if (dev_left_num == i) {
+				LOG(INFO) << " - found Pylon device - " << d.GetSerialNumber() << " (" << d.GetModelName() << ") [primary]";
+			} else {
+				LOG(INFO) << " - found Pylon device - " << d.GetSerialNumber() << " (" << d.GetModelName() << ")";
+			}
+
+			++i;
 		}
 	}
 
+	if (dev_left_num == -1) dev_left_num = 0;
+
+	name_ = devices[dev_left_num].GetModelName();
+	serial_ = devices[dev_left_num].GetSerialNumber();
+
 	try {
-    	lcam_ = new CBaslerUniversalInstantCamera( CTlFactory::GetInstance().CreateDevice(devices[0]));
+		lcam_ = new CBaslerUniversalInstantCamera( CTlFactory::GetInstance().CreateDevice(devices[dev_left_num]));
 		lcam_->RegisterConfiguration( new Pylon::CSoftwareTriggerConfiguration, Pylon::RegistrationMode_ReplaceAll, Pylon::Cleanup_Delete);
 		lcam_->Open();
 
 		if (devices.size() >= 2) {
-			rcam_ = new CBaslerUniversalInstantCamera( CTlFactory::GetInstance().CreateDevice(devices[1]));
+			int dev_right = (dev_left_num == 0) ? 1 : 0;
+			rcam_ = new CBaslerUniversalInstantCamera( CTlFactory::GetInstance().CreateDevice(devices[dev_right]));
 			rcam_->RegisterConfiguration( new Pylon::CSoftwareTriggerConfiguration, Pylon::RegistrationMode_ReplaceAll, Pylon::Cleanup_Delete);
 			rcam_->Open();
 		}
@@ -52,10 +83,13 @@ PylonDevice::PylonDevice(nlohmann::json &config)
 		lcam_->StartGrabbing( Pylon::GrabStrategy_OneByOne);
 		if (rcam_) rcam_->StartGrabbing( Pylon::GrabStrategy_OneByOne);
 
+		if (rcam_) rcam_->WaitForFrameTriggerReady( 300, Pylon::TimeoutHandling_ThrowException);
+		lcam_->WaitForFrameTriggerReady( 300, Pylon::TimeoutHandling_ThrowException);
+
 		ready_ = true;
 	} catch (const Pylon::GenericException &e) {
 		// Error handling.
-        LOG(ERROR) << "Pylon: An exception occurred - " << e.GetDescription();
+		LOG(ERROR) << "Pylon: An exception occurred - " << e.GetDescription();
 	}
 
 	// Choose a good default depth res
@@ -69,13 +103,28 @@ PylonDevice::PylonDevice(nlohmann::json &config)
 	left_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC4);
 	right_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC4);
 	hres_hm_ = cv::cuda::HostMem(fullheight_, fullwidth_, CV_8UC4);
+
+	on("exposure", [this]() {
+		if (lcam_->GetDeviceInfo().GetModelName() != "Emulation") {
+			lcam_->ExposureTime.SetValue(value("exposure", 24000.0f));  // Exposure time in microseconds
+		}
+		if (rcam_ && rcam_->GetDeviceInfo().GetModelName() != "Emulation") {
+			rcam_->ExposureTime.SetValue(value("exposure", 24000.0f));  // Exposure time in microseconds
+		}
+	});
 }
 
 PylonDevice::~PylonDevice() {
 
 }
 
+static std::vector<ftl::rgbd::detail::DeviceDetails> pylon_devices;
+static bool pylon_dev_init = false;
+
 std::vector<ftl::rgbd::detail::DeviceDetails> PylonDevice::listDevices() {
+	if (pylon_dev_init) return pylon_devices;
+	pylon_dev_init = true;
+
 	auto &inst = CTlFactory::GetInstance();
 
 	Pylon::DeviceInfoList_t devices;
@@ -93,6 +142,7 @@ std::vector<ftl::rgbd::detail::DeviceDetails> PylonDevice::listDevices() {
 		r.maxwidth = 0;
 	}
 
+	pylon_devices = results;
 	return results;
 }
 
@@ -119,6 +169,15 @@ void PylonDevice::_configureCamera(CBaslerUniversalInstantCamera *cam) {
 	} else {
 		LOG(WARNING) << "Could not change pixel format";
 	}
+
+	if (cam->GetDeviceInfo().GetModelName() != "Emulation") {
+		// Emulated device throws exception with these
+		cam->ExposureTime.SetValue(value("exposure", 24000.0f));  // Exposure time in microseconds
+		cam->AutoTargetBrightness.SetValue(0.3);
+		cam->LightSourcePreset.SetValue(Basler_UniversalCameraParams::LightSourcePreset_Tungsten2800K);  // White balance option
+		cam->BalanceWhiteAuto.SetValue(Basler_UniversalCameraParams::BalanceWhiteAuto_Once);
+		cam->GainAuto.SetValue(Basler_UniversalCameraParams::GainAuto_Once);
+	}
 }
 
 bool PylonDevice::grab() {
@@ -130,8 +189,8 @@ bool PylonDevice::grab() {
 
 	try {
 		FTL_Profile("Frame Capture", 0.001);
-		if (rcam_) rcam_->WaitForFrameTriggerReady( 30, Pylon::TimeoutHandling_ThrowException);
-		else lcam_->WaitForFrameTriggerReady( 30, Pylon::TimeoutHandling_ThrowException);
+		if (rcam_) rcam_->WaitForFrameTriggerReady( 0, Pylon::TimeoutHandling_ThrowException);
+		lcam_->WaitForFrameTriggerReady( 0, Pylon::TimeoutHandling_ThrowException);
 
 		lcam_->ExecuteSoftwareTrigger();
 		if (rcam_) rcam_->ExecuteSoftwareTrigger();
@@ -143,7 +202,21 @@ bool PylonDevice::grab() {
 	return true;
 }
 
-bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda::GpuMat &h_l, cv::Mat &h_r, Calibrate *c, cv::cuda::Stream &stream) {
+bool PylonDevice::_retrieveFrames(Pylon::CGrabResultPtr &result, Pylon::CBaslerUniversalInstantCamera *cam) {
+	do {
+		if (cam->RetrieveResult(0, result, Pylon::TimeoutHandling_Return)) {
+			if (!result->GrabSucceeded()) {
+				LOG(ERROR) << "Retrieve failed " << result->GetErrorDescription();
+			}
+		} else {
+			LOG(ERROR) << "Pylon frame missing";
+			return false;
+		}
+	} while (!result->GrabSucceeded());
+	return true;
+}
+
+bool PylonDevice::get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda::GpuMat &h_l, cv::Mat &h_r, StereoRectification *c, cv::cuda::Stream &stream) {
 	if (!isReady()) return false;
 
 	Mat l, r ,hres;
@@ -162,19 +235,50 @@ bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda
 	//cudaGetDevice(&dev);
 	//LOG(INFO) << "Current cuda device = " << dev;
 
+	if (isStereo()) {
+		auto lcount = lcam_->NumReadyBuffers.GetValue();
+		auto rcount = rcam_->NumReadyBuffers.GetValue();
+
+		/*if (left_fail_) {
+			left_fail_ = 0;
+			Pylon::CGrabResultPtr tmp_result;
+			lcam_->RetrieveResult(0, tmp_result, Pylon::TimeoutHandling_Return);
+		}
+		if (rcam_ && right_fail_) {
+			right_fail_ = 0;
+			Pylon::CGrabResultPtr tmp_result;
+			rcam_->RetrieveResult(0, tmp_result, Pylon::TimeoutHandling_Return);
+		}*/
+
+		if (rcount == 0 || lcount == 0) {
+			LOG(WARNING) << "Retrieve failed for L+R";
+			return false;
+		}
+
+		if (rcount > 1 && lcount > 1) {
+			LOG(WARNING) << "Pylon buffer latency problem: " << lcount << " vs " << rcount << " frames";
+			Pylon::CGrabResultPtr tmp_result;
+			//lcam_->RetrieveResult(0, tmp_result, Pylon::TimeoutHandling_Return);
+			//rcam_->RetrieveResult(0, tmp_result, Pylon::TimeoutHandling_Return);
+			_retrieveFrames(tmp_result, lcam_);
+			_retrieveFrames(tmp_result, rcam_);
+		} else if (rcount > 1) LOG(ERROR) << "Buffers (R) out of sync by " << rcount;
+		else if (lcount > 1) LOG(ERROR) << "Buffers (L) out of sync by " << lcount;
+	} else {
+		if (lcam_->NumReadyBuffers.GetValue() == 0) {
+			LOG(INFO) << "Retrieve failed for L";
+			return false;
+		}
+	}
+
 	try {
 		FTL_Profile("Frame Retrieve", 0.005);
 		std::future<bool> future_b;
 		if (rcam_) {
 			future_b = std::move(ftl::pool.push([this,&rfull,&r,&l,c,&r_out,&h_r,&stream](int id) {
 				Pylon::CGrabResultPtr result_right;
-				int rcount = 0;
-				if (rcam_ && rcam_->RetrieveResult(0, result_right, Pylon::TimeoutHandling_Return)) ++rcount;
 
-				if (rcount == 0 || !result_right->GrabSucceeded()) {
-					LOG(ERROR) << "Retrieve failed";
-					return false;
-				}
+				if (!_retrieveFrames(result_right, rcam_)) return false;
 
 				cv::Mat wrap_right(
 				result_right->GetHeight(),
@@ -182,10 +286,14 @@ bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda
 				CV_8UC1,
 				(uint8_t*)result_right->GetBuffer());
 
-				cv::cvtColor(wrap_right, rfull, cv::COLOR_BayerBG2BGRA);
+				{
+					FTL_Profile("Bayer Colour (R)", 0.005);
+					cv::cvtColor(wrap_right, rfull, cv::COLOR_BayerRG2BGRA);
+				}
 
 				if (isStereo()) {
-					c->rectifyRight(rfull);
+					FTL_Profile("Rectify and Resize (R)", 0.005);
+					c->rectify(rfull, Channel::Right);
 
 					if (hasHigherRes()) {
 						cv::resize(rfull, r, r.size(), 0.0, 0.0, cv::INTER_CUBIC);
@@ -202,13 +310,11 @@ bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda
 		}
 
 		Pylon::CGrabResultPtr result_left;
-		int lcount = 0;
-		{
-			if (lcam_->RetrieveResult(0, result_left, Pylon::TimeoutHandling_Return)) ++lcount;
-		}
 
-		if (lcount == 0 || !result_left->GrabSucceeded()) {
-			LOG(ERROR) << "Retrieve failed";
+		if (!_retrieveFrames(result_left, lcam_)) {
+			if (rcam_) {
+				future_b.wait();
+			}
 			return false;
 		}
 
@@ -218,23 +324,30 @@ bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda
 			CV_8UC1,
 			(uint8_t*)result_left->GetBuffer());
 
-		cv::cvtColor(wrap_left, lfull, cv::COLOR_BayerBG2BGRA);
-
-		if (isStereo()) {
-			c->rectifyLeft(lfull);
+		{
+			FTL_Profile("Bayer Colour (L)", 0.005);
+			cv::cvtColor(wrap_left, lfull, cv::COLOR_BayerRG2BGRA);
 		}
 
-		if (hasHigherRes()) {
-			cv::resize(lfull, l, l.size(), 0.0, 0.0, cv::INTER_CUBIC);
-			h_l.upload(hres, stream);
-		} else {
-			h_l = cv::cuda::GpuMat();
+		{
+			FTL_Profile("Rectify and Resize (L)", 0.005);
+			if (isStereo()) {
+				c->rectify(lfull, Channel::Left);
+			}
+
+			if (hasHigherRes()) {
+				cv::resize(lfull, l, l.size(), 0.0, 0.0, cv::INTER_CUBIC);
+				h_l.upload(hres, stream);
+			} else {
+				h_l = cv::cuda::GpuMat();
+			}
 		}
 
 		l_out.upload(l, stream);
 
 		if (rcam_) {
 			future_b.wait();
+			if (!future_b.get()) return false;
 		}
 
 	} catch (const GenericException &e) {
@@ -245,6 +358,11 @@ bool PylonDevice::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda
 }
 
 bool PylonDevice::isReady() const {
-    return lcam_ && lcam_->IsOpen();
+	return lcam_ && lcam_->IsOpen();
+}
+
+void PylonDevice::populateMeta(std::map<std::string,std::string> &meta) const {
+	meta["device"] = name_;
+	meta["serial"] = serial_;
 }
 
diff --git a/components/rgbd-sources/src/sources/stereovideo/pylon.hpp b/components/rgbd-sources/src/sources/stereovideo/pylon.hpp
index 84b0742fb..70c82a3da 100644
--- a/components/rgbd-sources/src/sources/stereovideo/pylon.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/pylon.hpp
@@ -7,6 +7,7 @@
 
 namespace Pylon {
 class CBaslerUniversalInstantCamera;
+class CGrabResultPtr;
 }
 
 namespace ftl {
@@ -21,20 +22,22 @@ class PylonDevice : public ftl::rgbd::detail::Device {
 	static std::vector<DeviceDetails> listDevices();
 
 	bool grab() override;
-	bool get(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, Calibrate *c, cv::cuda::Stream &stream) override;
+	bool get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h_l, cv::Mat &h_r, StereoRectification *c, cv::cuda::Stream &stream) override;
 
 	unsigned int width() const override { return width_; }
 	unsigned int height() const override { return height_; };
 
 	unsigned int fullWidth() const override { return fullwidth_; }
 	unsigned int fullHeight() const override { return fullheight_; }
-	
+
 	double getTimestamp() const override { return 0.0; }
-	
+
 	bool isStereo() const override { return lcam_ && rcam_; }
 
 	bool isReady() const;
 
+	void populateMeta(std::map<std::string,std::string> &meta) const override;
+
 	private:
 	bool ready_;
 	Pylon::CBaslerUniversalInstantCamera *lcam_;
@@ -44,6 +47,10 @@ class PylonDevice : public ftl::rgbd::detail::Device {
 	uint32_t fullheight_;
 	uint32_t width_;
 	uint32_t height_;
+	std::string name_;
+	std::string serial_;
+	int left_fail_=0;
+	int right_fail_=0;
 
 	cv::cuda::HostMem left_hm_;
 	cv::cuda::HostMem right_hm_;
@@ -51,6 +58,7 @@ class PylonDevice : public ftl::rgbd::detail::Device {
 	cv::Mat rtmp_;
 
 	void _configureCamera(Pylon::CBaslerUniversalInstantCamera *cam);
+	bool _retrieveFrames(Pylon::CGrabResultPtr &result, Pylon::CBaslerUniversalInstantCamera *cam);
 };
 
 }
diff --git a/components/rgbd-sources/src/sources/stereovideo/rectification.cpp b/components/rgbd-sources/src/sources/stereovideo/rectification.cpp
new file mode 100644
index 000000000..a9395664c
--- /dev/null
+++ b/components/rgbd-sources/src/sources/stereovideo/rectification.cpp
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2019 Nicolas Pope
+ */
+
+#include <loguru.hpp>
+#include <ftl/config.h>
+#include <ftl/configuration.hpp>
+#include <ftl/calibration/parameters.hpp>
+
+#include "rectification.hpp"
+#include "ftl/exception.hpp"
+
+#include <opencv2/core.hpp>
+#include <opencv2/core/utility.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/calib3d.hpp>
+
+using ftl::rgbd::detail::StereoRectification;
+using ftl::calibration::CalibrationData;
+using ftl::codecs::Channel;
+
+StereoRectification::StereoRectification(nlohmann::json &config, cv::Size image_size) :
+	ftl::Configurable(config), image_resolution_(image_size),
+	enabled_(false), valid_(false), interpolation_(cv::INTER_LINEAR),
+	baseline_(0.0) {
+
+}
+
+void StereoRectification::setSize(cv::Size size) {
+	image_resolution_ = size;
+	if (calibrated()) {
+		calculateParameters();
+	}
+}
+
+void StereoRectification::setInterpolation(int interpolation) {
+	interpolation_ = interpolation;
+}
+
+void StereoRectification::setEnabled(bool value) {
+	enabled_ = value;
+}
+
+bool StereoRectification::enabled() {
+	return enabled_;
+}
+
+bool StereoRectification::calibrated() {
+	return valid_;
+}
+
+void StereoRectification::setCalibration(CalibrationData &calib) {
+	if (calib.hasCalibration(Channel::Left) && calib.hasCalibration(Channel::Right)) {
+		calib_left_ = calib.get(Channel::Left);
+		calib_right_ = calib.get(Channel::Right);
+		calculateParameters();
+	}
+}
+
+void StereoRectification::calculateParameters() {
+	using namespace ftl::calibration;
+	// TODO: lock
+	{
+		bool valid = true;
+		valid &= calib_left_.intrinsic.resolution != cv::Size{0, 0};
+		valid &= calib_right_.intrinsic.resolution != cv::Size{0, 0};
+		valid &= validate::cameraMatrix(calib_left_.intrinsic.matrix());
+		valid &= validate::cameraMatrix(calib_right_.intrinsic.matrix());
+		if (!valid) { return; }
+	}
+
+	valid_ = false;
+
+	// create temporary buffers for rectification
+	if (tmp_l_.size() != image_resolution_) {
+		//using cv::cuda::HostMem;
+		//tmp_l_ = HostMem(image_resolution_, CV_8UC4, HostMem::AllocType::PAGE_LOCKED);
+		tmp_l_ = cv::Mat(image_resolution_, CV_8UC4);
+	}
+	if (tmp_r_.size() != image_resolution_) {
+		//using cv::cuda::HostMem;
+		//tmp_r_ = HostMem(image_resolution_, CV_8UC4, HostMem::AllocType::PAGE_LOCKED);
+		tmp_r_ = cv::Mat(image_resolution_, CV_8UC4);
+	}
+
+	cv::Mat K_l = calib_left_.intrinsic.matrix(image_resolution_);
+	cv::Mat K_r = calib_right_.intrinsic.matrix(image_resolution_);
+	cv::Mat dc_l = calib_left_.intrinsic.distCoeffs.Mat();
+	cv::Mat dc_r = calib_right_.intrinsic.distCoeffs.Mat();
+
+	// calculate rotation and translation from left to right using calibration
+	cv::Mat T_l = calib_left_.extrinsic.matrix();
+	cv::Mat T_r = calib_right_.extrinsic.matrix();
+	cv::Mat T = T_l * transform::inverse(T_r);
+	cv::Mat R, t;
+
+	transform::getRotationAndTranslation(T, R, t);
+	baseline_ = cv::norm(t);
+
+	if (baseline_ == 0.0) { return; }
+
+	// calculate rectification parameters
+	cv::stereoRectify(	K_l, dc_l, K_r, dc_r, image_resolution_,
+						R, t, R_l_, R_r_, P_l_, P_r_, Q_, 0, 0);
+
+	// for CPU remap, CV_16SC2 should give best performance
+	// https://docs.opencv.org/master/da/d54/group__imgproc__transform.html
+	cv::initUndistortRectifyMap(K_l, dc_l, R_l_, P_l_, image_resolution_,
+								CV_16SC2, map_l_.first, map_l_.second);
+	cv::initUndistortRectifyMap(K_r, dc_r, R_r_, P_r_, image_resolution_,
+								CV_16SC2, map_r_.first, map_r_.second);
+
+	valid_ = true;
+}
+
+void StereoRectification::rectify(cv::InputOutputArray im, Channel c) {
+
+	if (!enabled_ || !valid_) { return; }
+
+	if (im.size() != image_resolution_) {
+		throw ftl::exception("Input has wrong size");
+	}
+	if (im.isMat()) {
+		cv::Mat &in = im.getMatRef();
+		if (c == Channel::Left) {
+			cv::remap(in, tmp_l_, map_l_.first, map_l_.second, interpolation_);
+			cv::swap(in, tmp_l_);
+		}
+		else if (c == Channel::Right) {
+			cv::remap(in, tmp_r_, map_r_.first, map_r_.second, interpolation_);
+			cv::swap(in, tmp_r_);
+		}
+		else {
+			throw ftl::exception("Bad channel for rectification");
+		}
+	}
+	else if (im.isGpuMat()) {
+		throw ftl::exception("GPU rectification not implemented");
+	}
+	else {
+		throw ftl::exception("Input not Mat/GpuMat");
+	}
+}
+
+cv::Mat StereoRectification::getPose(Channel c) {
+	using ftl::calibration::transform::inverse;
+
+	if (enabled_ && valid_) {
+		cv::Mat T = cv::Mat::eye(4, 4, CV_64FC1);
+		if (c == Channel::Left) {
+			R_l_.copyTo(T(cv::Rect(0, 0, 3, 3)));
+			return calib_left_.extrinsic.matrix() * inverse(T);
+		}
+		else if (c == Channel::Right) {
+			R_r_.copyTo(T(cv::Rect(0, 0, 3, 3)));
+			return calib_right_.extrinsic.matrix() * inverse(T);
+		}
+	}
+	else {
+		if (c == Channel::Left) {
+			return calib_left_.extrinsic.matrix();
+		}
+		else if (c == Channel::Right) {
+			return calib_right_.extrinsic.matrix();
+		}
+	}
+	throw ftl::exception("Invalid channel, expected Left or Right");
+}
+
+double StereoRectification::baseline() {
+	return baseline_;
+}
+
+double StereoRectification::doff() {
+	if (!enabled_ || !valid_) return 0.0;
+	return -(Q_.at<double>(3,3) * baseline_);
+}
+
+double StereoRectification::doff(cv::Size size) {
+	return doff() * double(size.width)/double(image_resolution_.width);
+}
+
+cv::Mat StereoRectification::cameraMatrix(Channel c) {
+	if (enabled_ && valid_) {
+		if (c == Channel::Left) {
+			// P_l_: the left camera is the origin of the rectified system, so the
+			// extrinsic has no rotation and the intrinsic matrix can be extracted directly.
+			return cv::Mat(P_l_, cv::Rect(0, 0, 3, 3)).clone();
+		}
+		else if (c == Channel::Right) {
+			// Extrinsics are included in P_r_, can't do same as above
+			throw ftl::exception("Not implemented");
+		}
+	}
+	else {
+		if (c == Channel::Left) {
+			return calib_left_.intrinsic.matrix();
+		}
+		else if (c == Channel::Right) {
+			return calib_right_.intrinsic.matrix();
+		}
+	}
+	throw ftl::exception("Invalid channel, expected Left or Right");
+}
+
+cv::Mat StereoRectification::cameraMatrix(cv::Size size, Channel c) {
+	return ftl::calibration::scaleCameraMatrix(cameraMatrix(c), image_resolution_, size);
+}
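Note on the CPU-only remap above: the GpuMat branch of rectify() deliberately throws, because cv::cuda::remap cannot consume the packed CV_16SC2 maps built for the CPU path and needs separate CV_32FC1 x/y maps instead. A rough sketch of what a GPU path could look like (hypothetical helpers, not part of this patch):

#include <opencv2/calib3d.hpp>
#include <opencv2/cudawarping.hpp>

// Hypothetical GPU rectification helpers (sketch only).
struct GpuRectifyMaps {
	cv::cuda::GpuMat x;
	cv::cuda::GpuMat y;
};

// Build CV_32FC1 maps; cv::cuda::remap does not accept the packed CV_16SC2 form.
static GpuRectifyMaps buildGpuMaps(const cv::Mat &K, const cv::Mat &D, const cv::Mat &R,
                                   const cv::Mat &P, cv::Size size) {
	cv::Mat mx, my;
	cv::initUndistortRectifyMap(K, D, R, P, size, CV_32FC1, mx, my);
	return { cv::cuda::GpuMat(mx), cv::cuda::GpuMat(my) };
}

static void rectifyGpu(cv::cuda::GpuMat &im, cv::cuda::GpuMat &tmp,
                       const GpuRectifyMaps &maps, cv::cuda::Stream &stream) {
	cv::cuda::remap(im, tmp, maps.x, maps.y, cv::INTER_LINEAR,
	                cv::BORDER_CONSTANT, cv::Scalar(), stream);
	im.swap(tmp);  // mirror the in-place swap used by the CPU rectify()
}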
diff --git a/components/rgbd-sources/src/sources/stereovideo/rectification.hpp b/components/rgbd-sources/src/sources/stereovideo/rectification.hpp
new file mode 100644
index 000000000..554ddaec5
--- /dev/null
+++ b/components/rgbd-sources/src/sources/stereovideo/rectification.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Nicolas Pope
+ */
+
+#ifndef _FTL_RGBD_STEREO_RECTIFICATION_HPP_
+#define _FTL_RGBD_STEREO_RECTIFICATION_HPP_
+
+#include <opencv2/core.hpp>
+#include <opencv2/core/cuda.hpp>
+
+#include <string>
+#include <vector>
+
+#include <ftl/codecs/channels.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/calibration/structures.hpp>
+
+namespace ftl {
+namespace rgbd {
+namespace detail {
+
+/**
+ * Stereo rectification. Performs rectification for left and right channels.
+ * Rectified image is same size as input image.
+ */
+class StereoRectification : public ftl::Configurable {
+public:
+	StereoRectification(nlohmann::json &config, cv::Size image_size);
+
+	void setInterpolation(int interpolation);
+
+	void setSize(cv::Size);
+	/**
+	 * Calculate rectification parameters from given calibration.
+	 */
+	void setCalibration(ftl::calibration::CalibrationData &calib);
+	bool calibrated();
+
+	void rectify(cv::InputOutputArray im, ftl::codecs::Channel c);
+
+	/**
+	 * Enable/disable rectification.
+	 */
+	void setEnabled(bool enabled);
+	bool enabled();
+
+	/**
+	 * Get camera pose (rectified if enabled and valid)
+	 */
+	cv::Mat getPose(ftl::codecs::Channel c = ftl::codecs::Channel::Left);
+
+	/**
+	 * Get intrinsic matrix.
+	 */
+	cv::Mat cameraMatrix(ftl::codecs::Channel c = ftl::codecs::Channel::Left);
+	cv::Mat cameraMatrix(cv::Size size, ftl::codecs::Channel c = ftl::codecs::Channel::Left);
+
+	/** Stereo baseline */
+	double baseline();
+	/** Disparity offset */
+	double doff();
+	double doff(cv::Size);
+
+protected:
+	void calculateParameters();
+
+private:
+	cv::Size calib_resolution_;
+	ftl::calibration::CalibrationData::Calibration calib_left_;
+	ftl::calibration::CalibrationData::Calibration calib_right_;
+
+	cv::Size image_resolution_;
+
+	// rectification parameters and maps
+	bool enabled_;
+	bool valid_;
+	int interpolation_;
+	double baseline_;
+	cv::Mat Q_;
+	cv::Mat R_l_;
+	cv::Mat R_r_;
+	cv::Mat P_l_;
+	cv::Mat P_r_;
+
+	std::pair<cv::Mat,cv::Mat> map_l_;
+	std::pair<cv::Mat,cv::Mat> map_r_;
+
+	// temporary buffers
+	// cv::cuda::HostMem tmp_l_;
+	// cv::cuda::HostMem tmp_r_;
+	cv::Mat tmp_l_;
+	cv::Mat tmp_r_;
+};
+
+}
+}
+}
+
+#endif // _FTL_RGBD_STEREO_RECTIFICATION_HPP_
+
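As a usage illustration of the interface declared above (sketch only; `host` stands for whichever parent Configurable owns the rectifier, and the file path and image size are examples):

// Typical lifecycle of a StereoRectification instance.
auto *rect = ftl::create<ftl::rgbd::detail::StereoRectification>(
		host, "rectification", cv::Size(1280, 720));

auto calib = ftl::calibration::CalibrationData::readFile("calibration.yml");
rect->setCalibration(calib);      // computes the maps if both Left and Right calibrations exist
rect->setEnabled(true);

cv::Mat left, right;              // debayered BGRA frames, same size as given above
rect->rectify(left,  ftl::codecs::Channel::Left);    // remapped in place
rect->rectify(right, ftl::codecs::Channel::Right);

double baseline = rect->baseline();                  // from the calibration extrinsics
double doffs    = rect->doff(left.size());           // disparity offset at this resolution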
diff --git a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
index 21561726c..7458f5c53 100644
--- a/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.cpp
@@ -1,39 +1,63 @@
 #include <loguru.hpp>
 
+#include <unordered_set>
+
 #include <Eigen/Eigen>
 #include <opencv2/core/eigen.hpp>
 
-#include "stereovideo.hpp"
-
 #include <ftl/configuration.hpp>
 #include <ftl/profiler.hpp>
 
 #include <nlohmann/json.hpp>
 
 #ifdef HAVE_OPTFLOW
-#include "ftl/operators/opticalflow.hpp"
+#include <ftl/operators/opticalflow.hpp>
 #endif
 
-#include "ftl/operators/smoothing.hpp"
-#include "ftl/operators/colours.hpp"
-#include "ftl/operators/normals.hpp"
-#include "ftl/operators/filling.hpp"
-#include "ftl/operators/segmentation.hpp"
-#include "ftl/operators/disparity.hpp"
-#include "ftl/operators/mask.hpp"
+#include <ftl/operators/smoothing.hpp>
+#include <ftl/operators/colours.hpp>
+#include <ftl/operators/normals.hpp>
+#include <ftl/operators/filling.hpp>
+#include <ftl/operators/segmentation.hpp>
+#include <ftl/operators/disparity.hpp>
+#include <ftl/operators/mask.hpp>
+
+#include <ftl/rgbd/capabilities.hpp>
+#include <ftl/calibration/structures.hpp>
 
+#include "stereovideo.hpp"
 #include "ftl/threads.hpp"
-#include "calibrate.hpp"
+#include "rectification.hpp"
+
 #include "opencv.hpp"
 
 #ifdef HAVE_PYLON
 #include "pylon.hpp"
 #endif
 
-using ftl::rgbd::detail::Calibrate;
 using ftl::rgbd::detail::StereoVideoSource;
 using ftl::codecs::Channel;
 using std::string;
+using ftl::rgbd::Capability;
+
+
+static cv::Mat rmat(const cv::Vec3d &rvec) {
+	cv::Mat R(cv::Size(3, 3), CV_64FC1);
+	cv::Rodrigues(rvec, R);
+	return R;
+}
+
+static Eigen::Matrix4d matrix(const cv::Vec3d &rvec, const cv::Vec3d &tvec) {
+	cv::Mat M = cv::Mat::eye(cv::Size(4, 4), CV_64FC1);
+	rmat(rvec).copyTo(M(cv::Rect(0, 0, 3, 3)));
+	M.at<double>(0, 3) = tvec[0];
+	M.at<double>(1, 3) = tvec[1];
+	M.at<double>(2, 3) = tvec[2];
+	Eigen::Matrix4d r;
+	cv::cv2eigen(M,r);
+	return r;
+}
+
 
 ftl::rgbd::detail::Device::Device(nlohmann::json &config) : Configurable(config) {
 
@@ -61,8 +85,27 @@ StereoVideoSource::StereoVideoSource(ftl::rgbd::Source *host, const string &file
 }
 
 StereoVideoSource::~StereoVideoSource() {
-	delete calib_;
 	delete lsrc_;
+	if (pipeline_input_) delete pipeline_input_;
+}
+
+bool StereoVideoSource::supported(const std::string &dev) {
+	if (dev == "pylon") {
+		#ifdef HAVE_PYLON
+		auto pylon_devices = ftl::rgbd::detail::PylonDevice::listDevices();
+		return pylon_devices.size() > 0;
+		#else
+		return false;
+		#endif
+	} else if (dev == "video") {
+		return true;
+	} else if (dev == "camera") {
+		return ftl::rgbd::detail::OpenCVDevice::getDevices().size() > 0;
+	} else if (dev == "stereo") {
+		return ftl::rgbd::detail::OpenCVDevice::getDevices().size() > 1;
+	}
+
+	return false;
 }
 
 void StereoVideoSource::init(const string &file) {
@@ -81,24 +124,15 @@ void StereoVideoSource::init(const string &file) {
 		} else if (uri.getPathSegment(0) == "opencv") {
 			// Use cameras
 			LOG(INFO) << "Using OpenCV cameras...";
-			lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed");
+			lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed", true);
 		} else if (uri.getPathSegment(0) == "video" || uri.getPathSegment(0) == "camera") {
-			// Now detect automatically which device to use
-			#ifdef HAVE_PYLON
-			auto pylon_devices = ftl::rgbd::detail::PylonDevice::listDevices();
-			if (pylon_devices.size() > 0) {
-				LOG(INFO) << "Using Pylon...";
-				lsrc_ = ftl::create<ftl::rgbd::detail::PylonDevice>(host_, "feed");
-			} else {
-				// Use cameras
-				LOG(INFO) << "Using OpenCV cameras...";
-				lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed");
-			}
-			#else
+			// Use cameras
+			LOG(INFO) << "Using OpenCV camera...";
+			lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed", false);
+		} else if (uri.getPathSegment(0) == "stereo") {
 			// Use cameras
 			LOG(INFO) << "Using OpenCV cameras...";
-			lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed");
-			#endif
+			lsrc_ = ftl::create<ftl::rgbd::detail::OpenCVDevice>(host_, "feed", true);
 		}
 	}
 
@@ -112,207 +146,192 @@ void StereoVideoSource::init(const string &file) {
 	#endif
 	pipeline_input_->append<ftl::operators::ColourChannels>("colour");
 
-	calib_ = ftl::create<Calibrate>(host_, "calibration", cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight()), stream_);
+	cv::Size size_full = cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight());
+	rectification_ = std::unique_ptr<StereoRectification>
+		(ftl::create<StereoRectification>(host_, "rectification", size_full));
 
 	string fname_default = "calibration.yml";
-	auto fname_config = calib_->get<string>("calibration");
+	auto fname_config = host_->get<string>("calibration");
 	string fname = fname_config ? *fname_config : fname_default;
 	auto calibf = ftl::locateFile(fname);
 	if (calibf) {
-		fname = *calibf;
-		if (calib_->loadCalibration(fname)) {
-			calib_->calculateRectificationParameters();
-			calib_->setRectify(true);
-		}
+		fname_calib_ = *calibf;
+		calibration_ = ftl::calibration::CalibrationData::readFile(fname_calib_);
+		calibration_.enabled = host_->value("rectify", calibration_.enabled);
+		rectification_->setCalibration(calibration_);
+		rectification_->setEnabled(calibration_.enabled);
 	}
 	else {
-		fname = fname_config ? *fname_config :
-								string(FTL_LOCAL_CONFIG_ROOT) + "/"
-								+ std::string("calibration.yml");
+		fname_calib_ = fname_config ?	*fname_config :
+										string(FTL_LOCAL_CONFIG_ROOT) + "/"
+										+ std::string("calibration.yml");
 
-		LOG(ERROR) << "No calibration, default path set to " + fname;
+		LOG(ERROR) << "No calibration file found, calibration will be saved to " << fname_calib_;
+	}
 
-		// set use config file/set (some) default values
+	// Generate camera parameters for next frame
+	do_update_params_ = true;
 
-		cv::Mat K = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
-		K.at<double>(0,0) = host_->value("focal", 800.0);
-		K.at<double>(1,1) = host_->value("focal", 800.0);
-		K.at<double>(0,2) = host_->value("centre_x", color_size_.width/2.0f);
-		K.at<double>(1,2) = host_->value("centre_y", color_size_.height/2.0f);
+	LOG(INFO) << "StereoVideo source ready...";
+	ready_ = true;
 
-		calib_->setIntrinsics(color_size_, {K, K});
-	}
+	host_->on("size", [this]() {
+		do_update_params_ = true;
+	});
 
-	////////////////////////////////////////////////////////////////////////////
-	// RPC callbacks to update calibration
-	// Should only be used by calibration app (interface may change)
-	// Tries to follow interface of ftl::Calibrate
-
-	host_->getNet()->bind("set_pose",
-		[this](cv::Mat pose){
-			if (!calib_->setPose(pose)) {
-				LOG(ERROR) << "invalid pose received (bad value)";
-				return false;
-			}
-			updateParameters();
-			LOG(INFO) << "new pose";
-			return true;
+	host_->on("rectify", [this]() {
+		calibration_.enabled = host_->value("rectify", true);
+		do_update_params_ = true;
 	});
 
-	host_->getNet()->bind("set_pose_adjustment",
-		[this](cv::Mat T){
-			if (!calib_->setPoseAdjustment(T)) {
-				LOG(ERROR) << "invalid pose received (bad value)";
-				return false;
-			}
-			updateParameters();
-			LOG(INFO) << "new pose adjustment";
-			return true;
+	host_->on("offset_z", [this]() {
+		do_update_params_ = true;
 	});
+}
 
+void StereoVideoSource::updateParameters(ftl::rgbd::Frame &frame) {
+	auto &meta = frame.create<std::map<std::string,std::string>>(Channel::MetaData);
+	meta["name"] = host_->value("name", host_->getID());
+	meta["id"] = host_->getID();
+	meta["uri"] = host_->value("uri", std::string(""));
 
-	host_->getNet()->bind("set_intrinsics",
-		[this](cv::Size size, cv::Mat K_l, cv::Mat D_l, cv::Mat K_r, cv::Mat D_r) {
+	if (lsrc_) lsrc_->populateMeta(meta);
 
-			if (!calib_->setIntrinsics(size, {K_l, K_r})) {
-				LOG(ERROR) << "bad intrinsic parameters (bad values)";
-				return false;
-			}
+	if (!frame.has(Channel::Capabilities)) {
+		auto &cap = frame.create<std::unordered_set<Capability>>(Channel::Capabilities);
+		cap.emplace(Capability::VIDEO);
+		cap.emplace(Capability::LIVE);
+	}
 
-			if (!D_l.empty() && !D_r.empty()) {
-				if (!calib_->setDistortion({D_l, D_r})) {
-					LOG(ERROR) << "bad distortion parameters (bad values)";
-					return false;
-				}
-			}
-			updateParameters();
-			LOG(INFO) << "new intrinsic parameters";
-			return true;
-	});
+	frame.create<ftl::calibration::CalibrationData>(Channel::CalibrationData) = calibration_;
 
-	host_->getNet()->bind("set_extrinsics",
-		[this](cv::Mat R, cv::Mat t){
-			if (!calib_->setExtrinsics(R, t)) {
-				LOG(ERROR) << "invalid extrinsic parameters (bad values)";
-				return false;
-			}
-			updateParameters();
-			LOG(INFO) << "new extrinsic (stereo) parameters";
-			return true;
-	});
+	calibration_change_ = frame.onChange(Channel::CalibrationData, [this]
+			(ftl::data::Frame& frame, ftl::codecs::Channel) {
 
-	host_->getNet()->bind("save_calibration",
-		[this, fname](){
-			LOG(INFO) << "saving calibration to " << fname;
-			return calib_->saveCalibration(fname);
-	});
+		if (!lsrc_->isStereo()) return true;
 
-	host_->getNet()->bind("set_rectify",
-		[this](bool enable){
-			bool retval = calib_->setRectify(enable);
-			updateParameters();
-			LOG(INFO) << "rectification " << (retval ? "on" : "off");
-			return retval;
-	});
+		auto &change = frame.get<ftl::calibration::CalibrationData>(Channel::CalibrationData);
+		try {
+			change.writeFile(fname_calib_);
+		}
+		catch (...) {
+			LOG(ERROR) << "Saving calibration to file failed";
+		}
 
-	host_->getNet()->bind("get_distortion", [this]() {
-		return std::vector<cv::Mat>{
-			cv::Mat(calib_->getCameraDistortionLeft()),
-			cv::Mat(calib_->getCameraDistortionRight()) };
+		calibration_ = ftl::calibration::CalibrationData(change);
+		rectification_->setCalibration(calibration_);
+		rectification_->setEnabled(change.enabled);
+		return true;
 	});
 
-	////////////////////////////////////////////////////////////////////////////
+	if (lsrc_->isStereo()) {
+		Eigen::Matrix4d pose;
+		cv::cv2eigen(rectification_->getPose(Channel::Left), pose);
 
-	// Generate camera parameters from camera matrix
-	updateParameters();
+		frame.setPose() = pose;
 
-	LOG(INFO) << "StereoVideo source ready...";
-	ready_ = true;
+		cv::Mat K;
 
-	state_.set("name", host_->value("name", host_->getID()));
-}
+		// same for left and right
+		float baseline = static_cast<float>(rectification_->baseline());
+		float doff = rectification_->doff(color_size_);
 
-ftl::rgbd::Camera StereoVideoSource::parameters(Channel chan) {
-	if (chan == Channel::Right) {
-		return state_.getRight();
-	} else {
-		return state_.getLeft();
-	}
-}
+		double d_resolution = this->host_->getConfig().value<double>("depth_resolution", 0.0);
+		float min_depth = this->host_->getConfig().value<double>("min_depth", 0.45);
+		float max_depth = this->host_->getConfig().value<double>("max_depth", (lsrc_->isStereo()) ? 12.0 : 1.0);
 
-void StereoVideoSource::updateParameters() {
-	Eigen::Matrix4d pose;
-	cv::cv2eigen(calib_->getPose(), pose);
-	setPose(pose);
-
-	cv::Mat K;
-
-	// same for left and right
-	float baseline = static_cast<float>(calib_->getBaseline());
-	float doff = calib_->getDoff(color_size_);
-
-	double d_resolution = this->host_->getConfig().value<double>("depth_resolution", 0.0);
-	float min_depth = this->host_->getConfig().value<double>("min_depth", 0.45);
-	float max_depth = this->host_->getConfig().value<double>("max_depth", 12.0);
-
-	// left
-	
-	K = calib_->getCameraMatrixLeft(color_size_);
-	float fx = static_cast<float>(K.at<double>(0,0));
-	
-	if (d_resolution > 0.0) {
-		// Learning OpenCV p. 442
-		// TODO: remove, should not be used here
-		float max_depth_new = sqrt(d_resolution * fx * baseline);
-		max_depth = (max_depth_new > max_depth) ? max_depth : max_depth_new;
-	}
+		// left
 
-	state_.getLeft() = {
-		fx,
-		static_cast<float>(K.at<double>(1,1)),	// Fy
-		static_cast<float>(-K.at<double>(0,2)),	// Cx
-		static_cast<float>(-K.at<double>(1,2)),	// Cy
-		(unsigned int) color_size_.width,
-		(unsigned int) color_size_.height,
-		min_depth,
-		max_depth,
-		baseline,
-		doff
-	};
-	
-	host_->getConfig()["focal"] = params_.fx;
-	host_->getConfig()["centre_x"] = params_.cx;
-	host_->getConfig()["centre_y"] = params_.cy;
-	host_->getConfig()["baseline"] = params_.baseline;
-	host_->getConfig()["doffs"] = params_.doffs;
-	
-	// right
-
-	K = calib_->getCameraMatrixRight(color_size_);
-	state_.getRight() = {
-		static_cast<float>(K.at<double>(0,0)),	// Fx
-		static_cast<float>(K.at<double>(1,1)),	// Fy
-		static_cast<float>(-K.at<double>(0,2)),	// Cx
-		static_cast<float>(-K.at<double>(1,2)),	// Cy
-		(unsigned int) color_size_.width,
-		(unsigned int) color_size_.height,
-		min_depth,
-		max_depth,
-		baseline,
-		doff
-	};
+		K = rectification_->cameraMatrix(color_size_);
+		float fx = static_cast<float>(K.at<double>(0,0));
+
+		if (d_resolution > 0.0) {
+			// Learning OpenCV p. 442
+			// TODO: remove, should not be used here
+			float max_depth_new = sqrt(d_resolution * fx * baseline);
+			max_depth = (max_depth_new > max_depth) ? max_depth : max_depth_new;
+		}
+
+		auto& params = frame.setLeft();
+		params = {
+			fx,
+			static_cast<float>(K.at<double>(1,1)),	// Fy
+			static_cast<float>(-K.at<double>(0,2)),	// Cx
+			static_cast<float>(-K.at<double>(1,2)),	// Cy
+			(unsigned int) color_size_.width,
+			(unsigned int) color_size_.height,
+			min_depth,
+			max_depth,
+			baseline,
+			doff
+		};
+
+		host_->getConfig()["focal"] = params.fx;
+		host_->getConfig()["centre_x"] = params.cx;
+		host_->getConfig()["centre_y"] = params.cy;
+		host_->getConfig()["baseline"] = params.baseline;
+		host_->getConfig()["doffs"] = params.doffs;
+
+		// right
+		/* not used
+		K = rectification_->cameraMatrix(color_size_, Channel::Right);
+		frame.setRight() = {
+			static_cast<float>(K.at<double>(0,0)),	// Fx
+			static_cast<float>(K.at<double>(1,1)),	// Fy
+			static_cast<float>(-K.at<double>(0,2)),	// Cx
+			static_cast<float>(-K.at<double>(1,2)),	// Cy
+			(unsigned int) color_size_.width,
+			(unsigned int) color_size_.height,
+			min_depth,
+			max_depth,
+			baseline,
+			doff
+		};*/
+	} else {
+		Eigen::Matrix4d pose;
+		auto& params = frame.setLeft();
+
+		params.cx = -(color_size_.width / 2.0);
+		params.cy = -(color_size_.height / 2.0);
+		params.fx = 700.0;
+		params.fy = 700.0;
+		params.maxDepth = host_->value("size", 1.0f);
+		params.minDepth = 0.0f;
+		params.doffs = 0.0;
+		params.baseline = 0.1f;
+		params.width = color_size_.width;
+		params.height = color_size_.height;
+
+		float offsetz = host_->value("offset_z", 0.0f);
+		//state_.setPose(matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz)));
+		pose = matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params.maxDepth + offsetz));
+
+		/*host_->on("size", [this](const ftl::config::Event &e) {
+			float offsetz = host_->value("offset_z",0.0f);
+			params_.maxDepth = host_->value("size", 1.0f);
+			//state_.getLeft() = params_;
+			pose_ = matrix(cv::Vec3d(0.0, 3.14159, 0.0), cv::Vec3d(0.0,0.0,params_.maxDepth+offsetz));
+			do_update_params_ = true;
+		});*/
+
+		frame.setPose() = pose;
+	}
 }
 
 bool StereoVideoSource::capture(int64_t ts) {
-	lsrc_->grab();
-	return true;
+	cap_status_ = lsrc_->grab();
+	return cap_status_;
 }
 
 bool StereoVideoSource::retrieve(ftl::rgbd::Frame &frame) {
 	FTL_Profile("Stereo Retrieve", 0.03);
-	
-	frame.reset();
-	frame.setOrigin(&state_);
+
+	if (!cap_status_) return false;
+
+	if (do_update_params_) {
+		updateParameters(frame);
+		do_update_params_ = false;
+	}
 
 	cv::cuda::GpuMat gpu_dummy;
 	cv::Mat dummy;
@@ -322,12 +341,17 @@ bool StereoVideoSource::retrieve(ftl::rgbd::Frame &frame) {
 	if (lsrc_->isStereo()) {
 		cv::cuda::GpuMat &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
 		cv::cuda::GpuMat &right = frame.create<cv::cuda::GpuMat>(Channel::Right);
-		lsrc_->get(left, right, hres, hres_r, calib_, stream2_);
+		if (!lsrc_->get(frame, left, right, hres, hres_r, rectification_.get(), stream2_)) {
+			frame.remove(Channel::Left);
+			frame.remove(Channel::Right);
+		}
 	}
 	else {
 		cv::cuda::GpuMat &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
 		cv::cuda::GpuMat right;
-		lsrc_->get(left, right, hres, hres_r, calib_, stream2_);
+		if (!lsrc_->get(frame, left, right, hres, hres_r, rectification_.get(), stream2_)) {
+			frame.remove(Channel::Left);
+		}
 	}
 
 	//LOG(INFO) << "Channel size: " << hres.size();
diff --git a/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
index 2b6fc3a83..dd08f4f68 100644
--- a/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/stereovideo.hpp
@@ -4,7 +4,9 @@
 
 #include "../../basesource.hpp"
 #include <ftl/operators/operator.hpp>
+#include <ftl/calibration/structures.hpp>
 #include <string>
+#include <memory>
 
 namespace ftl {
 
@@ -12,15 +14,15 @@ namespace rgbd {
 namespace detail {
 
 class Device;
-class Calibrate;
+class StereoRectification;
 class Disparity;
 
 /**
  * RGBD source from either a stereo video file with left + right images, or
- * direct from two camera devices. 
+ * direct from two camera devices.
  */
 class StereoVideoSource : public BaseSourceImpl {
-	public:
+public:
 	explicit StereoVideoSource(ftl::rgbd::Source*);
 	StereoVideoSource(ftl::rgbd::Source*, const std::string &);
 	~StereoVideoSource();
@@ -29,31 +31,38 @@ class StereoVideoSource : public BaseSourceImpl {
 	bool retrieve(ftl::rgbd::Frame &frame) override;
 	bool isReady() override;
 
-	Camera parameters(ftl::codecs::Channel chan) override;
+	static bool supported(const std::string &dev);
 
-	private:
-	void updateParameters();
+private:
+	void updateParameters(ftl::rgbd::Frame &);
 
 	Device *lsrc_;
-	Calibrate *calib_;
+	std::unique_ptr<StereoRectification> rectification_;
+	ftl::calibration::CalibrationData calibration_;
+
 	int64_t capts_;
 
 	cv::Size color_size_;
 	cv::Size depth_size_;
 
-	ftl::operators::Graph *pipeline_input_;
+	ftl::operators::Graph *pipeline_input_=nullptr;
 	ftl::operators::Graph *pipeline_depth_;
 
 	cv::cuda::GpuMat fullres_left_;
 	cv::cuda::GpuMat fullres_right_;
 
 	bool ready_;
-	
+	bool do_update_params_ = false;
+	bool cap_status_ = false;
+
 	cv::cuda::Stream stream_;
 	cv::cuda::Stream stream2_;
 
 	cv::Mat mask_l_;
 
+	ftl::Handle calibration_change_;
+	std::string fname_calib_;
+
 	void init(const std::string &);
 };
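Since automatic Pylon/OpenCV detection was removed from init(), the new static supported() check lets a caller decide which device path to request up front. A small illustrative sketch (the strings mirror those handled by supported()):

// Pick a concrete stereovideo backend before constructing a source.
std::string dev = "video";
if (ftl::rgbd::detail::StereoVideoSource::supported("pylon"))       dev = "pylon";
else if (ftl::rgbd::detail::StereoVideoSource::supported("stereo")) dev = "stereo";
else if (ftl::rgbd::detail::StereoVideoSource::supported("camera")) dev = "camera";
LOG(INFO) << "Using stereovideo backend: " << dev;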
 
diff --git a/components/rgbd-sources/test/CMakeLists.txt b/components/rgbd-sources/test/CMakeLists.txt
index b0d54ac91..4a4b0d2c5 100644
--- a/components/rgbd-sources/test/CMakeLists.txt
+++ b/components/rgbd-sources/test/CMakeLists.txt
@@ -10,14 +10,3 @@ target_link_libraries(source_unit
 
 add_test(SourceUnitTest source_unit)
 
-### Frame Unit #################################################################
-add_executable(frame_unit
-$<TARGET_OBJECTS:CatchTest>
-	./frame_unit.cpp
-	../src/frame.cpp
-)
-target_include_directories(frame_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
-target_link_libraries(frame_unit
-	ftlcommon ftlcodecs ftldata)
-
-add_test(FrameUnitTest frame_unit)
diff --git a/components/rgbd-sources/test/source_unit.cpp b/components/rgbd-sources/test/source_unit.cpp
index c4d31a8fb..fa6e861f7 100644
--- a/components/rgbd-sources/test/source_unit.cpp
+++ b/components/rgbd-sources/test/source_unit.cpp
@@ -68,6 +68,8 @@ class StereoVideoSource : public ftl::rgbd::BaseSourceImpl {
 	bool capture(int64_t ts) { return true; }
 	bool retrieve(ftl::rgbd::Frame &) { return true; }
 	bool isReady() { return true; };
+
+	static bool supported(const std::string &dev) { return true; }
 };
 
 class NetSource : public ftl::rgbd::BaseSourceImpl {
@@ -112,6 +114,8 @@ class RealsenseSource : public ftl::rgbd::BaseSourceImpl {
 	bool capture(int64_t ts) { return true; }
 	bool retrieve(ftl::rgbd::Frame &) { return true; }
 	bool isReady() { return true; };
+
+	static bool supported() { return true; }
 };
 
 class MiddleburySource : public ftl::rgbd::BaseSourceImpl {
diff --git a/components/streams/CMakeLists.txt b/components/streams/CMakeLists.txt
index 43722b888..cdbf7be84 100644
--- a/components/streams/CMakeLists.txt
+++ b/components/streams/CMakeLists.txt
@@ -1,4 +1,4 @@
-#add_library(FtlStream OBJECT src/stream.cpp) 
+#add_library(FtlStream OBJECT src/stream.cpp)
 #target_include_directories(FtlStream PUBLIC
 #	$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
 #	$<INSTALL_INTERFACE:include>
@@ -10,9 +10,15 @@ set(STREAMSRC
 	src/filestream.cpp
 	src/receiver.cpp
 	src/sender.cpp
+	src/feed.cpp
 	src/netstream.cpp
 	src/injectors.cpp
 	src/parsers.cpp
+	src/builder.cpp
+	src/renderer.cpp
+	src/renderers/screen_render.cpp
+	src/renderers/openvr_render.cpp
+	src/renderers/collisions.cpp
 )
 
 add_library(ftlstreams ${STREAMSRC})
@@ -26,7 +32,9 @@ target_include_directories(ftlstreams PUBLIC
 	PRIVATE src)
 
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(ftlstreams ftlrgbd ftlcommon ${OpenCV_LIBS} Eigen3::Eigen ftlnet ftlcodecs ftlaudio)
+target_link_libraries(ftlstreams ftlrgbd ftlrender ftlcommon ${OpenCV_LIBS} Eigen3::Eigen ftlnet ftlcodecs ftlaudio openvr)
+
+target_precompile_headers(ftlstreams REUSE_FROM ftldata)
 
 if (BUILD_TESTS)
 add_subdirectory(test)
diff --git a/components/streams/include/ftl/streams/builder.hpp b/components/streams/include/ftl/streams/builder.hpp
new file mode 100644
index 000000000..cd8f6b5fa
--- /dev/null
+++ b/components/streams/include/ftl/streams/builder.hpp
@@ -0,0 +1,185 @@
+#ifndef _FTL_STREAM_BUILDER_HPP_
+#define _FTL_STREAM_BUILDER_HPP_
+
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/data/framepool.hpp>
+#include <ftl/handle.hpp>
+#include <ftl/transactional.hpp>
+#include <list>
+
+namespace ftl {
+namespace streams {
+
+using LockedFrameSet = ftl::Transactional<ftl::data::FrameSet*>;
+
+/**
+ * An abstract base class for a FrameSet database. A builder stores incomplete
+ * framesets whilst they are being created, allowing partial data to be buffered
+ * and searched for using timestamp and frameset id. One instance of a builder
+ * should be created for each frameset id.
+ */
+class BaseBuilder : public ftl::data::Generator {
+	public:
+	BaseBuilder(ftl::data::Pool *pool, int id);
+	BaseBuilder();
+	virtual ~BaseBuilder();
+
+	virtual LockedFrameSet get(int64_t timestamp, size_t ix)=0;
+
+	virtual LockedFrameSet get(int64_t timestamp)=0;
+
+	//void setName(const std::string &name);
+
+	void setID(uint32_t id) { id_ = id; }
+	void setPool(ftl::data::Pool *p) { pool_ = p; }
+	void setBufferSize(size_t s) { bufferSize_ = s; }
+
+	inline ftl::Handle onFrameSet(const ftl::data::FrameSetCallback &cb) override { return cb_.on(cb); }
+
+	/**
+	 * Retrieve an fps + latency pair, averaged since last call to this
+	 * function.
+	 */
+	static std::pair<float,float> getStatistics();
+
+	inline size_t size() const { return size_; }
+
+	inline int id() const { return id_; }
+
+	inline ftl::data::ChangeType changeType() const { return ctype_; }
+
+	protected:
+	ftl::data::Pool *pool_;
+	int id_;
+	size_t size_;
+	size_t bufferSize_ = 1;
+	ftl::Handler<const ftl::data::FrameSetPtr&> cb_;
+	ftl::data::ChangeType ctype_ = ftl::data::ChangeType::COMPLETED;
+};
+
+/**
+ * A version of the frameset database that is used by sources or renderers to
+ * obtain new frames. Timestamps are not used in this version as only a single
+ * frameset is being buffered.
+ */
+class LocalBuilder : public BaseBuilder {
+	public:
+	LocalBuilder(ftl::data::Pool *pool, int id);
+	LocalBuilder();
+	virtual ~LocalBuilder();
+
+	LockedFrameSet get(int64_t timestamp, size_t ix) override;
+
+	LockedFrameSet get(int64_t timestamp) override;
+
+	void setFrameCount(size_t size);
+
+	/**
+	 * Return a smart pointer to a new frameset. The frameset will have the
+	 * number of frames set with `setFrameCount`, or 1 frame by default. Once
+	 * called, another new frameset is buffered internally and ownership of the
+	 * returned frameset is transferred.
+	 */
+	std::shared_ptr<ftl::data::FrameSet> getNextFrameSet(int64_t ts);
+
+	private:
+	std::shared_ptr<ftl::data::FrameSet> frameset_;
+	SHARED_MUTEX mtx_;
+
+	std::shared_ptr<ftl::data::FrameSet> _allocate(int64_t timestamp);
+};
+
+/**
+ * A local builder that generates framesets using a timer and populates the
+ * frames using a discrete source object before generating a callback.
+ */
+class IntervalSourceBuilder : public LocalBuilder {
+	public:
+	IntervalSourceBuilder(ftl::data::Pool *pool, int id, ftl::data::DiscreteSource *src);
+	IntervalSourceBuilder(ftl::data::Pool *pool, int id, const std::list<ftl::data::DiscreteSource*> &srcs);
+	IntervalSourceBuilder();
+	~IntervalSourceBuilder();
+
+	void start();
+	void stop();
+
+	private:
+	ftl::Handle capture_;
+	ftl::Handle retrieve_;
+	std::list<ftl::data::DiscreteSource *> srcs_;
+};
+
+/**
+ * A local builder that generates framesets manually and populates the
+ * frames using a discrete source object before generating a callback.
+ */
+class ManualSourceBuilder : public LocalBuilder {
+	public:
+	ManualSourceBuilder(ftl::data::Pool *pool, int id, ftl::data::DiscreteSource *src);
+	ManualSourceBuilder();
+	~ManualSourceBuilder();
+
+	void tick();
+
+	inline void setFrameRate(int fps) { mspf_ = 1000/fps; };
+
+	private:
+	ftl::data::DiscreteSource *src_;
+	int mspf_ = 30;
+	int64_t last_timestamp_=0;
+};
+
+class ForeignBuilder : public BaseBuilder {
+	public:
+	ForeignBuilder(ftl::data::Pool *pool, int id);
+	ForeignBuilder();
+	~ForeignBuilder();
+
+	//inline void setID(int id) { id_ = id; }
+
+	LockedFrameSet get(int64_t timestamp, size_t ix) override;
+
+	LockedFrameSet get(int64_t timestamp) override;
+
+	private:
+	std::list<std::shared_ptr<ftl::data::FrameSet>> framesets_;  // Active framesets
+	//std::list<ftl::data::FrameSet*> allocated_;  // Keep memory allocations
+
+	size_t head_;
+	MUTEX mutex_;
+	int mspf_;
+	int64_t last_ts_;
+	int64_t last_frame_;
+	std::atomic<int> jobs_;
+	volatile bool skip_;
+	ftl::Handle main_id_;
+
+	std::string name_;
+
+	static MUTEX msg_mutex__;
+	static float latency__;
+	static float fps__;
+	static int stats_count__;
+
+	/* Insert a new frameset into the buffer, along with all intermediate
+	 * framesets between the last in buffer and the new one.
+	 */
+	std::shared_ptr<ftl::data::FrameSet> _addFrameset(int64_t timestamp);
+
+	/* Find a frameset with given latency in frames. */
+	std::shared_ptr<ftl::data::FrameSet> _getFrameset();
+	std::shared_ptr<ftl::data::FrameSet> _get(int64_t timestamp);
+
+	/* Search for a matching frameset. */
+	std::shared_ptr<ftl::data::FrameSet> _findFrameset(int64_t ts);
+	//void _freeFrameset(std::shared_ptr<ftl::data::FrameSet>);
+
+	void _schedule();
+
+	//void _recordStats(float fps, float latency);
+};
+
+}
+}
+
+#endif
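To make the intended use of these builders concrete, a device-backed pipeline would typically pair a ManualSourceBuilder with a DiscreteSource and pump it from its own loop. A minimal sketch (the Pool and DiscreteSource instances are assumed to exist elsewhere):

// Sketch: drive a DiscreteSource through a ManualSourceBuilder.
void runManualPipeline(ftl::data::Pool &pool, ftl::data::DiscreteSource &src,
                       const std::atomic<bool> &running) {
	ftl::streams::ManualSourceBuilder builder(&pool, 0, &src);
	builder.setFrameRate(30);

	ftl::Handle h = builder.onFrameSet([](const ftl::data::FrameSetPtr &fs) {
		// Consume the frameset here (operators, renderer, recorder, ...).
		return true;                     // keep this handler registered
	});

	while (running) {
		builder.tick();                  // populate the next frameset and fire the callback
	}
}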
diff --git a/components/streams/include/ftl/streams/feed.hpp b/components/streams/include/ftl/streams/feed.hpp
index 890eb64e5..72075b74f 100644
--- a/components/streams/include/ftl/streams/feed.hpp
+++ b/components/streams/include/ftl/streams/feed.hpp
@@ -1,33 +1,229 @@
-#ifndef _FTL_STREAMS_FEED_HPP_
-#define _FTL_STREAMS_FEED_HPP_
+#ifndef _FTL_STREAM_FEED_HPP_
+#define _FTL_STREAM_FEED_HPP_
 
 #include <ftl/configurable.hpp>
+#include <ftl/net/universe.hpp>
+#include <ftl/handle.hpp>
+
+#include <ftl/operators/operator.hpp>
+
+#include <ftl/rgbd/source.hpp>
+#include <ftl/data/framepool.hpp>
+
+#include <ftl/streams/stream.hpp>
+#include <ftl/streams/receiver.hpp>
+#include <ftl/streams/sender.hpp>
+#include <ftl/data/new_frameset.hpp>
+
+#include <ftl/render/CUDARender.hpp>
 
 namespace ftl {
-namespace streams {
+namespace render { class Source; }
+namespace stream {
+
+struct SourceInfo {
+	std::string uri;
+	int64_t last_used;
 
-using FrameSetHandler = std::function<bool(ftl::data::FrameSet&)>;
+	inline bool operator<(const SourceInfo &o) const { return (last_used != o.last_used) ? (last_used > o.last_used) : (uri < o.uri); }
+};
 
 class Feed : public ftl::Configurable {
+public:
+	/**
+	 * "Filtered feed"
+	 */
+	class Filter {
+		friend Feed;
 	public:
+		const std::unordered_set<ftl::codecs::Channel>& channels() const { return channels_; };
+		const std::unordered_set<ftl::codecs::Channel>& availableChannels() const { return channels_available_; };
+		const std::unordered_set<uint32_t>& sources() const { return sources_; };
 
-	void addStream(ftl::streams::Stream *s);
+		Filter &select(const std::unordered_set<ftl::codecs::Channel> &cs);
 
-	ftl::Handler onFrameSet(const FrameSetHandler &fsh);
+		void on(const ftl::data::FrameSetCallback &cb);
 
-	uint32_t allocateFrameId();
+		ftl::Handle onWithHandle(const ftl::data::FrameSetCallback &cb);
 
-	void createFrame(uint32_t id, ftl::data::Frame &f);
+		void onRemove(const std::function<bool(uint32_t)> &cb);
 
-	void createFrameSet(uint32_t id, int size, ftl::data::FrameSet &fs);
+		/**
+		 * Safely obtain frameset pointers for all framesets matched by this
+		 * filter. Each pointer refers to the most recently seen frameset at the
+		 * time of this call. Used by the renderer, for example.
+		 */
+		std::list<ftl::data::FrameSetPtr> getLatestFrameSets();
+
+		/** remove filter; any references/pointers become invalid */
+		void remove();
+
+	protected:
+		/** removes filters, releases framesets */
+		~Filter();
 
 	private:
-	ftl::streams::Receiver *receiver_;
-	ftl::streams::Sender *sender_;
-	std::unordered_map<uint32_t, ftl::data::Session> stores_;
+		Filter(Feed* feed, const std::unordered_set<uint32_t>& sources,  const std::unordered_set<ftl::codecs::Channel>& channels);
+		Feed* feed_;
+		std::unordered_set<ftl::codecs::Channel> channels_;
+		std::unordered_set<ftl::codecs::Channel> channels_available_;
+		std::unordered_set<uint32_t> sources_;
+		ftl::Handler<const ftl::data::FrameSetPtr&> handler_;
+		ftl::Handler<uint32_t> remove_handler_;
+		std::vector<ftl::Handle> handles_;
+	};
+
+public:
+
+	Feed(nlohmann::json &config, ftl::net::Universe *net);
+	~Feed();
+
+	/** List the possible channels for the given frame.
+	 * BUG/TODO: only returns requested + persistent channels.
+	 */
+	const std::unordered_set<ftl::codecs::Channel> availableChannels(ftl::data::FrameID);
+
+	/** Add source (file path, device path or URI) */
+	uint32_t add(const std::string &str);
+	uint32_t add(const ftl::URI &uri);
+	uint32_t getID(const std::string &source);
+	std::string getURI(uint32_t fsid);
+
+	/**
+	 * Get the configurable ID that corresponds to the original source. For
+	 * net stream sources this may be a remote configurable.
+	 */
+	std::string getSourceURI(ftl::data::FrameID id);
+
+	void remove(const std::string &str);
+	void remove(uint32_t id);
+
+	std::vector<std::string> listSources();
+	std::vector<ftl::data::FrameID> listFrames();
+	std::vector<unsigned int> listFrameSets();
+
+	std::set<ftl::stream::SourceInfo> recentSources();
+	std::vector<std::string> knownHosts();
+	std::vector<std::string> availableNetworkSources();
+	std::vector<std::string> availableFileSources();
+	std::vector<std::string> availableDeviceSources();
+	std::vector<std::string> availableGroups();
+	bool sourceAvailable(const std::string &uri);
+	bool sourceActive(const std::string &uri);
+
+	void clearFileHistory();
+	void clearHostHistory();
+
+	/**
+	 * Perform a render tick for all render sources. Note that this must be
+	 * called from the GUI / OpenGL thread.
+	 */
+	void render();
+
+	void startRecording(Filter *, const std::string &filename);
+	void startStreaming(Filter *, const std::string &filename);
+	void startStreaming(Filter *);
+	void stopRecording();
+	bool isRecording();
+
+	inline ftl::Handle onNewSources(const std::function<bool(const std::vector<std::string> &)> &cb) { return new_sources_cb_.on(cb); }
+
+	inline ftl::Handle onAdd(const std::function<bool(uint32_t)> &cb) { return add_src_cb_.on(cb); }
+
+	inline ftl::Handle onRemoveSources(const std::function<bool(uint32_t)> &cb) { return remove_sources_cb_.on(cb); }
+
+	cv::Mat getThumbnail(const std::string &uri);
+	std::string getName(const std::string &uri);
+
+	void setPipelineCreator(const std::function<void(ftl::operators::Graph*)> &cb);
+
+	ftl::operators::Graph* addPipeline(const std::string &name);
+	ftl::operators::Graph* addPipeline(uint32_t fsid);
+	/** Returns a pointer to a filter object. The pointer becomes invalid once the
+	 * Feed is destroyed. The user must call Filter::remove() when the filter is
+	 * no longer required. */
+	Filter* filter(const std::unordered_set<uint32_t> &framesets, const std::unordered_set<ftl::codecs::Channel> &channels);
+	Filter* filter(const std::unordered_set<std::string> &sources, const std::unordered_set<ftl::codecs::Channel> &channels);
+	/** all framesets, selected channels */
+	Filter* filter(const std::unordered_set<ftl::codecs::Channel> &channels);
+
+	void removeFilter(Filter* filter);
+
+	void autoConnect();
+
+	void lowLatencyMode();
+
+private:
+	// public methods acquire lock if necessary, private methods assume locking
+	// managed by caller
+	SHARED_MUTEX mtx_;
+	std::condition_variable cv_net_connect_;
+
+	ftl::net::Universe* const net_;
+	std::unique_ptr<ftl::data::Pool> pool_;
+	std::unique_ptr<ftl::stream::Intercept> interceptor_;
+	// multiplexes multiple streams into a single frameset stream
+	std::unique_ptr<ftl::stream::Muxer> stream_;
+
+	// converts packet streams into framesets
+	std::unique_ptr<ftl::stream::Receiver> receiver_;
+	ftl::Handle handle_receiver_;
+
+	// framesets to stream
+	std::unique_ptr<ftl::stream::Sender> sender_;
+	ftl::Handle handle_sender_;
+
+	std::unique_ptr<ftl::stream::Sender> recorder_;
+	std::unique_ptr<ftl::stream::Broadcast> record_stream_;
+	ftl::Handle handle_record_;
+	ftl::Handle handle_record2_;
+	ftl::Handle record_recv_handle_;
+	ftl::Handle record_new_client_;
+	Filter *record_filter_;
+
+	//ftl::Handler<const ftl::data::FrameSetPtr&> frameset_cb_;
+	std::unordered_map<std::string, uint32_t> fsid_lookup_;
+	std::map<uint32_t, ftl::data::FrameSetPtr> latest_;
+	std::unordered_map<std::string, uint32_t> groups_;
+
+	std::unordered_map<uint32_t, std::list<ftl::stream::Stream*>> streams_;
+	std::unordered_map<uint32_t, ftl::rgbd::Source*> devices_;
+	std::unordered_map<uint32_t, ftl::render::Source*> renderers_;
+	std::unordered_map<uint32_t, ftl::operators::Graph*> pre_pipelines_;
+	std::list<ftl::streams::ManualSourceBuilder*> render_builders_;
+	std::function<void(ftl::operators::Graph*)> pipe_creator_;
+
+	std::unordered_set<std::string> netcams_;
+	ftl::Handler<const std::vector<std::string> &> new_sources_cb_;
+	ftl::Handler<uint32_t> add_src_cb_;
+	ftl::Handler<uint32_t> remove_sources_cb_;
+
+	std::vector<Filter*> filters_;
+
+	uint32_t fs_counter_ = 0;
+	uint32_t file_counter_ = 0;
+
+	uint32_t allocateFrameSetId(const std::string &group);
+
+	void add(uint32_t fsid, const std::string &uri, ftl::stream::Stream *s);
+	nlohmann::json &_add_recent_source(const ftl::URI &uri);
+	void _saveThumbnail(const ftl::data::FrameSetPtr& fs);
+
+	/** Callback for network events (adds new sources on connect, etc.) */
+	void _updateNetSources(ftl::net::Peer *p, bool autoadd=false);
+	void _updateNetSources(ftl::net::Peer *p, const std::string &uri, bool autoadd=false);
+	/** select channels and sources based on current filters_; */
+	void select();
+
+	void _createPipeline(uint32_t fsid);
+	ftl::operators::Graph* _addPipeline(uint32_t fsid);
+
+	void _beginRecord(Filter *f);
+	void _stopRecording();
+	bool _isRecording();
 };
 
 }
 }
 
-#endif
\ No newline at end of file
+#endif
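A minimal consumer of the Feed/Filter interface might look as follows (sketch only; the configuration root, the source URI and the channel choice are examples, not part of this patch):

// Subscribe to left/colour frames from every frameset the feed knows about.
auto *feed = ftl::create<ftl::stream::Feed>(root, "feed", net);
feed->add("device:camera");                                  // example source URI

auto *filter = feed->filter({ftl::codecs::Channel::Left});
ftl::Handle h = filter->onWithHandle([](const ftl::data::FrameSetPtr &fs) {
	// Process the latest frameset for the selected channels here.
	return true;                                             // stay subscribed
});

// On the GUI / OpenGL thread:
feed->render();

// When finished, release the subscription explicitly.
filter->remove();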
diff --git a/components/streams/include/ftl/streams/filestream.hpp b/components/streams/include/ftl/streams/filestream.hpp
index d1fea1d9f..5b139546f 100644
--- a/components/streams/include/ftl/streams/filestream.hpp
+++ b/components/streams/include/ftl/streams/filestream.hpp
@@ -2,6 +2,7 @@
 #define _FTL_STREAM_FILESTREAM_HPP_
 
 #include <ftl/streams/stream.hpp>
+#include <ftl/handle.hpp>
 
 namespace ftl {
 namespace stream {
@@ -20,7 +21,7 @@ class File : public Stream {
 	File(nlohmann::json &config, std::ofstream *);
 	~File();
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
+	//bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
 
 	bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &) override;
 
@@ -71,17 +72,20 @@ class File : public Stream {
 	int64_t first_ts_;
 	bool active_;
 	int version_;
-	ftl::timer::TimerHandle timer_;
+	ftl::Handle timer_;
 	bool is_video_;
 	bool save_data_;
 
-	StreamCallback cb_;
+	//StreamCallback cb_;
 	MUTEX mutex_;
 	MUTEX data_mutex_;
 	std::atomic<int> jobs_;
 
 	bool _open();
 	bool _checkFile();
+
+	/* Apply version patches etc... */
+	void _patchPackets(ftl::codecs::StreamPacket &spkt, ftl::codecs::Packet &pkt);
 };
 
 }
diff --git a/components/streams/include/ftl/streams/injectors.hpp b/components/streams/include/ftl/streams/injectors.hpp
index 0d2a35aee..b1bd70745 100644
--- a/components/streams/include/ftl/streams/injectors.hpp
+++ b/components/streams/include/ftl/streams/injectors.hpp
@@ -2,6 +2,7 @@
 #define _FTL_STREAM_INJECTORS_HPP_
 
 #include <ftl/streams/stream.hpp>
+#include <ftl/rgbd/frameset.hpp>
 
 namespace ftl {
 namespace stream {
diff --git a/components/streams/include/ftl/streams/netstream.hpp b/components/streams/include/ftl/streams/netstream.hpp
index 89330c34f..36ad0d382 100644
--- a/components/streams/include/ftl/streams/netstream.hpp
+++ b/components/streams/include/ftl/streams/netstream.hpp
@@ -6,6 +6,7 @@
 #include <ftl/threads.hpp>
 #include <ftl/codecs/packet.hpp>
 #include <ftl/streams/stream.hpp>
+#include <ftl/handle.hpp>
 #include <string>
 
 namespace ftl {
@@ -48,7 +49,7 @@ class Net : public Stream {
 	Net(nlohmann::json &config, ftl::net::Universe *net);
 	~Net();
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
+	//bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
 
 	bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &) override;
 
@@ -60,6 +61,8 @@ class Net : public Stream {
 
 	inline const ftl::UUID &getPeer() const { return peer_; }
 
+	inline ftl::Handle onClientConnect(const std::function<bool(ftl::net::Peer*)> &cb) { return connect_cb_.on(cb); }
+
 	/**
 	 * Return the average bitrate of all streams since the last call to this
 	 * function. Units are Mbps.
@@ -78,10 +81,13 @@ class Net : public Stream {
 	int64_t frame_no_;
 	int64_t last_ping_;
 	std::string uri_;
+	std::string base_uri_;
 	bool host_;
 	int tally_;
 	std::array<std::atomic<int>,32> reqtally_;
-	ftl::codecs::Channels<0> last_selected_;
+	std::unordered_set<ftl::codecs::Channel> last_selected_;
+
+	ftl::Handler<ftl::net::Peer*> connect_cb_;
 
 	static float req_bitrate__;
 	static float sample_count__;
@@ -90,7 +96,7 @@ class Net : public Stream {
 
 	std::list<detail::StreamClient> clients_;
 
-	StreamCallback cb_;
+	//StreamCallback cb_;
 
 	bool _processRequest(ftl::net::Peer &p, const ftl::codecs::Packet &pkt);
 	void _checkDataRate(size_t tx_size, int64_t tx_latency, int64_t ts);
diff --git a/components/streams/include/ftl/streams/receiver.hpp b/components/streams/include/ftl/streams/receiver.hpp
index 4edfba5b2..8903f50fa 100644
--- a/components/streams/include/ftl/streams/receiver.hpp
+++ b/components/streams/include/ftl/streams/receiver.hpp
@@ -7,6 +7,7 @@
 #include <ftl/streams/stream.hpp>
 #include <ftl/codecs/decoder.hpp>
 #include <ftl/audio/decoder.hpp>
+#include <ftl/streams/builder.hpp>
 
 namespace ftl {
 namespace stream {
@@ -14,66 +15,66 @@ namespace stream {
 /**
  * Convert packet streams into framesets.
  */
-class Receiver : public ftl::Configurable, public ftl::rgbd::Generator {
+class Receiver : public ftl::Configurable, public ftl::data::Generator {
 	public:
-	explicit Receiver(nlohmann::json &config);
+	explicit Receiver(nlohmann::json &config, ftl::data::Pool *);
 	~Receiver();
 
 	void setStream(ftl::stream::Stream*);
 
 	/**
-	 * Encode and transmit an entire frame set. Frames may already contain
-	 * an encoded form, in which case that is used instead.
+	 * Loop a response frame back into a local buffer. Should only be called
+	 * for local builder framesets and probably only by `Feed`. It takes all
+	 * changes in the frame and puts them as `createChange` in the next
+	 * buffered frame in the builder. The response frame is empty afterwards as
+	 * the data is moved, not copied.
 	 */
-	//void post(const ftl::rgbd::FrameSet &fs);
-
-	// void write(const ftl::audio::FrameSet &f);
-
-	size_t size() override;
-
-	ftl::rgbd::FrameState &state(size_t ix) override;
+	void loopback(ftl::data::Frame &, ftl::codecs::Channel);
 
 	/**
 	 * Register a callback for received framesets. Sources are automatically
 	 * created to match the data received.
 	 */
-	void onFrameSet(const ftl::rgbd::VideoCallback &cb) override;
+	ftl::Handle onFrameSet(const ftl::data::FrameSetCallback &cb) override;
 
-	/**
-	 * Add a frameset handler to a specific stream ID.
-	 */
-	void onFrameSet(size_t s, const ftl::rgbd::VideoCallback &cb);
+	ftl::streams::BaseBuilder &builder(uint32_t id);
+
+	void registerBuilder(const std::shared_ptr<ftl::streams::BaseBuilder> &b);
+
+	void processPackets(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt);
 
-	void onAudio(const ftl::audio::FrameSet::Callback &cb);
+	void removeBuilder(uint32_t id);
 
 	private:
 	ftl::stream::Stream *stream_;
-	ftl::rgbd::VideoCallback fs_callback_;
-	ftl::audio::FrameSet::Callback audio_cb_;
-	ftl::rgbd::Builder builder_[ftl::stream::kMaxStreams];
+	ftl::data::Pool *pool_;
+	ftl::SingletonHandler<const ftl::data::FrameSetPtr&> callback_;
+	std::unordered_map<uint32_t, std::shared_ptr<ftl::streams::BaseBuilder>> builders_;
+	std::unordered_map<uint32_t, ftl::Handle> handles_;
 	ftl::codecs::Channel second_channel_;
 	int64_t timestamp_;
 	SHARED_MUTEX mutex_;
 	unsigned int frame_mask_;
+	ftl::Handle handle_;
 
 	struct InternalVideoStates {
 		InternalVideoStates();
 
 		int64_t timestamp;
-		ftl::rgbd::FrameState state;
-		ftl::rgbd::Frame frame;
+		//ftl::rgbd::Frame frame;
 		ftl::codecs::Decoder* decoders[32];
 		cv::cuda::GpuMat surface[32];
 		MUTEX mutex;
 		ftl::codecs::Channels<0> completed;
+		int width=0;
+		int height=0;
 	};
 
 	struct InternalAudioStates {
 		InternalAudioStates();
 
 		int64_t timestamp;
-		ftl::audio::FrameState state;
-		ftl::audio::Frame frame;
+		//ftl::audio::Frame frame;
 		MUTEX mutex;
 		ftl::codecs::Channels<0> completed;
 		ftl::audio::Decoder *decoder;
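
For orientation, a minimal sketch of how the reworked Receiver is wired up (mirroring what Feed does later in this patch); `root` and `stream` are assumed to already exist:

	// Sketch only: `root` is a parent ftl::Configurable, `stream` an existing ftl::stream::Stream*.
	auto pool = std::make_unique<ftl::data::Pool>(3, 5);
	auto *receiver = ftl::create<ftl::stream::Receiver>(root, "receiver", pool.get());
	receiver->setStream(stream);

	// onFrameSet now returns a Handle; keep it alive for as long as the callback is wanted.
	ftl::Handle h = receiver->onFrameSet([](const ftl::data::FrameSetPtr &fs) {
		// ... consume the frameset ...
		return true;
	});
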
diff --git a/components/streams/include/ftl/streams/renderer.hpp b/components/streams/include/ftl/streams/renderer.hpp
new file mode 100644
index 000000000..c922fe20d
--- /dev/null
+++ b/components/streams/include/ftl/streams/renderer.hpp
@@ -0,0 +1,41 @@
+#ifndef _FTL_RENDER_SOURCE_HPP_
+#define _FTL_RENDER_SOURCE_HPP_
+
+#include <ftl/data/creators.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/render/renderer.hpp>
+#include <ftl/render/CUDARender.hpp>
+#include <ftl/streams/feed.hpp>
+
+namespace ftl {
+namespace render {
+
+class BaseSourceImpl;
+
+/**
+ * Wrap a renderer into a source entity that manages it. This obtains the
+ * relevant framesets and can be triggered by a builder to generate frames.
+ */
+class Source : public ftl::Configurable, public ftl::data::DiscreteSource {
+	public:
+	Source(nlohmann::json &, ftl::stream::Feed*);
+	~Source();
+
+	inline std::string getURI() { return value("uri", std::string("")); }
+
+	bool capture(int64_t ts) override;
+	bool retrieve(ftl::data::Frame &) override;
+
+	static bool supports(const std::string &uri);
+
+	private:
+	ftl::stream::Feed *feed_;
+	ftl::render::BaseSourceImpl *impl_;
+
+	void reset();
+};
+
+}
+}
+
+#endif
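
A rough sketch of how this wrapper is driven for `device:render` URIs (as `Feed::add` does later in this patch); `feed`, `pool` and `fsid` are assumptions here, and `feed` serves both as the configurable parent and the Feed pointer:

	auto *rsource = ftl::create<ftl::render::Source>(feed, "source1", feed);

	// Rendering is pulled manually, so wrap it in a ManualSourceBuilder and tick it per display frame.
	auto *creator = new ftl::streams::ManualSourceBuilder(pool, fsid, rsource);
	creator->setFrameRate(30);
	creator->tick();  // capture(ts) + retrieve() into the next buffered frameset
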
diff --git a/components/streams/include/ftl/streams/sender.hpp b/components/streams/include/ftl/streams/sender.hpp
index eaad7ee8c..09ab6e513 100644
--- a/components/streams/include/ftl/streams/sender.hpp
+++ b/components/streams/include/ftl/streams/sender.hpp
@@ -25,28 +25,52 @@ class Sender : public ftl::Configurable {
 
 	/**
 	 * Encode and transmit an entire frame set. Frames may already contain
-	 * an encoded form, in which case that is used instead.
+	 * an encoded form, in which case that is used instead. If `noencode` is
+	 * set to true then encoding is skipped even where it would otherwise be
+	 * required, and the channel is sent with empty data to mark availability.
 	 */
-	void post(ftl::rgbd::FrameSet &fs);
+	void post(ftl::data::FrameSet &fs, ftl::codecs::Channel c, bool noencode=false);
+
+	/**
+	 * Mark channel as posted without sending anything.
+	 */
+	void fakePost(ftl::data::FrameSet &fs, ftl::codecs::Channel c);
+
+	/**
+	 * Make the channel available in the stream even if not available locally.
+	 */
+	void forceAvailable(ftl::data::FrameSet &fs, ftl::codecs::Channel c);
+
+	void post(ftl::data::Frame &f, ftl::codecs::Channel c);
 
 	/**
 	 * Encode and transmit a set of audio channels.
 	 */
-	void post(const ftl::audio::FrameSet &fs);
+	//void post(const ftl::audio::FrameSet &fs);
 
 	//void onStateChange(const std::function<void(ftl::codecs::Channel, int, int)>&);
 
 	void onRequest(const ftl::stream::StreamCallback &);
 
+	inline void resetSender() { do_inject_.clear(); }
+
+	/**
+	 * Force only these channels to be encoded. Any channels that already have
+	 * encoders but are not in this set have their encoders deallocated.
+	 */
+	void setActiveEncoders(uint32_t fsid, const std::unordered_set<ftl::codecs::Channel> &);
+
 	private:
 	ftl::stream::Stream *stream_;
 	int64_t timestamp_;
+	int64_t injection_timestamp_=0;
 	SHARED_MUTEX mutex_;
 	std::atomic_flag do_inject_;
 	//std::function<void(ftl::codecs::Channel, int, int)> state_cb_;
 	ftl::stream::StreamCallback reqcb_;
 	int add_iframes_;
-	int iframe_;
+	unsigned int iframe_;
+	ftl::Handle handle_;
 
 	struct EncodingState {
 		uint8_t bitrate;
@@ -63,12 +87,22 @@ class Sender : public ftl::Configurable {
 	std::unordered_map<int, AudioState> audio_state_;
 
 	//ftl::codecs::Encoder *_getEncoder(int fsid, int fid, ftl::codecs::Channel c);
-	void _encodeChannel(ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, bool reset);
+	void _encodeChannel(ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, bool reset, bool last_flush);
+	void _encodeChannel(ftl::data::Frame &f, ftl::codecs::Channel c, bool reset);
+	void _encodeVideoChannel(ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, bool reset, bool last_flush);
+	void _encodeAudioChannel(ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, bool reset, bool last_flush);
+	void _encodeDataChannel(ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, bool reset, bool last_flush);
+	void _encodeDataChannel(ftl::data::Frame &fs, ftl::codecs::Channel c, bool reset);
+
 	int _generateTiles(const ftl::rgbd::FrameSet &fs, int offset, ftl::codecs::Channel c, cv::cuda::Stream &stream, bool, bool);
 	EncodingState &_getTile(int fsid, ftl::codecs::Channel c);
 	cv::Rect _generateROI(const ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, int offset, bool stereo);
 	float _selectFloatMax(ftl::codecs::Channel c);
 	ftl::audio::Encoder *_getAudioEncoder(int fsid, int sid, ftl::codecs::Channel c, ftl::codecs::Packet &pkt);
+
+	void _sendPersistent(ftl::data::Frame &frame);
+
+	bool _checkNeedsIFrame(int64_t ts, bool injecting);
 };
 
 }
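
A hedged sketch of how the extended Sender interface composes; `sender` and `fs` are assumed to exist, everything else follows the declarations above:

	// Encode and transmit colour normally.
	sender->post(fs, ftl::codecs::Channel::Colour);

	// Advertise depth without encoding it: empty data simply marks availability.
	sender->post(fs, ftl::codecs::Channel::Depth, true);

	// Restrict encoding to a fixed set; encoders for any other channel are deallocated.
	sender->setActiveEncoders(fs.frameset(), {
		ftl::codecs::Channel::Colour, ftl::codecs::Channel::Depth
	});
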
diff --git a/components/streams/include/ftl/streams/stream.hpp b/components/streams/include/ftl/streams/stream.hpp
index 4baa178a9..f67b6fd84 100644
--- a/components/streams/include/ftl/streams/stream.hpp
+++ b/components/streams/include/ftl/streams/stream.hpp
@@ -3,26 +3,27 @@
 
 #include <ftl/configuration.hpp>
 #include <ftl/configurable.hpp>
-#include <ftl/rgbd/source.hpp>
-#include <ftl/rgbd/group.hpp>
-#include <ftl/net/universe.hpp>
+//#include <ftl/rgbd/source.hpp>
+//#include <ftl/rgbd/group.hpp>
 #include <ftl/codecs/encoder.hpp>
+#include <ftl/handle.hpp>
 #include <ftl/threads.hpp>
 #include <string>
 #include <vector>
 #include <map>
+#include <unordered_set>
 #include <atomic>
 
 namespace ftl {
 namespace stream {
 
-typedef std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> StreamCallback;
+typedef std::function<bool(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> StreamCallback;
 
 /**
  * Base stream class to be implemented. Provides encode and decode functionality
  * around a generic packet read and write mechanism. Some specialisations will
  * provide and automatically handle control signals.
- * 
+ *
  * Streams are bidirectional, frames can be both received and written.
  */
 class Stream : public ftl::Configurable {
@@ -36,7 +37,7 @@ class Stream : public ftl::Configurable {
 	 * callback even after the read function returns, for example with a
 	 * NetStream.
 	 */
-	virtual bool onPacket(const StreamCallback &)=0;
+	ftl::Handle onPacket(const StreamCallback &cb) { return cb_.on(cb); }
 
 	virtual bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)=0;
 
@@ -65,18 +66,20 @@ class Stream : public ftl::Configurable {
 	/**
 	 * Query available video channels for a frameset.
 	 */
-	const ftl::codecs::Channels<0> &available(int fs) const;
+	const std::unordered_set<ftl::codecs::Channel> &available(int fs) const;
 
 	/**
 	 * Query selected channels for a frameset. Channels not selected may not
 	 * be transmitted, received or decoded.
 	 */
-	const ftl::codecs::Channels<0> &selected(int fs) const;
+	const std::unordered_set<ftl::codecs::Channel> &selected(int fs) const;
+
+	std::unordered_set<ftl::codecs::Channel> selectedNoExcept(int fs) const;
 
 	/**
 	 * Change the video channel selection for a frameset.
 	 */
-	void select(int fs, const ftl::codecs::Channels<0> &, bool make=false);
+	void select(int fs, const std::unordered_set<ftl::codecs::Channel> &, bool make=false);
 
 	/**
 	 * Number of framesets in stream.
@@ -84,17 +87,18 @@ class Stream : public ftl::Configurable {
 	inline size_t size() const { return state_.size(); }
 
 	protected:
+	ftl::Handler<const ftl::codecs::StreamPacket&, const ftl::codecs::Packet&> cb_;
 
 	/**
 	 * Allow modification of available channels. Calling this with an invalid
 	 * fs number will create that frameset and increase the size.
 	 */
-	ftl::codecs::Channels<0> &available(int fs);
+	std::unordered_set<ftl::codecs::Channel> &available(int fs);
 
 	private:
 	struct FSState {
-		ftl::codecs::Channels<0> selected;
-		ftl::codecs::Channels<0> available;
+		std::unordered_set<ftl::codecs::Channel> selected;
+		std::unordered_set<ftl::codecs::Channel> available;
 	};
 
 	std::vector<FSState> state_;
@@ -115,8 +119,9 @@ class Muxer : public Stream {
 	virtual ~Muxer();
 
 	void add(Stream *, size_t fsid=0);
+	void remove(Stream *);
 
-	bool onPacket(const StreamCallback &) override;
+	//bool onPacket(const StreamCallback &) override;
 
 	bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &) override;
 
@@ -126,22 +131,25 @@ class Muxer : public Stream {
 
 	void reset() override;
 
-	int originStream(size_t fsid, int fid);
+	ftl::stream::Stream *originStream(size_t fsid, int fid);
 
 	private:
 	struct StreamEntry {
 		Stream *stream;
 		std::vector<int> maps;
+		uint32_t original_fsid = 0;
+		ftl::Handle handle;
 	};
 
-	std::vector<StreamEntry> streams_;
-	std::vector<std::pair<size_t,int>> revmap_[kMaxStreams];
+	std::list<StreamEntry> streams_;
+	std::vector<std::pair<StreamEntry*,int>> revmap_[kMaxStreams];
+	//std::list<ftl::Handle> handles_;
 	int nid_[kMaxStreams];
-	StreamCallback cb_;
+	//StreamCallback cb_;
 	SHARED_MUTEX mutex_;
 
 	void _notify(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt);
-	int _lookup(size_t fsid, int sid, int ssid);
+	int _lookup(size_t fsid, StreamEntry *se, int ssid, int count);
 };
 
 /**
@@ -158,7 +166,7 @@ class Broadcast : public Stream {
 	void remove(Stream *);
 	void clear();
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
+	//bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &) override;
 
 	bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &) override;
 
@@ -168,9 +176,12 @@ class Broadcast : public Stream {
 
 	void reset() override;
 
+	const std::list<Stream*> &streams() const { return streams_; }
+
 	private:
 	std::list<Stream*> streams_;
-	StreamCallback cb_;
+	std::list<ftl::Handle> handles_;
+	//StreamCallback cb_;
 	SHARED_MUTEX mutex_;
 };
 
@@ -184,7 +195,7 @@ class Intercept : public Stream {
 
 	void setStream(Stream *);
 
-	bool onPacket(const StreamCallback &) override;
+	//bool onPacket(const StreamCallback &) override;
 	bool onIntercept(const StreamCallback &);
 
 	bool post(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &) override;
@@ -197,7 +208,8 @@ class Intercept : public Stream {
 
 	private:
 	Stream *stream_;
-	StreamCallback cb_;
+	std::list<ftl::Handle> handles_;
+	//StreamCallback cb_;
 	StreamCallback intercept_;
 	SHARED_MUTEX mutex_;
 };
@@ -205,4 +217,34 @@ class Intercept : public Stream {
 }
 }
 
+std::unordered_set<ftl::codecs::Channel> operator&(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b);
+
+std::unordered_set<ftl::codecs::Channel> operator-(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b);
+
+inline std::unordered_set<ftl::codecs::Channel> &operator+=(std::unordered_set<ftl::codecs::Channel> &t, ftl::codecs::Channel c) {
+	t.insert(c);
+	return t;
+}
+
+inline std::unordered_set<ftl::codecs::Channel> &operator-=(std::unordered_set<ftl::codecs::Channel> &t, ftl::codecs::Channel c) {
+	t.erase(c);
+	return t;
+}
+
+inline std::unordered_set<ftl::codecs::Channel> operator+(const std::unordered_set<ftl::codecs::Channel> &t, ftl::codecs::Channel c) {
+	auto r = t;
+	r.insert(c);
+	return r;
+}
+
+inline std::unordered_set<ftl::codecs::Channel> operator+(ftl::codecs::Channel a, ftl::codecs::Channel b) {
+	std::unordered_set<ftl::codecs::Channel> r;
+	r.insert(a);
+	r.insert(b);
+	return r;
+}
+
+bool operator!=(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b);
+
+
 #endif  // _FTL_STREAM_STREAM_HPP_
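
The free operators above let `std::unordered_set<Channel>` stand in for the old `Channels<0>` bitset. A quick usage sketch (`stream` assumed):

	using ftl::codecs::Channel;

	std::unordered_set<Channel> wanted = Channel::Colour + Channel::Depth;
	wanted += Channel::Pose;     // insert
	wanted -= Channel::Depth;    // erase

	// Intersect with what the stream currently advertises for frameset 0, then select it.
	auto usable = wanted & stream->available(0);
	stream->select(0, usable, true);
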
diff --git a/components/streams/src/baserender.hpp b/components/streams/src/baserender.hpp
new file mode 100644
index 000000000..b8d3e7093
--- /dev/null
+++ b/components/streams/src/baserender.hpp
@@ -0,0 +1,37 @@
+#ifndef _FTL_RENDER_DETAIL_SOURCE_HPP_
+#define _FTL_RENDER_DETAIL_SOURCE_HPP_
+
+#include <Eigen/Eigen>
+#include <ftl/cuda_util.hpp>
+#include <ftl/rgbd/camera.hpp>
+#include <ftl/rgbd/frame.hpp>
+
+namespace ftl{
+namespace render {
+
+class Source;
+
+class BaseSourceImpl {
+	public:
+	friend class ftl::render::Source;
+
+	public:
+	explicit BaseSourceImpl(ftl::render::Source *host) : host_(host) { }
+	virtual ~BaseSourceImpl() {}
+
+	virtual bool capture(int64_t ts)=0;
+
+	virtual bool retrieve(ftl::data::Frame &frame)=0;
+
+	virtual bool isReady() { return false; }
+
+	ftl::render::Source *host() { return host_; }
+
+	protected:
+	ftl::render::Source *host_;
+};
+
+}
+}
+
+#endif  // _FTL_RENDER_DETAIL_SOURCE_HPP_
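
A skeletal, purely illustrative implementation of the contract above:

	class NullRenderSource : public ftl::render::BaseSourceImpl {
		public:
		explicit NullRenderSource(ftl::render::Source *host) : BaseSourceImpl(host) {}

		bool capture(int64_t ts) override { return true; }  // nothing to latch at capture time

		bool retrieve(ftl::data::Frame &frame) override {
			// A real implementation would render into the frame's channels here.
			return true;
		}

		bool isReady() override { return true; }
	};
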
diff --git a/components/streams/src/builder.cpp b/components/streams/src/builder.cpp
new file mode 100644
index 000000000..11e077b64
--- /dev/null
+++ b/components/streams/src/builder.cpp
@@ -0,0 +1,440 @@
+#include <ftl/streams/builder.hpp>
+#include <ftl/timer.hpp>
+
+#define LOGURU_REPLACE_GLOG 1
+#include <loguru.hpp>
+
+#include <chrono>
+
+using ftl::streams::BaseBuilder;
+using ftl::streams::ForeignBuilder;
+using ftl::streams::LocalBuilder;
+using ftl::streams::IntervalSourceBuilder;
+using ftl::streams::ManualSourceBuilder;
+using ftl::streams::LockedFrameSet;
+using ftl::data::FrameSet;
+using ftl::data::Frame;
+using namespace std::chrono;
+using std::this_thread::sleep_for;
+
+
+/*float Builder::latency__ = 0.0f;
+float Builder::fps__ = 0.0f;
+int Builder::stats_count__ = 0;
+MUTEX Builder::msg_mutex__;*/
+
+BaseBuilder::BaseBuilder(ftl::data::Pool *pool, int id) : pool_(pool), id_(id) {
+	size_ = 1;
+}
+
+BaseBuilder::BaseBuilder() : pool_(nullptr), id_(0) {
+	size_ = 1;
+}
+
+BaseBuilder::~BaseBuilder() {
+
+}
+
+// =============================================================================
+
+LocalBuilder::LocalBuilder(ftl::data::Pool *pool, int id) : BaseBuilder(pool, id) {
+	// Host receives responses that must propagate
+	ctype_ = ftl::data::ChangeType::FOREIGN;
+}
+
+LocalBuilder::LocalBuilder() : BaseBuilder() {
+	// Host receives responses that must propagate
+	ctype_ = ftl::data::ChangeType::FOREIGN;
+}
+
+LocalBuilder::~LocalBuilder() {
+
+}
+
+LockedFrameSet LocalBuilder::get(int64_t timestamp, size_t ix) {
+	SHARED_LOCK(mtx_, lk);
+	if (!frameset_) {
+		frameset_ = _allocate(timestamp);
+	}
+
+	LockedFrameSet lfs(frameset_.get(), frameset_->smtx);
+	return lfs;
+}
+
+LockedFrameSet LocalBuilder::get(int64_t timestamp) {
+	SHARED_LOCK(mtx_, lk);
+	if (!frameset_) {
+		frameset_ = _allocate(timestamp);
+	}
+
+	LockedFrameSet lfs(frameset_.get(), frameset_->smtx);
+	return lfs;
+}
+
+void LocalBuilder::setFrameCount(size_t size) {
+	// TODO: Resize the buffered frame!?
+	size_ = size;
+}
+
+std::shared_ptr<ftl::data::FrameSet> LocalBuilder::getNextFrameSet(int64_t ts) {
+	UNIQUE_LOCK(mtx_, lk);
+	if (!frameset_) {
+		frameset_ = _allocate(ts);
+	}
+	auto fs = frameset_;
+	frameset_ = _allocate(ts+1);
+	lk.unlock();
+
+	// Must lock to ensure no updates can happen here
+	UNIQUE_LOCK(fs->smtx, lk2);
+	fs->changeTimestamp(ts);
+	fs->localTimestamp = ts;
+	fs->store();
+	//for (auto &f : fs->frames) {
+	//	f.store();
+	//}
+	return fs;
+}
+
+std::shared_ptr<ftl::data::FrameSet> LocalBuilder::_allocate(int64_t timestamp) {
+	auto newf = std::make_shared<FrameSet>(pool_, ftl::data::FrameID(id_,255), timestamp);
+	for (size_t i=0; i<size_; ++i) {
+		newf->frames.push_back(std::move(pool_->allocate(ftl::data::FrameID(id_, i), timestamp)));
+	}
+
+	newf->count = size_;
+	newf->mask = 0xFF;
+	newf->clearFlags();
+	return newf;
+}
+
+// =============================================================================
+
+IntervalSourceBuilder::IntervalSourceBuilder(ftl::data::Pool *pool, int id, ftl::data::DiscreteSource *src) : LocalBuilder(pool, id), srcs_({src}) {
+
+}
+
+IntervalSourceBuilder::IntervalSourceBuilder(ftl::data::Pool *pool, int id, const std::list<ftl::data::DiscreteSource*> &srcs) : LocalBuilder(pool, id), srcs_(srcs) {
+
+}
+
+IntervalSourceBuilder::IntervalSourceBuilder() : LocalBuilder() {
+
+}
+
+IntervalSourceBuilder::~IntervalSourceBuilder() {
+
+}
+
+void IntervalSourceBuilder::start() {
+	capture_ = std::move(ftl::timer::add(ftl::timer::timerlevel_t::kTimerHighPrecision, [this](int64_t ts) {
+		for (auto *s : srcs_) s->capture(ts);
+		return true;
+	}));
+
+	retrieve_ = std::move(ftl::timer::add(ftl::timer::timerlevel_t::kTimerMain, [this](int64_t ts) {
+		auto fs = getNextFrameSet(ts);
+
+		// TODO: Do in parallel...
+		for (auto *s : srcs_) {
+			if (!s->retrieve(fs->firstFrame())) {
+				LOG(WARNING) << "Frame is being skipped: " << ts;
+				fs->firstFrame().message(ftl::data::Message::Warning_FRAME_DROP, "Frame is being skipped");
+			}
+		}
+
+		cb_.trigger(fs);
+		return true;
+	}));
+}
+
+void IntervalSourceBuilder::stop() {
+	capture_.cancel();
+	retrieve_.cancel();
+}
+
+// =============================================================================
+
+ManualSourceBuilder::ManualSourceBuilder(ftl::data::Pool *pool, int id, ftl::data::DiscreteSource *src) : LocalBuilder(pool, id), src_(src) {
+
+}
+
+ManualSourceBuilder::ManualSourceBuilder() : LocalBuilder(), src_(nullptr) {
+
+}
+
+ManualSourceBuilder::~ManualSourceBuilder() {
+
+}
+
+void ManualSourceBuilder::tick() {
+	if (!src_) return;
+
+	int64_t ts = ftl::timer::get_time();
+	if (ts < last_timestamp_ + mspf_) return;
+	last_timestamp_ = ts;
+
+	src_->capture(ts);
+
+	auto fs = getNextFrameSet(ts);
+
+	if (!src_->retrieve(fs->firstFrame())) {
+		LOG(WARNING) << "Frame was skipping";
+		fs->firstFrame().message(ftl::data::Message::Warning_FRAME_DROP, "Frame is being skipped");
+	}
+
+	cb_.trigger(fs);
+}
+
+// =============================================================================
+
+ForeignBuilder::ForeignBuilder(ftl::data::Pool *pool, int id) : BaseBuilder(pool, id), head_(0) {
+	jobs_ = 0;
+	skip_ = false;
+	bufferSize_ = 1;
+	last_frame_ = 0;
+
+	mspf_ = ftl::timer::getInterval();
+}
+
+ForeignBuilder::ForeignBuilder() : BaseBuilder(), head_(0) {
+	jobs_ = 0;
+	skip_ = false;
+	bufferSize_ = 1;
+	last_frame_ = 0;
+
+	mspf_ = ftl::timer::getInterval();
+}
+
+ForeignBuilder::~ForeignBuilder() {
+	main_id_.cancel();
+
+	UNIQUE_LOCK(mutex_, lk);
+	// Make sure all jobs have finished
+	while (jobs_ > 0) {
+		sleep_for(milliseconds(10));
+	}
+
+	// Also make sure to get unique lock on any processing framesets.
+	for (auto &f : framesets_) {
+		UNIQUE_LOCK(f->smtx, lk);
+	}
+}
+
+LockedFrameSet ForeignBuilder::get(int64_t timestamp) {
+	if (timestamp <= 0) throw FTL_Error("Invalid frame timestamp");
+
+	UNIQUE_LOCK(mutex_, lk);
+
+	auto fs = _get(timestamp);
+	LockedFrameSet lfs(fs.get(), fs->smtx, [this,fs](ftl::data::FrameSet *d) {
+		if (fs->isComplete()) {
+			if (bufferSize_ == 0 && !fs->test(ftl::data::FSFlag::STALE)) {
+				UNIQUE_LOCK(mutex_, lk);
+				_schedule();
+			}
+		}
+	});
+	return lfs;
+}
+
+std::shared_ptr<ftl::data::FrameSet> ForeignBuilder::_get(int64_t timestamp) {
+	if (timestamp <= last_frame_) {
+		throw FTL_Error("Frameset already completed: " << timestamp);
+	}
+
+	auto fs = _findFrameset(timestamp);
+
+	if (!fs) {
+		// Add new frameset
+		fs = _addFrameset(timestamp);
+		if (!fs) throw FTL_Error("Could not add frameset");
+
+		_schedule();
+	}
+
+	if (fs->test(ftl::data::FSFlag::STALE)) {
+		throw FTL_Error("Frameset already completed");
+	}
+	return fs;
+}
+
+LockedFrameSet ForeignBuilder::get(int64_t timestamp, size_t ix) {
+	if (ix == 255) {
+		UNIQUE_LOCK(mutex_, lk);
+
+		if (timestamp <= 0) throw FTL_Error("Invalid frame timestamp (" << timestamp << ")");
+		auto fs = _get(timestamp);
+		if (!fs) throw FTL_Error("No frameset for time " << timestamp);
+
+		LockedFrameSet lfs(fs.get(), fs->smtx);
+		return lfs;
+	} else {
+		if (timestamp <= 0 || ix >= 32) throw FTL_Error("Invalid frame timestamp or index (" << timestamp << ", " << ix << ")");
+
+		UNIQUE_LOCK(mutex_, lk);
+
+		if (ix >= size_) {
+			size_ = ix+1;
+		}
+
+		auto fs = _get(timestamp);
+
+		if (ix >= fs->frames.size()) {
+			// FIXME: Check that no access to frames can occur without lock
+			UNIQUE_LOCK(fs->smtx, flk);
+			while (fs->frames.size() < size_) {
+				fs->frames.push_back(std::move(pool_->allocate(ftl::data::FrameID(fs->frameset(), fs->frames.size()), fs->timestamp())));
+			}
+		}
+
+		LockedFrameSet lfs(fs.get(), fs->smtx, [this,fs](ftl::data::FrameSet *d) {
+			if (fs->isComplete()) {
+				if (bufferSize_ == 0 && !fs->test(ftl::data::FSFlag::STALE)) {
+					UNIQUE_LOCK(mutex_, lk);
+					_schedule();
+				}
+			}
+		});
+
+		return lfs;
+	}
+}
+
+void ForeignBuilder::_schedule() {
+	if (size_ == 0) return;
+	std::shared_ptr<ftl::data::FrameSet> fs;
+
+	// Still working on a previously scheduled frame
+	if (jobs_ > 0) return;
+
+	// Find a valid / completed frameset to process
+	fs = _getFrameset();
+
+	// We have a frameset so create a thread job to call the onFrameset callback
+	if (fs) {
+		jobs_++;
+
+		ftl::pool.push([this,fs](int) {
+			fs->store();
+
+			if (!fs->isComplete()) {
+				fs->set(ftl::data::FSFlag::PARTIAL);
+				fs->frames[0].message(ftl::data::Message::Warning_INCOMPLETE_FRAME, "Frameset not complete");
+			}
+
+			//UNIQUE_LOCK(fs->mutex(), lk2);
+
+			try {
+				cb_.trigger(fs);
+			} catch(const ftl::exception &e) {
+				LOG(ERROR) << "Exception in frameset builder: " << e.what();
+				//LOG(ERROR) << "Trace = " << e.trace();
+			} catch(std::exception &e) {
+				LOG(ERROR) << "Exception in frameset builder: " << e.what();
+			}
+
+			UNIQUE_LOCK(mutex_, lk);
+			//_freeFrameset(fs);
+			jobs_--;
+
+			// Schedule another frame immediately (or try to)
+			_schedule();
+		});
+	}
+
+}
+
+std::pair<float,float> BaseBuilder::getStatistics() {
+	return {-1.0f, -1.0f};
+}
+
+std::shared_ptr<ftl::data::FrameSet> ForeignBuilder::_findFrameset(int64_t ts) {
+	// Search backwards to find match
+	for (auto f : framesets_) {
+		if (f->timestamp() == ts) {
+			return f;
+		} else if (f->timestamp() < ts) {
+			return nullptr;
+		}
+	}
+
+	return nullptr;
+}
+
+/*
+ * Get the most recent completed frameset that isn't stale.
+ * Note: Must occur inside a mutex lock.
+ */
+std::shared_ptr<ftl::data::FrameSet> ForeignBuilder::_getFrameset() {
+	auto i = framesets_.begin();
+	int N = bufferSize_;
+
+	// Skip N frames to fixed buffer location
+	if (bufferSize_ > 0) {
+		while (N-- > 0 && i != framesets_.end()) ++i;
+	// Otherwise skip to first fully completed frame
+	} else {
+		while (i != framesets_.end() && !(*i)->isComplete()) ++i;
+	}
+
+	if (i != framesets_.end()) {
+		auto f = *i;
+
+		// Lock to force completion of ongoing construction first
+		UNIQUE_LOCK(f->smtx, slk);
+		last_frame_ = f->timestamp();
+		auto j = framesets_.erase(i);
+		f->set(ftl::data::FSFlag::STALE);
+		slk.unlock();
+
+		int count = 0;
+		// Remove all previous framesets
+		// FIXME: Should do this in reverse order.
+		while (j!=framesets_.end()) {
+			++count;
+
+			auto f2 = *j;
+			LOG(WARNING) << "FrameSet discarded: " << f2->timestamp();
+			{
+				// Ensure frame processing is finished first
+				UNIQUE_LOCK(f2->smtx, lk);
+				j = framesets_.erase(j);
+			}
+		}
+
+		return f;
+	}
+
+	return nullptr;
+}
+
+std::shared_ptr<ftl::data::FrameSet> ForeignBuilder::_addFrameset(int64_t timestamp) {
+	if (framesets_.size() >= 32) {
+		LOG(WARNING) << "Frameset buffer full, resetting: " << timestamp;
+		framesets_.clear();
+	}
+
+	auto newf = std::make_shared<FrameSet>(pool_, ftl::data::FrameID(id_,255), timestamp, size_);
+	for (size_t i=0; i<size_; ++i) {
+		newf->frames.push_back(std::move(pool_->allocate(ftl::data::FrameID(id_, i), timestamp)));
+	}
+
+	newf->count = 0;
+	newf->mask = 0;
+	newf->localTimestamp = timestamp;
+	newf->clearFlags();
+
+	// Insertion sort by timestamp
+	for (auto i=framesets_.begin(); i!=framesets_.end(); i++) {
+		auto f = *i;
+
+		if (timestamp > f->timestamp()) {
+			framesets_.insert(i, newf);
+			return newf;
+		}
+	}
+
+	framesets_.push_back(newf);
+	return newf;
+}
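
For reference, roughly how these builders are intended to be driven (this mirrors `Feed::add` in the next file); `pool`, `receiver`, `src` and the frameset ids are assumed:

	// Interval-driven capture: timer jobs call capture()/retrieve() once start() is called.
	auto interval = std::make_shared<ftl::streams::IntervalSourceBuilder>(pool, fsid, src);
	receiver->registerBuilder(interval);
	interval->start();

	// Manually-driven capture (e.g. render sources): nothing happens until tick() is called.
	auto manual = std::make_shared<ftl::streams::ManualSourceBuilder>(pool, fsid2, src);
	manual->setFrameRate(30);
	receiver->registerBuilder(manual);
	manual->tick();
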
diff --git a/components/streams/src/feed.cpp b/components/streams/src/feed.cpp
new file mode 100644
index 000000000..defe19720
--- /dev/null
+++ b/components/streams/src/feed.cpp
@@ -0,0 +1,1142 @@
+#include <loguru.hpp>
+#include <nlohmann/json.hpp>
+#include <ftl/streams/feed.hpp>
+#include <ftl/streams/renderer.hpp>
+
+#include <ftl/streams/netstream.hpp>
+#include <ftl/streams/filestream.hpp>
+
+#include "ftl/operators/colours.hpp"
+#include "ftl/operators/segmentation.hpp"
+#include "ftl/operators/mask.hpp"
+#include "ftl/operators/antialiasing.hpp"
+#include <ftl/operators/smoothing.hpp>
+#include <ftl/operators/disparity.hpp>
+#include <ftl/operators/detectandtrack.hpp>
+#include <ftl/operators/weighting.hpp>
+#include <ftl/operators/mvmls.hpp>
+#include <ftl/operators/clipping.hpp>
+#include <ftl/operators/poser.hpp>
+#include <ftl/operators/gt_analysis.hpp>
+
+using ftl::stream::Feed;
+using ftl::codecs::Channel;
+
+//static nlohmann::json feed_config;
+
+////////////////////////////////////////////////////////////////////////////////
+
+Feed::Filter::Filter(Feed* feed, const std::unordered_set<uint32_t>& sources, const std::unordered_set<Channel>& channels) :
+		feed_(feed), channels_(channels), channels_available_(channels), sources_(sources) {
+
+}
+
+Feed::Filter::~Filter() {
+
+}
+
+void Feed::Filter::remove() {
+	return feed_->removeFilter(this);
+}
+
+void Feed::Filter::on(const ftl::data::FrameSetCallback &cb) {
+	UNIQUE_LOCK(feed_->mtx_, lk);
+
+	if (std::find(feed_->filters_.begin(), feed_->filters_.end(),this) == feed_->filters_.end()) {
+		throw ftl::exception("Filter does not belong to Feed; This should never happen!");
+	}
+
+	handles_.push_back(std::move(handler_.on(cb)));
+}
+
+ftl::Handle Feed::Filter::onWithHandle(const ftl::data::FrameSetCallback &cb) {
+	UNIQUE_LOCK(feed_->mtx_, lk);
+
+	if (std::find(feed_->filters_.begin(), feed_->filters_.end(),this) == feed_->filters_.end()) {
+		throw ftl::exception("Filter does not belong to Feed; This should never happen!");
+	}
+
+	return std::move(handler_.on(cb));
+}
+
+std::list<ftl::data::FrameSetPtr> Feed::Filter::getLatestFrameSets() {
+	std::list<ftl::data::FrameSetPtr> results;
+
+	SHARED_LOCK(feed_->mtx_, lk);
+	if (sources_.empty()) {
+		for (auto &i : feed_->latest_) {
+			if (i.second) results.emplace_back(std::atomic_load(&(i.second)));
+		}
+	} else {
+		for (auto &s : sources_) {
+			auto i = feed_->latest_.find(s);
+			if (i != feed_->latest_.end()) {
+				if (i->second) results.emplace_back(std::atomic_load(&(i->second)));
+			}
+		}
+	}
+	return results;
+}
+
+Feed::Filter &Feed::Filter::select(const std::unordered_set<ftl::codecs::Channel> &cs) {
+	UNIQUE_LOCK(feed_->mtx_, lk);
+	channels_ = cs;
+	feed_->select();
+	return *this;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+Feed::Feed(nlohmann::json &config, ftl::net::Universe*net) :
+		ftl::Configurable(config), net_(net) {
+
+	//feed_config = ftl::loadJSON(FTL_LOCAL_CONFIG_ROOT "/feed.json");
+	restore(ftl::Configurable::getID(), {
+		"recent_files",
+		"recent_sources",
+		"known_hosts",
+		"known_groups",
+		"auto_host_connect",
+		"auto_host_sources",
+		"uri",
+		"recorder"
+	});
+
+	pool_ = std::make_unique<ftl::data::Pool>(3,5);
+
+	stream_ = std::unique_ptr<ftl::stream::Muxer>
+		(ftl::create<ftl::stream::Muxer>(this, "muxer"));
+
+	interceptor_ = std::unique_ptr<ftl::stream::Intercept>
+		(ftl::create<ftl::stream::Intercept>(this, "intercept"));
+
+	receiver_ = std::unique_ptr<ftl::stream::Receiver>
+		(ftl::create<ftl::stream::Receiver>(this, "receiver", pool_.get()));
+
+	sender_ = std::unique_ptr<ftl::stream::Sender>
+		(ftl::create<ftl::stream::Sender>(this, "sender"));
+
+	recorder_ = std::unique_ptr<ftl::stream::Sender>
+		(ftl::create<ftl::stream::Sender>(this, "recorder"));
+	record_stream_ = std::unique_ptr<ftl::stream::Broadcast>
+		(ftl::create<ftl::stream::Broadcast>(this, "record_stream"));
+	recorder_->setStream(record_stream_.get());
+
+	record_recv_handle_ = record_stream_->onPacket([this](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		receiver_->processPackets(spkt, pkt);
+		return true;
+	});
+
+	record_filter_ = nullptr;
+
+	//interceptor_->setStream(stream_.get());
+	receiver_->setStream(stream_.get());
+	sender_->setStream(stream_.get());
+
+	handle_sender_ = pool_->onFlush([this]
+			(ftl::data::Frame &f, ftl::codecs::Channel c) {
+
+		// Send only response channels on a per-frame basis
+		if (f.mode() == ftl::data::FrameMode::RESPONSE) {
+			// Remote sources need to use sender, otherwise loopback to local
+			if (streams_.find(f.frameset()) != streams_.end()) {
+				sender_->post(f, c);
+			} else {
+				receiver_->loopback(f, c);
+			}
+		}
+		return true;
+	});
+
+	net_->onConnect([this](ftl::net::Peer *p) {
+		ftl::pool.push([this,p](int id) {
+			_updateNetSources(p);
+		});
+	});
+
+	if (net_->isBound("add_stream")) net_->unbind("add_stream");
+	net_->bind("add_stream", [this](ftl::net::Peer &p, std::string uri){
+		//UNIQUE_LOCK(mtx_, lk);
+		_updateNetSources(&p, uri);
+	});
+
+	net_->onDisconnect([this](ftl::net::Peer *) {
+		// TODO: maintain map between peer and sources, on disconnect remove all
+		//       peer's source. Also map between Peers and fsids?
+		//std::unique_lock<std::mutex> lk(mtx_);
+	});
+
+	handle_receiver_ = receiver_->onFrameSet(
+		[this](const ftl::data::FrameSetPtr& fs) {
+			if (value("drop_partial_framesets", false)) {
+				if (fs->count < static_cast<int>(fs->frames.size())) {
+					LOG(WARNING) << "Dropping partial frameset: " << fs->timestamp();
+					return true;
+				}
+			}
+
+			// FIXME: What happens if pipeline added concurrently?
+			if (pre_pipelines_.count(fs->frameset()) == 1) {
+				pre_pipelines_[fs->frameset()]->apply(*fs, *fs, 0);
+			}
+
+			SHARED_LOCK(mtx_, lk);
+
+			std::atomic_store(&latest_.at(fs->frameset()), fs);
+
+			if (fs->hasAnyChanged(Channel::Thumbnail)) {
+				_saveThumbnail(fs);
+			}
+
+			for (auto* filter : filters_) {
+				// TODO: smarter update (update only when changed) instead of
+				// filter->channels_available_ = fs->channels();
+
+				if (filter->sources().empty()) {
+					//filter->channels_available_ = fs->channels();
+					filter->handler_.trigger(fs);
+				}
+				else {
+					// TODO: process partial/complete sets here (drop), that is
+					//       intersection filter->sources() and fs->sources() is
+					//       same as filter->sources().
+
+					// TODO: reverse map source ids required here?
+					for (const auto& src : filter->sources()) {
+						//if (fs->hasFrame(src)) {
+						if (fs->frameset() == src) {
+							//filter->channels_available_ = fs->channels();
+							filter->handler_.trigger(fs);
+							break;
+						}
+					}
+				}
+			}
+
+			return true;
+	});
+
+	stream_->begin();
+
+	//if (value("auto_host_connect", true)) autoConnect();
+}
+
+Feed::~Feed() {
+	UNIQUE_LOCK(mtx_, lk);
+	//ftl::saveJSON(FTL_LOCAL_CONFIG_ROOT "/feed.json", feed_config);
+
+	handle_receiver_.cancel();
+	handle_record_.cancel();
+	handle_sender_.cancel();
+	record_recv_handle_.cancel();
+
+	receiver_.reset();  // Note: Force destruction first to remove filters this way
+	sender_.reset();
+	recorder_.reset();
+
+	// TODO stop everything and clean up
+	// delete
+
+	for (auto &p : pre_pipelines_) {
+		delete p.second;
+	}
+	for (auto &d : devices_) {
+		delete d.second;
+	}
+	for (auto &r : renderers_) {
+		lk.unlock();
+		delete r.second;
+		lk.lock();
+	}
+
+	if (filters_.size() > 0) LOG(WARNING) << "Filters remain after feed destruct (" << filters_.size() << ")";
+	for (auto* filter : filters_) {
+		delete filter;
+	}
+	filters_.clear();
+
+	interceptor_.reset();
+	stream_.reset();
+	for (auto &ls : streams_) {
+		for (auto *s : ls.second) {
+			delete s;
+		}
+	}
+}
+
+void Feed::_saveThumbnail(const ftl::data::FrameSetPtr& fs) {
+	// TODO: Put thumb somewhere here...
+}
+
+uint32_t Feed::allocateFrameSetId(const std::string &group) {
+	if (group.size() == 0) {
+		return fs_counter_++;
+	} else {
+		auto i = groups_.find(group);
+		if (i == groups_.end()) {
+			uint32_t id = fs_counter_++;
+			groups_[group] = id;
+			return id;
+		} else {
+			return i->second;
+		}
+	}
+}
+
+void Feed::select() {
+	std::map<uint32_t, std::unordered_set<Channel>> selected_channels;
+	for (auto &filter : filters_) {
+		const auto& selected = filter->channels();
+
+		if (filter->sources().empty()) {
+			// no sources: select all sources with selected channels
+			for (const auto& [uri, fsid] : fsid_lookup_) {
+				std::ignore = uri;
+				selected_channels[fsid].insert(selected.begin(), selected.end());
+			}
+		}
+		else {
+			// sources given
+			for (const auto& fsid : filter->sources()) {
+				if (selected_channels.count(fsid) == 0) {
+					selected_channels.try_emplace(fsid);
+				}
+				selected_channels[fsid].insert(selected.begin(), selected.end());
+			}
+		}
+	}
+	for (auto& [fsid, channels] : selected_channels) {
+		stream_->select(fsid, channels, true);
+		LOG(INFO) << "Update selections";
+		for (auto c : channels) {
+			LOG(INFO) << "  -- select " << (int)c;
+		}
+	}
+}
+
+std::vector<std::string> Feed::listSources() {
+	std::vector<std::string> sources;
+	SHARED_LOCK(mtx_, lk);
+	sources.reserve(fsid_lookup_.size());
+	for (auto& [uri, fsid] : fsid_lookup_) {
+		std::ignore = fsid;
+		sources.push_back(uri);
+	}
+	return sources;
+}
+
+Feed::Filter* Feed::filter(const std::unordered_set<uint32_t> &framesets,
+		const std::unordered_set<Channel> &channels) {
+
+	auto* filter = new Filter(this, framesets, channels);
+	UNIQUE_LOCK(mtx_, lk);
+	filters_.push_back(filter);
+	select();
+	return filter;
+}
+
+Feed::Filter* Feed::filter(const std::unordered_set<Channel> &channels) {
+	return filter(std::unordered_set<uint32_t>{}, channels);
+}
+
+Feed::Filter* Feed::filter(const std::unordered_set<std::string> &sources, const std::unordered_set<Channel> &channels) {
+	std::unordered_set<uint32_t> fsids;
+
+	SHARED_LOCK(mtx_, lk);
+	for (const auto &src : sources) {
+		ftl::URI uri(src);
+
+		auto i = fsid_lookup_.find(uri.getBaseURI());
+		if (i != fsid_lookup_.end()) {
+			fsids.emplace(i->second);
+		}
+	}
+	return filter(fsids, channels);
+}
+
+void Feed::remove(const std::string &str) {
+	uint32_t fsid;
+
+	{
+		UNIQUE_LOCK(mtx_, lk);
+		auto i = fsid_lookup_.find(str);
+		if (i != fsid_lookup_.end()) {
+			fsid = i->second;
+		} else {
+			return;
+		}
+	}
+
+	remove(fsid);
+}
+
+void Feed::remove(uint32_t id) {
+	UNIQUE_LOCK(mtx_, lk);
+
+	// First tell all filters
+	for (auto *f : filters_) {
+		if (f->sources_.empty() || f->sources_.count(id)) {
+			f->remove_handler_.trigger(id);
+		}
+	}
+
+	remove_sources_cb_.trigger(id);
+
+	// TODO: Actual delete of source
+	// If stream source, remove from muxer
+	if (streams_.count(id)) {
+		auto &streams = streams_[id];
+		for (auto *s : streams) {
+			stream_->remove(s);
+			delete s;
+		}
+
+		streams_.erase(id);
+	} else if (devices_.count(id)) {
+		receiver_->removeBuilder(id);
+		delete devices_[id];
+		devices_.erase(id);
+	} else if (renderers_.count(id)) {
+
+	}
+
+	if (latest_.count(id)) latest_.erase(id);
+
+	for (auto i = fsid_lookup_.begin(); i != fsid_lookup_.end();) {
+		if (i->second == id) {
+			i = fsid_lookup_.erase(i);
+		} else {
+			++i;
+		}
+	}
+}
+
+ftl::operators::Graph* Feed::addPipeline(uint32_t fsid) {
+	UNIQUE_LOCK(mtx_, lk);
+	return _addPipeline(fsid);
+}
+
+ftl::operators::Graph* Feed::_addPipeline(uint32_t fsid) {
+	if (pre_pipelines_.count(fsid) != 0) {
+		delete pre_pipelines_[fsid];
+	}
+
+	if (devices_.count(fsid)) {
+		pre_pipelines_[fsid] = ftl::config::create<ftl::operators::Graph>
+			(devices_[fsid], std::string("pipeline"));
+	} else if (renderers_.count(fsid)) {
+		pre_pipelines_[fsid] = ftl::config::create<ftl::operators::Graph>
+			(renderers_[fsid], std::string("pipeline"));
+	} else if (streams_.count(fsid)) {
+		pre_pipelines_[fsid] = ftl::config::create<ftl::operators::Graph>
+			(streams_[fsid].front(), std::string("pipeline"));
+	}
+
+	//pre_pipelines_[fsid] = ftl::config::create<ftl::operators::Graph>
+	//	(this, std::string("pre_filters") + std::to_string(fsid));
+
+	return pre_pipelines_[fsid];
+}
+
+void Feed::_createPipeline(uint32_t fsid) {
+	// Don't recreate if already exists
+	if (pre_pipelines_.count(fsid)) return;
+
+	LOG(INFO) << "Creating pipeline";
+	auto *p = _addPipeline(fsid);
+
+	if (pipe_creator_) {
+		pipe_creator_(p);
+	} else {
+		p->append<ftl::operators::DepthChannel>("depth")->value("enabled", false);
+		p->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
+		p->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
+		p->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
+		p->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
+		//p->append<ftl::operators::HFSmoother>("hfnoise");
+		p->append<ftl::operators::CrossSupport>("cross");
+		p->append<ftl::operators::PixelWeights>("weights");
+		p->append<ftl::operators::CullWeight>("remove_weights")->value("enabled", false);
+		p->append<ftl::operators::DegradeWeight>("degrade");
+		p->append<ftl::operators::VisCrossSupport>("viscross")->set("enabled", false);
+		p->append<ftl::operators::BorderMask>("border_mask");
+		p->append<ftl::operators::CullDiscontinuity>("remove_discontinuity");
+		p->append<ftl::operators::MultiViewMLS>("mvmls")->value("enabled", false);
+		p->append<ftl::operators::Poser>("poser")->value("enabled", true);
+		p->append<ftl::operators::GTAnalysis>("gtanalyse");
+	}
+}
+
+void Feed::setPipelineCreator(const std::function<void(ftl::operators::Graph*)> &cb) {
+	UNIQUE_LOCK(mtx_, lk);
+	pipe_creator_ = cb;
+}
+
+void Feed::removeFilter(Feed::Filter* filter) {
+	UNIQUE_LOCK(mtx_, lk);
+
+	if (record_filter_ == filter) {
+		_stopRecording();
+	}
+
+	auto iter = std::find(filters_.begin(), filters_.end(), filter);
+	if (iter != filters_.end()) {
+		filters_.erase(iter);
+		delete filter;
+	}
+}
+
+void Feed::_updateNetSources(ftl::net::Peer *p, const std::string &s, bool autoadd) {
+	UNIQUE_LOCK(mtx_, lk);
+	netcams_.insert(s);
+
+	// TODO: Auto add source
+
+	ftl::URI uri(s);
+	_add_recent_source(uri)["host"] = p->getURI();
+
+	if (autoadd || value("auto_host_sources", false)) {
+		add(uri);
+	}
+
+	ftl::pool.push([this, s](int id) {
+		std::vector<std::string> srcs{s};
+		new_sources_cb_.trigger(srcs);
+	});
+}
+
+void Feed::_updateNetSources(ftl::net::Peer *p, bool autoadd) {
+	//auto netcams =
+	//	net_->findAll<std::string>("list_streams");
+
+	// Peer may not have a list_streams binding yet
+	try {
+		auto peerstreams = p->call<std::vector<std::string>>("list_streams");
+
+		UNIQUE_LOCK(mtx_, lk);
+		//netcams_ = std::move(netcams);
+		netcams_.insert(peerstreams.begin(), peerstreams.end());
+
+		for (const auto &s : peerstreams) {
+			ftl::URI uri(s);
+			_add_recent_source(uri)["host"] = p->getURI();
+
+			if (autoadd || value("auto_host_sources", false)) {
+				ftl::pool.push([this, uri](int id) { add(uri); });
+			}
+		}
+
+		ftl::pool.push([this, peerstreams](int id) {
+			new_sources_cb_.trigger(peerstreams);
+		});
+
+	} catch (const ftl::exception &e) {
+		// Peer does not have a "list_streams" binding yet; ignore.
+	}
+
+	/* done by add()
+	if (n > 0) {
+		stream_->begin();
+	}*/
+}
+
+std::vector<std::string> Feed::availableNetworkSources() {
+	SHARED_LOCK(mtx_, lk);
+	std::vector<std::string> result(netcams_.begin(), netcams_.end());
+	return result;
+}
+
+std::vector<std::string> Feed::availableGroups() {
+	std::vector<std::string> groups;
+	auto &known = getConfig()["known_groups"];
+
+	for (auto &f : known.items()) {
+		groups.push_back(f.key());
+	}
+
+	return groups;
+}
+
+std::vector<std::string> Feed::availableFileSources() {
+	std::vector<std::string> files;
+	auto &recent_files = getConfig()["recent_files"];
+
+	for (auto &f : recent_files.items()) {
+		files.push_back(f.key());
+	}
+
+	return files;
+}
+
+void Feed::clearFileHistory() {
+	UNIQUE_LOCK(mtx_, lk);
+	auto &recent_files = getConfig()["recent_files"];
+	recent_files.clear();
+
+	auto &recent = getConfig()["recent_sources"];
+	for (auto i=recent.begin(); i != recent.end();) {
+		ftl::URI uri(i.key());
+		if (uri.getScheme() == ftl::URI::SCHEME_FILE) {
+			i = recent.erase(i);
+		} else {
+			++i;
+		}
+	}
+}
+
+std::vector<std::string> Feed::knownHosts() {
+	std::vector<std::string> hosts;
+	auto &known = getConfig()["known_hosts"];
+
+	for (auto &f : known.items()) {
+		hosts.push_back(f.key());
+	}
+
+	return hosts;
+}
+
+void Feed::clearHostHistory() {
+	UNIQUE_LOCK(mtx_, lk);
+	auto &known = getConfig()["known_hosts"];
+	known.clear();
+
+	auto &recent = getConfig()["recent_sources"];
+	for (auto i=recent.begin(); i != recent.end();) {
+		ftl::URI uri(i.key());
+		if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) {
+			i = recent.erase(i);
+		} else {
+			++i;
+		}
+	}
+}
+
+std::set<ftl::stream::SourceInfo> Feed::recentSources() {
+	std::set<ftl::stream::SourceInfo> result;
+
+	auto &recent = getConfig()["recent_sources"];
+
+	for (auto &f : recent.items()) {
+		ftl::stream::SourceInfo info;
+		info.uri = f.key();
+		if (f.value().contains("uri")) info.uri = f.value()["uri"].get<std::string>();
+		info.last_used = f.value()["last_open"].get<int64_t>();
+		result.insert(info);
+	}
+
+	return result;
+}
+
+std::vector<std::string> Feed::availableDeviceSources() {
+	std::vector<std::string> results;
+
+	if (ftl::rgbd::Source::supports("device:pylon")) results.emplace_back("device:pylon");
+	if (ftl::rgbd::Source::supports("device:camera")) results.emplace_back("device:camera");
+	if (ftl::rgbd::Source::supports("device:stereo")) results.emplace_back("device:stereo");
+	if (ftl::rgbd::Source::supports("device:screen")) results.emplace_back("device:screen");
+	if (ftl::rgbd::Source::supports("device:realsense")) results.emplace_back("device:realsense");
+	if (ftl::render::Source::supports("device:render")) results.emplace_back("device:render");
+	if (ftl::render::Source::supports("device:openvr")) results.emplace_back("device:openvr");
+
+	return results;
+}
+
+void Feed::autoConnect() {
+	ftl::pool.push([this](int id) {
+		auto &known_hosts = getConfig()["known_hosts"];
+
+		for (auto &h : known_hosts.items()) {
+			net_->connect(h.key())->noReconnect();
+		}
+	});
+}
+
+bool Feed::sourceAvailable(const std::string &uri) {
+	return false;
+}
+
+bool Feed::sourceActive(const std::string &suri) {
+	ftl::URI uri(suri);
+
+	if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) {
+		return net_->isConnected(uri);
+	} else if (uri.getScheme() == ftl::URI::SCHEME_GROUP) {
+		// Check that every constituent source is active
+		auto &known = getConfig()["known_groups"];
+		if (known.contains(uri.getBaseURI())) {
+			auto &sources = known[uri.getBaseURI()]["sources"];
+
+			for (auto i=sources.begin(); i!=sources.end(); ++i) {
+				if (!sourceActive(i.key())) return false;
+			}
+		}
+		return true;
+	} else {
+		SHARED_LOCK(mtx_, lk);
+		return fsid_lookup_.count(uri.getBaseURI()) > 0;
+	}
+}
+
+std::string Feed::getName(const std::string &puri) {
+	ftl::URI uri(puri);
+
+	if (uri.isValid() == false) return "Invalid";
+
+	if (uri.getScheme() == ftl::URI::SCHEME_FTL) {
+		if (uri.hasAttribute("name")) return uri.getAttribute<std::string>("name");
+		try {
+			auto *cfgble = ftl::config::find(puri);
+			if (cfgble) {
+				auto &j = cfgble->getConfig();
+				std::string name = (j.is_structured()) ? j.value("name", j.value("uri", uri.getPathSegment(-1))) : uri.getPathSegment(-1);
+				return (name.size() == 0) ? uri.getHost() : name;
+			} else {
+				std::string name = uri.getPathSegment(-1);
+				return (name.size() == 0) ? uri.getHost() : name;
+			}
+			/*auto n = net_->findOne<std::string>("get_cfg", puri);
+			if (n) {
+				auto j = nlohmann::json::parse(*n);
+				return (j.is_structured()) ? j.value("name", j.value("uri", uri.getPathSegment(-1))) : uri.getPathSegment(-1);
+			}*/
+		} catch (const ftl::exception &e) {
+			e.ignore();
+		}
+		return puri;
+	} else if (uri.getScheme() == ftl::URI::SCHEME_DEVICE) {
+		if (uri.getPathSegment(0) == "pylon") return "Pylon";
+		if (uri.getPathSegment(0) == "camera") return "Web Cam";
+		if (uri.getPathSegment(0) == "stereo") return "Stereo";
+		if (uri.getPathSegment(0) == "realsense") return "Realsense";
+		if (uri.getPathSegment(0) == "screen") return "Screen Capture";
+		if (uri.getPathSegment(0) == "render") return "3D Virtual";
+		if (uri.getPathSegment(0) == "openvr") return "OpenVR";
+		return "Unknown Device";
+	} else if (uri.getScheme() == ftl::URI::SCHEME_FILE) {
+		auto &recent_files = getConfig()["recent_files"];
+		if (recent_files.is_structured() && recent_files.contains(uri.getBaseURI())) {
+			return recent_files[uri.getBaseURI()].value("name", uri.getPathSegment(-1));
+		} else {
+			LOG(INFO) << "Missing file: " << puri;
+			return uri.getPathSegment(-1);
+		}
+	} else if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) {
+		return uri.getBaseURI();
+	} else if (uri.getScheme() == ftl::URI::SCHEME_GROUP) {
+		auto &groups = getConfig()["known_groups"];
+		if (groups.contains(uri.getBaseURI())) {
+			return uri.getPathSegment(0) + std::string(" (") + std::to_string(groups[uri.getBaseURI()]["sources"].size()) + std::string(")");
+		} else {
+			return uri.getPathSegment(0);
+		}
+	}
+
+	return uri.getPathSegment(-1);
+}
+
+nlohmann::json &Feed::_add_recent_source(const ftl::URI &uri) {
+	auto &known = getConfig()["recent_sources"];
+	auto &details = known[uri.getBaseURI()];
+	std::string name = uri.getPathSegment(-1);
+
+	if (uri.hasAttribute("name")) {
+		name = uri.getAttribute<std::string>("name");
+	} else if (uri.getScheme() == ftl::URI::SCHEME_FILE) {
+		name = name.substr(0, name.find_last_of('.'));
+	}
+
+	details["uri"] = uri.to_string();
+	details["name"] = name;
+	details["last_open"] = ftl::timer::get_time();
+
+	if (uri.hasAttribute("group")) {
+		std::string grpname = uri.getAttribute<std::string>("group");
+		auto &groups = getConfig()["known_groups"];
+		auto &grpdetail = groups[std::string("group:")+grpname];
+		grpdetail["sources"][uri.getBaseURI()] = true;
+	}
+
+	return details;
+}
+
+void Feed::add(uint32_t fsid, const std::string &uri, ftl::stream::Stream* stream) {
+	fsid_lookup_[uri] = fsid;
+	latest_[fsid] = nullptr;
+	streams_[fsid].push_back(stream);
+
+	_createPipeline(fsid);
+
+	stream_->add(stream, fsid);
+	stream_->begin();
+	stream_->select(fsid, {Channel::Colour}, true);
+}
+
+uint32_t Feed::add(const std::string &path) {
+	ftl::URI uri(path);
+	return add(uri);
+}
+
+uint32_t Feed::add(const ftl::URI &uri) {
+	UNIQUE_LOCK(mtx_, lk);
+
+	//if (!uri.isValid()) throw FTL_Error("Invalid URI: " << path);
+
+	if (fsid_lookup_.count(uri.getBaseURI()) > 0) return fsid_lookup_[uri.getBaseURI()];
+
+	const auto scheme = uri.getScheme();
+	const std::string group = uri.getAttribute<std::string>("group");
+
+	if ((scheme == ftl::URI::SCHEME_OTHER) || // assumes relative path
+		(scheme == ftl::URI::SCHEME_FILE)) {
+
+		auto eix = ((scheme == ftl::URI::SCHEME_OTHER) ? uri.getBaseURI() : uri.getPath()).find_last_of('.');
+		auto ext = ((scheme == ftl::URI::SCHEME_OTHER) ? uri.getBaseURI() : uri.getPath()).substr(eix+1);
+
+		if (ext != "ftl") {
+			throw FTL_Error("Bad filename (expects .ftl) : " << uri.getBaseURI());
+		}
+
+		const int fsid = allocateFrameSetId(group);
+		auto* fstream = ftl::create<ftl::stream::File>
+			(this, std::string("ftlfile-") + std::to_string(file_counter_++));
+
+		if (scheme == ftl::URI::SCHEME_OTHER) {
+			fstream->set("filename", uri.getBaseURI());
+		}
+		else {
+			// possible BUG: uri.getPath() might return (wrong) absolute paths
+			// for relative paths (extra / at beginning)
+#ifdef WIN32
+			fstream->set("filename", uri.getPath().substr(1));
+#else
+			fstream->set("filename", uri.getPath());
+#endif
+		}
+
+		fstream->set("uri", uri.to_string());
+
+		auto &recent_files = getConfig()["recent_files"];
+		auto &file_details = recent_files[uri.getBaseURI()];
+		std::string fname = uri.getPathSegment(-1);
+		file_details["name"] = fname.substr(0, fname.find_last_of('.'));
+		file_details["last_open"] = ftl::timer::get_time();
+
+		_add_recent_source(uri);
+
+		// TODO: URI normalization; should happen in add(,,) or add(,,,) take
+		// ftl::URI instead of std::string as argument. Note the bug above.
+		// TODO: write unit test for uri parsing
+		add(fsid, uri.getBaseURI(), fstream);
+
+		add_src_cb_.trigger(fsid);
+		return fsid;
+	}
+	else if (scheme == ftl::URI::SCHEME_DEVICE) {
+		int fsid = allocateFrameSetId("");  // TODO: Support groups with devices?
+		fsid_lookup_[uri.getBaseURI()] = fsid; // Manually add mapping
+
+		std::string srcname = std::string("source") + std::to_string(fsid);
+		uri.to_json(getConfig()[srcname]);
+
+		// Make the source object
+		ftl::data::DiscreteSource *source;
+
+		latest_[fsid] = nullptr;
+		lk.unlock();
+
+		if (uri.getBaseURI() == "device:render" || uri.getBaseURI() == "device:openvr") {
+			auto *rsource = ftl::create<ftl::render::Source>(this, srcname, this);
+			renderers_[fsid] = rsource;
+			source = rsource;
+
+			// Create local builder instance
+			auto *creator = new ftl::streams::ManualSourceBuilder(pool_.get(), fsid, source);
+			if (uri.getBaseURI() == "device:openvr") creator->setFrameRate(10000);
+			else creator->setFrameRate(30);
+
+			std::shared_ptr<ftl::streams::BaseBuilder> creatorptr(creator);
+			lk.lock();
+			receiver_->registerBuilder(creatorptr);
+
+			// FIXME: pointer is deleted when removed from receiver
+			render_builders_.push_back(creator);
+		} else {
+			auto *dsource = ftl::create<ftl::rgbd::Source>(this, srcname);
+			devices_[fsid] = dsource;
+			source = dsource;
+			_createPipeline(fsid);
+
+			// Create local builder instance
+			auto *creator = new ftl::streams::IntervalSourceBuilder(pool_.get(), fsid, {source});
+			std::shared_ptr<ftl::streams::BaseBuilder> creatorptr(creator);
+
+			lk.lock();
+			receiver_->registerBuilder(creatorptr);
+
+			creator->start();
+		}
+
+		_add_recent_source(uri);
+
+		add_src_cb_.trigger(fsid);
+		return fsid;
+	}
+
+	else if ((scheme == ftl::URI::SCHEME_TCP) ||
+			 (scheme == ftl::URI::SCHEME_WS)) {
+
+		// just connect, onConnect callback will add the stream
+		// TODO: do not connect same uri twice
+		// TODO: write unit test
+
+		auto &known_hosts = getConfig()["known_hosts"];
+		auto &host_details = known_hosts[uri.getBaseURIWithUser()];
+		host_details["last_open"] = ftl::timer::get_time();
+
+		if (uri.getPathLength() == 1 && uri.getPathSegment(0) == "*") {
+			auto *p = net_->connect(uri.getBaseURIWithUser());
+			if (p->waitConnection()) {
+				ftl::pool.push([this,p](int id) {_updateNetSources(p, true); });
+			}
+		} else {
+			ftl::pool.push([this,path = uri.getBaseURIWithUser()](int id) { net_->connect(path)->noReconnect(); });
+		}
+
+	}
+	else if (scheme == ftl::URI::SCHEME_FTL) {
+		// Attempt to ensure connection first
+		auto &known = getConfig()["recent_sources"];
+		auto &details = known[uri.getBaseURI()];
+		if (details.contains("host")) {
+			auto *p = net_->connect(details["host"].get<std::string>());
+			p->noReconnect();
+			if (!p->waitConnection()) {
+				throw FTL_Error("Could not connect to host " << details["host"].get<std::string>() << " for stream " << uri.getBaseURI());
+			}
+		} else {
+			// See if it can otherwise be found?
+			LOG(WARNING) << "Could not find stream host";
+		}
+
+		auto *stream = ftl::create<ftl::stream::Net>
+			(this, std::string("netstream")
+			+std::to_string(fsid_lookup_.size()), net_);
+
+		int fsid = allocateFrameSetId(group);
+
+		stream->set("uri", uri.to_string());
+		add(fsid, uri.getBaseURI(), stream);
+
+		LOG(INFO)	<< "Add Stream: "
+					<< stream->value("uri", std::string("NONE"))
+					<< " (" << fsid << ")";
+
+		add_src_cb_.trigger(fsid);
+		return fsid;
+	} else if (scheme == ftl::URI::SCHEME_GROUP) {
+		auto &known = getConfig()["known_groups"];
+		if (known.contains(uri.getBaseURI())) {
+			auto &sources = known[uri.getBaseURI()]["sources"];
+
+			lk.unlock();
+			for (auto i=sources.begin(); i!=sources.end(); ++i) {
+				ftl::URI uri2(i.key());
+				uri2.setAttribute("group", uri.getPathSegment(0));
+				add(uri2);
+			}
+
+			lk.lock();
+			_add_recent_source(uri);
+		}
+	}
+	else {
+		throw ftl::exception("bad uri");
+	}
+	return -1;
+}
+
+void Feed::render() {
+	SHARED_LOCK(mtx_, lk);
+	auto builders = render_builders_;
+	lk.unlock();
+
+	for (auto *r : builders) {
+		r->tick();
+	}
+}
+
+uint32_t Feed::getID(const std::string &source) {
+	return fsid_lookup_.at(source);
+}
+
+const std::unordered_set<Channel> Feed::availableChannels(ftl::data::FrameID id) {
+	ftl::data::FrameSetPtr fs;
+	// FIXME: Should this be locked?
+	std::atomic_store(&fs, latest_.at(id.frameset()));
+	if (fs && fs->hasFrame(id.source())) {
+		return (*fs.get())[id.source()].allChannels();
+	}
+	return {};
+}
+
+std::vector<ftl::data::FrameID> Feed::listFrames() {
+	std::vector<ftl::data::FrameID> result;
+	SHARED_LOCK(mtx_, lk);
+	result.reserve(fsid_lookup_.size());
+	for (const auto [k, fs] : latest_) {
+		if (fs) {
+			for (unsigned i = 0; i < fs->frames.size(); i++) {
+				result.push_back(ftl::data::FrameID(k, i));
+			}
+		}
+	}
+	return result;
+}
+
+std::string Feed::getURI(uint32_t fsid) {
+	SHARED_LOCK(mtx_, lk);
+	for (const auto& [k, v] : fsid_lookup_) {
+		if (v == fsid) {
+			return k;
+		}
+	}
+	return "";
+}
+
+std::string Feed::getSourceURI(ftl::data::FrameID id) {
+	/*if (streams_.count(id.frameset())) {
+		auto i = streams_.find(id.frameset());
+		return i->second->getID();
+	} else if (devices_.count(id.frameset())) {
+		auto i = devices_.find(id.frameset());
+		return i->second->getID();
+	} else if (renderers_.count(id.frameset())) {
+		auto i = renderers_.find(id.frameset());
+		return i->second->getID();
+	}*/
+
+	return "";
+}
+
+std::vector<unsigned int> Feed::listFrameSets() {
+	SHARED_LOCK(mtx_, lk);
+	std::vector<unsigned int> result;
+	result.reserve(fsid_lookup_.size());
+	for (const auto [k, fs] : latest_) {
+		if (fs) {
+			result.push_back(k);
+		}
+	}
+	return result;
+}
+
+void Feed::lowLatencyMode() {
+	receiver_->set("frameset_buffer_size", 0);
+}
+
+// ==== Record =================================================================
+
+void Feed::startRecording(Filter *f, const std::string &filename) {
+	{
+		UNIQUE_LOCK(mtx_, lk);
+		if (_isRecording()) throw FTL_Error("Already recording, cannot record " << filename);
+
+		record_filter_ = f;
+
+		auto *fstream = ftl::create<ftl::stream::File>(this, "record_file");
+		fstream->setMode(ftl::stream::File::Mode::Write);
+		fstream->set("filename", filename);
+		record_stream_->add(fstream);
+		record_stream_->begin();
+		recorder_->resetSender();
+	}
+	_beginRecord(f);
+}
+
+void Feed::startStreaming(Filter *f, const std::string &filename) {
+	if (_isRecording()) throw FTL_Error("Already recording, cannot live stream: " << filename);
+
+	// TODO: Allow net streaming
+}
+
+void Feed::startStreaming(Filter *f) {
+	{
+		UNIQUE_LOCK(mtx_, lk);
+		if (_isRecording()) throw FTL_Error("Already recording, cannot live stream");
+
+		record_filter_ = f;
+
+		auto *nstream = ftl::create<ftl::stream::Net>(this, "live_stream", net_);
+		nstream->set("uri", value("uri", std::string("ftl://vision.utu.fi/live")));
+
+		record_new_client_ = nstream->onClientConnect([this](ftl::net::Peer *p) {
+			stream_->reset();
+			return true;
+		});
+
+		record_stream_->add(nstream);
+		record_stream_->begin();
+		recorder_->resetSender();
+	}
+	_beginRecord(f);
+}
+
+void Feed::_beginRecord(Filter *f) {
+
+	handle_record_ = pool_->onFlushSet([this, f](ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
+		// Skip framesets not in filter.
+		if (!f->sources().empty() && f->sources().count(fs.frameset()) == 0) return true;
+
+		if (f->channels().count(c)) {
+			recorder_->post(fs, c);
+		} else {
+			recorder_->post(fs, c, true);
+		}
+		return true;
+	});
+
+	handle_record2_ = f->onWithHandle([this, f](const ftl::data::FrameSetPtr &fs) {
+		record_stream_->select(fs->frameset(), f->channels(), true);
+		stream_->select(fs->frameset(), f->channels(), true);
+		ftl::pool.push([fs](int id) {
+			try {
+				fs->flush();  // Force now to reduce latency
+			} catch (const ftl::exception &e) {
+				LOG(ERROR) << "Exception when sending: " << e.what();
+			}
+		});
+		return true;
+	});
+}
+
+void Feed::stopRecording() {
+	UNIQUE_LOCK(mtx_, lk);
+	_stopRecording();
+}
+
+void Feed::_stopRecording() {
+	handle_record_.cancel();
+	handle_record2_.cancel();
+	record_new_client_.cancel();
+	record_stream_->end();
+
+	auto garbage = record_stream_->streams();
+
+	record_stream_->clear();
+
+	for (auto *s : garbage) {
+		delete s;
+	}
+
+	record_filter_ = nullptr;
+}
+
+bool Feed::isRecording() {
+	SHARED_LOCK(mtx_, lk);
+	return _isRecording();
+}
+
+bool Feed::_isRecording() {
+	return record_stream_->streams().size() != 0;
+}
diff --git a/components/streams/src/filestream.cpp b/components/streams/src/filestream.cpp
index ebfbdc6b0..d7ac4bcf9 100644
--- a/components/streams/src/filestream.cpp
+++ b/components/streams/src/filestream.cpp
@@ -1,5 +1,6 @@
 #include <fstream>
 #include <ftl/streams/filestream.hpp>
+#include <ftl/timer.hpp>
 
 #define LOGURU_REPLACE_GLOG 1
 #include <loguru.hpp>
@@ -16,7 +17,7 @@ File::File(nlohmann::json &config) : Stream(config), ostream_(nullptr), istream_
 	checked_ = false;
 	save_data_ = value("save_data", false);
 
-	on("save_data", [this](const ftl::config::Event &e) {
+	on("save_data", [this]() {
 		save_data_ = value("save_data", false);
 	});
 }
@@ -34,7 +35,7 @@ File::File(nlohmann::json &config, std::ofstream *os) : Stream(config), ostream_
 	checked_ = false;
 	save_data_ = value("save_data", false);
 
-	on("save_data", [this](const ftl::config::Event &e) {
+	on("save_data", [this]() {
 		save_data_ = value("save_data", false);
 	});
 }
@@ -54,6 +55,8 @@ bool File::_checkFile() {
 	int min_ts_diff = 1000;
 	first_ts_ = 10000000000000ll;
 
+	std::unordered_set<ftl::codecs::codec_t> codecs_found;
+
 	while (count > 0) {
 		std::tuple<ftl::codecs::StreamPacket,ftl::codecs::Packet> data;
 		if (!readPacket(data)) {
@@ -61,7 +64,9 @@ bool File::_checkFile() {
 		}
 
 		auto &spkt = std::get<0>(data);
-		//auto &pkt = std::get<1>(data);
+		auto &pkt = std::get<1>(data);
+
+		codecs_found.emplace(pkt.codec);
 
 		if (spkt.timestamp < first_ts_) first_ts_ = spkt.timestamp;
 
@@ -88,12 +93,14 @@ bool File::_checkFile() {
 
 	LOG(INFO) << " -- Frame rate = " << (1000 / min_ts_diff);
 	if (!is_video_) LOG(INFO) << " -- Static image";
-	interval_ = min_ts_diff;
-	return true;
-}
 
-bool File::onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &f) {
-	cb_ = f;
+	std::string codec_str = "";
+	for (auto c : codecs_found) {
+		codec_str += std::string(" ") + std::to_string(int(c));
+	}
+	LOG(INFO) << " -- Codecs:" << codec_str;
+
+	interval_ = min_ts_diff;
 	return true;
 }
 
@@ -152,26 +159,31 @@ bool File::readPacket(std::tuple<ftl::codecs::StreamPacket,ftl::codecs::Packet>
 		msgpack::object obj = msg.get();
 
 		try {
-			obj.convert(data);
+			// Older versions have a different SPKT structure.
+			if (version_ < 5) {
+				std::tuple<ftl::codecs::StreamPacketV4, ftl::codecs::Packet> datav4;
+				obj.convert(datav4);
+
+				auto &spkt = std::get<0>(data);
+				auto &spktv4 = std::get<0>(datav4);
+				spkt.streamID = spktv4.streamID;
+				spkt.channel = spktv4.channel;
+				spkt.frame_number = spktv4.frame_number;
+				spkt.timestamp = spktv4.timestamp;
+				spkt.flags = 0;
+
+				std::get<1>(data) = std::move(std::get<1>(datav4));
+			} else {
+				obj.convert(data);
+			}
 		} catch (std::exception &e) {
 			LOG(INFO) << "Corrupt message: " << buffer_in_.nonparsed_size() << " - " << e.what();
 			//active_ = false;
 			return false;
 		}
 
-		// Fix to clear flags for version 2.
-		if (version_ <= 2) {
-			std::get<1>(data).flags = 0;
-		}
-		if (version_ < 4) {
-			std::get<0>(data).frame_number = std::get<0>(data).streamID;
-			std::get<0>(data).streamID = 0;
-			if (isFloatChannel(std::get<0>(data).channel)) std::get<1>(data).flags |= ftl::codecs::kFlagFloat;
-
-			auto codec = std::get<1>(data).codec;
-			if (codec == ftl::codecs::codec_t::HEVC) std::get<1>(data).codec = ftl::codecs::codec_t::HEVC_LOSSLESS;
-		}
-		std::get<0>(data).version = 4;
+		// Correct for older version differences.
+		_patchPackets(std::get<0>(data), std::get<1>(data));
 
 		return true;
 	}
@@ -179,6 +191,28 @@ bool File::readPacket(std::tuple<ftl::codecs::StreamPacket,ftl::codecs::Packet>
 	return false;
 }
 
+void File::_patchPackets(ftl::codecs::StreamPacket &spkt, ftl::codecs::Packet &pkt) {
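+	// Upgrade stream packets written by older file format versions to the current (v5) layout.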
+	// Fix to clear flags for version 2.
+	if (version_ <= 2) {
+		pkt.flags = 0;
+	}
+	if (version_ < 4) {
+		spkt.frame_number = spkt.streamID;
+		spkt.streamID = 0;
+		if (isFloatChannel(spkt.channel)) pkt.flags |= ftl::codecs::kFlagFloat;
+
+		auto codec = pkt.codec;
+		if (codec == ftl::codecs::codec_t::HEVC) pkt.codec = ftl::codecs::codec_t::HEVC_LOSSLESS;
+	}
+
+	spkt.version = 5;
+
+	// Fix for flags corruption
+	if (pkt.data.size() == 0) {
+		pkt.flags = 0;
+	}
+}
+
 bool File::tick(int64_t ts) {
 	if (!active_) return false;
 	if (mode_ != Mode::Read) {
@@ -194,13 +228,16 @@ bool File::tick(int64_t ts) {
 	#endif
 
 	if (jobs_ > 0) {
-		//LOG(ERROR) << "STILL HAS JOBS";
 		return true;
 	}
 
+	bool has_data = false;
+
 	// Check buffer first for frames already read
 	{
 		UNIQUE_LOCK(data_mutex_, dlk);
+		if (data_.size() > 0) has_data = true;
+
 		for (auto i = data_.begin(); i != data_.end(); ++i) {
 			if (std::get<0>(*i).timestamp <= timestamp_) {
 				++jobs_;
@@ -209,8 +246,12 @@ bool File::tick(int64_t ts) {
 					auto &spkt = std::get<0>(*i);
 					auto &pkt = std::get<1>(*i);
 
+					spkt.localTimestamp = spkt.timestamp;
+
 					try {
-						if (cb_) cb_(spkt, pkt);
+						cb_.trigger(spkt, pkt);
+					} catch (const ftl::exception &e) {
+						LOG(ERROR) << "Exception in packet callback: " << e.what() << e.trace();
 					} catch (std::exception &e) {
 						LOG(ERROR) << "Exception in packet callback: " << e.what();
 					}
@@ -227,6 +268,7 @@ bool File::tick(int64_t ts) {
 
 	while ((active_ && istream_->good()) || buffer_in_.nonparsed_size() > 0u) {
 		UNIQUE_LOCK(data_mutex_, dlk);
+		auto *lastData = (data_.size() > 0) ? &data_.back() : nullptr;
 		auto &data = data_.emplace_back();
 		dlk.unlock();
 
@@ -240,7 +282,7 @@ bool File::tick(int64_t ts) {
 		// Adjust timestamp
 		// FIXME: A potential bug where multiple times are merged into one?
 		std::get<0>(data).timestamp = (((std::get<0>(data).timestamp) - first_ts_) / interval_) * interval_ + timestart_;
-		std::get<0>(data).hint_capability = (is_video_) ? 0 : ftl::codecs::kStreamCap_Static;
+		std::get<0>(data).hint_capability = ((is_video_) ? 0 : ftl::codecs::kStreamCap_Static) | ftl::codecs::kStreamCap_Recorded;
 
 		// Maintain availability of channels.
 		available(0) += std::get<0>(data).channel;
@@ -248,23 +290,30 @@ bool File::tick(int64_t ts) {
 		// This should only occur for first few frames, generally otherwise
 		// the data buffer is already several frames ahead so is processed
 		// above. Hence, no need to bother parallelising this bit.
-		if (std::get<0>(data).timestamp <= timestamp_) {
+		/*if (std::get<0>(data).timestamp <= timestamp_) {
 			std::get<0>(data).timestamp = ts;
-			if (cb_) {
+			//if (cb_) {
 				dlk.lock();
 				try {
-					cb_(std::get<0>(data),std::get<1>(data));
+					LOG(INFO) << "EARLY TRIGGER: " << std::get<0>(data).timestamp << " - " << int(std::get<0>(data).channel);
+					cb_.trigger(std::get<0>(data),std::get<1>(data));
 				} catch (std::exception &e) {
 					LOG(ERROR) << "Exception in packet callback: " << e.what();
 				}
 				data_.pop_back();
-			}
-		} else if (std::get<0>(data).timestamp > extended_ts) {
+			//}
+		}*/
+		if (version_ < 5 && lastData) {
+			// For versions < 5, add completed flag to previous data
+			std::get<0>(*lastData).flags |= ftl::codecs::kFlagCompleted;
+		}
+
+		if (std::get<0>(data).timestamp > extended_ts) {
 			break;
 		}
 	}
 
-	timestamp_ += interval_;
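+	// Only advance playback time when buffered packets were actually available this tick.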
+	if (has_data) timestamp_ += interval_;
 
 	if (data_.size() == 0 && value("looping", true)) {
 		buffer_in_.reset();
@@ -315,6 +364,7 @@ bool File::run() {
 }
 
 bool File::begin(bool dorun) {
+	if (active_) return true;
 	if (mode_ == Mode::Read) {
 		if (!checked_) _checkFile();
 		_open();
@@ -355,11 +405,17 @@ bool File::begin(bool dorun) {
 }
 
 bool File::end() {
-	UNIQUE_LOCK(mutex_, lk);
 	if (!active_) return false;
 	active_ = false;
+	
 	timer_.cancel();
 
+	UNIQUE_LOCK(mutex_, lk);
+
+	while (jobs_ > 0) {
+		std::this_thread::sleep_for(std::chrono::milliseconds(1));
+	}
+
 	if (mode_ == Mode::Read) {
 		if (istream_) {
 			istream_->close();
diff --git a/components/streams/src/injectors.cpp b/components/streams/src/injectors.cpp
index 539c9d376..1afcf5e84 100644
--- a/components/streams/src/injectors.cpp
+++ b/components/streams/src/injectors.cpp
@@ -5,17 +5,17 @@ using ftl::codecs::Channel;
 using ftl::util::FTLVectorBuffer;
 
 void ftl::stream::injectCalibration(ftl::stream::Stream *stream, const ftl::rgbd::FrameSet &fs, int ix, bool right) {
-	ftl::stream::injectCalibration(stream, fs.frames[ix], fs.timestamp, fs.id, ix, right);
+	ftl::stream::injectCalibration(stream, fs.frames[ix].cast<ftl::rgbd::Frame>(), fs.timestamp(), fs.frameset(), ix, right);
 }
 
 void ftl::stream::injectPose(ftl::stream::Stream *stream, const ftl::rgbd::FrameSet &fs, int ix) {
-	ftl::stream::injectPose(stream, fs.frames[ix], fs.timestamp, ix);
+	ftl::stream::injectPose(stream, fs.frames[ix].cast<ftl::rgbd::Frame>(), fs.timestamp(), ix);
 }
 
 void ftl::stream::injectConfig(ftl::stream::Stream *stream, const ftl::rgbd::FrameSet &fs, int ix) {
 	ftl::codecs::StreamPacket spkt = {
 		4,
-		fs.timestamp,
+		fs.timestamp(),
 		0,
 		static_cast<uint8_t>(ix),
 		Channel::Configuration
@@ -29,7 +29,7 @@ void ftl::stream::injectConfig(ftl::stream::Stream *stream, const ftl::rgbd::Fra
 	pkt.flags = 0;
 
 	FTLVectorBuffer buf(pkt.data);
-	msgpack::pack(buf, fs.frames[ix].getConfigString());
+	//msgpack::pack(buf, fs.frames[ix].getConfigString());
 
 	stream->post(spkt, pkt);
 }
@@ -71,6 +71,8 @@ void ftl::stream::injectCalibration(ftl::stream::Stream *stream, const ftl::rgbd
 		std::make_tuple(f.getRightCamera(), Channel::Right, 0) :
 		std::make_tuple(f.getLeftCamera(), Channel::Left, 0);
 
+	//auto data = (right) ? f.getRightCamera() : f.getLeftCamera();
+
 	ftl::codecs::Packet pkt;
 	pkt.codec = ftl::codecs::codec_t::MSGPACK;
 	//pkt.definition = ftl::codecs::definition_t::Any;
diff --git a/components/streams/src/netstream.cpp b/components/streams/src/netstream.cpp
index 9e0f1f48b..0d6fba657 100644
--- a/components/streams/src/netstream.cpp
+++ b/components/streams/src/netstream.cpp
@@ -3,6 +3,11 @@
 #define LOGURU_REPLACE_GLOG 1
 #include <loguru.hpp>
 
+#ifndef WIN32
+#include <unistd.h>
+#include <limits.h>
+#endif
+
 using ftl::stream::Net;
 using ftl::codecs::StreamPacket;
 using ftl::codecs::Packet;
@@ -22,25 +27,37 @@ float Net::sample_count__ = 0.0f;
 int64_t Net::last_msg__ = 0;
 MUTEX Net::msg_mtx__;
 
+static std::list<std::string> net_streams;
+static std::atomic_flag has_bindings = ATOMIC_FLAG_INIT;
+static SHARED_MUTEX stream_mutex;
+
 Net::Net(nlohmann::json &config, ftl::net::Universe *net) : Stream(config), active_(false), net_(net), clock_adjust_(0), last_ping_(0) {
-	// TODO: Install "find_stream" binding if not installed...
-	if (!net_->isBound("find_stream")) {
-		net_->bind("find_stream", [this](const std::string &uri) -> optional<ftl::UUID> {
+	if (!has_bindings.test_and_set()) {
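+		// These RPC bindings are shared by every Net stream, so install them only once per process.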
+		if (net_->isBound("find_stream")) net_->unbind("find_stream");
+		net_->bind("find_stream", [net = net_](const std::string &uri, bool proxy) -> optional<ftl::UUID> {
 			LOG(INFO) << "REQUEST FIND STREAM: " << uri;
-			if (uri_ == uri) {
-				return net_->id();
-			} else {
-				return {};
+
+			ftl::URI u1(uri);
+			std::string base = u1.getBaseURI();
+
+			SHARED_LOCK(stream_mutex, lk);
+			for (const auto &s : net_streams) {
+				ftl::URI u2(s);
+				// Don't compare query string components.
+				if (base == u2.getBaseURI()) {
+					return net->id();
+				}
 			}
+			return {};
 		});
-	}
 
-	if (!net_->isBound("list_streams")) {
+		if (net_->isBound("list_streams")) net_->unbind("list_streams");
 		net_->bind("list_streams", [this]() {
 			LOG(INFO) << "REQUEST LIST STREAMS";
-			vector<string> streams;
-			streams.push_back(uri_);
-			return streams;
+			SHARED_LOCK(stream_mutex, lk);
+			//vector<string> streams;
+			//streams.push_back(uri_);  // Send full original URI
+			return net_streams;
 		});
 	}
 
@@ -52,21 +69,17 @@ Net::~Net() {
 	end();
 }
 
-bool Net::onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &f) {
-	cb_ = f;
-	return true;
-}
-
 bool Net::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 	if (!active_) return false;
 
 	// Check if the channel has been requested recently enough. If not then disable it.
 	if (host_ && pkt.data.size() > 0 && spkt.frame_number == 0 && static_cast<int>(spkt.channel) >= 0 && static_cast<int>(spkt.channel) < 32) {
 		if (reqtally_[static_cast<int>(spkt.channel)] == 0) {
+			--reqtally_[static_cast<int>(spkt.channel)];
 			auto sel = selected(0);
 			sel -= spkt.channel;
 			select(0, sel);
-			LOG(INFO) << "Unselect Channel: " << (int)spkt.channel;
+			LOG(INFO) << "Unselect Channel: " << (int)spkt.channel << " (" << (int)spkt.streamID << ")";
 		} else {
 			--reqtally_[static_cast<int>(spkt.channel)];
 		}
@@ -91,9 +104,10 @@ bool Net::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet
 
 				try {
 					// FIXME: This doesn't work for file sources with file relative timestamps...
-					short pre_transmit_latency = short(ftl::timer::get_time() - spkt.timestamp);
+					short pre_transmit_latency = short(ftl::timer::get_time() - spkt.localTimestamp);
+
 					if (!net_->send(client.peerid,
-							uri_,
+							base_uri_,
 							pre_transmit_latency,  // Time since timestamp for tx
 							spkt,
 							pkt)) {
@@ -111,9 +125,9 @@ bool Net::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet
 			}
 		} else {
 			try {
-				short pre_transmit_latency = short(ftl::timer::get_time() - spkt.timestamp);
+				short pre_transmit_latency = short(ftl::timer::get_time() - spkt.localTimestamp);
 				if (!net_->send(peer_,
-						uri_,
+						base_uri_,
 						pre_transmit_latency,  // Time since timestamp for tx
 						spkt,
 						pkt)) {
@@ -135,17 +149,20 @@ bool Net::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet
 bool Net::begin() {
 	if (active_) return true;
 	if (!get<string>("uri")) return false;
-	active_ = true;
 
 	uri_ = *get<string>("uri");
 
-	if (net_->isBound(uri_)) {
+	ftl::URI u(uri_);
+	if (!u.isValid() || !(u.getScheme() == ftl::URI::SCHEME_FTL)) return false;
+	base_uri_ = u.getBaseURI();
+
+	if (net_->isBound(base_uri_)) {
 		LOG(ERROR) << "Stream already exists! - " << uri_;
 		active_ = false;
 		return false;
 	}
 
-	net_->bind(uri_, [this](ftl::net::Peer &p, short ttimeoff, const ftl::codecs::StreamPacket &spkt_raw, const ftl::codecs::Packet &pkt) {
+	net_->bind(base_uri_, [this](ftl::net::Peer &p, short ttimeoff, const ftl::codecs::StreamPacket &spkt_raw, const ftl::codecs::Packet &pkt) {
 		int64_t now = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()).time_since_epoch().count();
 
 		if (!active_) return;
@@ -153,7 +170,7 @@ bool Net::begin() {
 		StreamPacket spkt = spkt_raw;
 		// FIXME: see #335
 		//spkt.timestamp -= clock_adjust_;
-		spkt.originClockDelta = clock_adjust_;
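+		// Estimate the frame's origin time in the local clock by subtracting the sender's pre-transmit latency from the arrival time.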
+		spkt.localTimestamp = now - int64_t(ttimeoff);
 		spkt.hint_capability = 0;
 		spkt.hint_source_total = 0;
 		//LOG(INFO) << "LATENCY: " << ftl::timer::get_time() - spkt.localTimestamp() << " : " << spkt.timestamp << " - " << clock_adjust_;
@@ -166,6 +183,7 @@ bool Net::begin() {
 				last_frame_ = spkt.timestamp;
 
 				if (size() > 0) {
+					// TODO: For all framesets
 					auto sel = selected(0);
 
 					// A change in channel selections, so send those requests now
@@ -173,10 +191,8 @@ bool Net::begin() {
 						auto changed = sel - last_selected_;
 						last_selected_ = sel;
 
-						if (size() > 0) {
-							for (auto c : changed) {
-								_sendRequest(c, kAllFramesets, kAllFrames, 30, 0);
-							}
+						for (auto c : changed) {
+							_sendRequest(c, kAllFramesets, kAllFrames, 30, 0);
 						}
 					}
 				}
@@ -185,7 +201,7 @@ bool Net::begin() {
 				if (tally_ <= 5) {
 					// Yes, so send new requests
 					if (size() > 0) {
-						auto sel = selected(0);
+						const auto &sel = selected(0);
 						
 						for (auto c : sel) {
 							_sendRequest(c, kAllFramesets, kAllFrames, 30, 0);
@@ -201,7 +217,7 @@ bool Net::begin() {
 		// If hosting and no data then it is a request for data
 		// Note: a non host can receive empty data, meaning data is available
 		// but that you did not request it
-		if (host_ && pkt.data.size() == 0) {
+		if (host_ && pkt.data.size() == 0 && (spkt.flags & ftl::codecs::kFlagRequest)) {
 			// FIXME: Allow unselecting ...?
 			if (spkt.frameSetID() == 255) {
 				for (size_t i=0; i<size(); ++i) {
@@ -213,6 +229,7 @@ bool Net::begin() {
 			} else {
 				select(spkt.frameSetID(), selected(spkt.frameSetID()) + spkt.channel);
 			}
+
 			_processRequest(p, pkt);
 		} else {
 			// FIXME: Allow availability to change...
@@ -220,18 +237,46 @@ bool Net::begin() {
 			//LOG(INFO) << "AVAILABLE: " << (int)spkt.channel;
 		}
 
-		if (cb_) {
-			cb_(spkt, pkt);
+		//if (cb_) {
+			cb_.trigger(spkt, pkt);
 			if (pkt.data.size() > 0) _checkDataRate(pkt.data.size(), now-(spkt.timestamp+ttimeoff), spkt.timestamp);
-		}
+		//}
 	});
 
-	auto p = net_->findOne<ftl::UUID>("find_stream", uri_);
+	// First find non-proxy version, then check for proxy version if no match
+	auto p = net_->findOne<ftl::UUID>("find_stream", uri_, false);
+	if (!p) p = net_->findOne<ftl::UUID>("find_stream", uri_, true);
+
 	if (!p) {
 		LOG(INFO) << "Hosting stream: " << uri_;
 		// TODO: Register URI as available.
 		host_ = true;
 
+		// Alias the URI to this configurable (if not already aliased) so that
+		// the URI can be used to access its configuration data.
+		if (ftl::config::find(uri_) == nullptr) {
+			ftl::config::alias(uri_, this);
+		}
+
+		{
+			UNIQUE_LOCK(stream_mutex, lk);
+			net_streams.push_back(uri_);
+		}
+
+		// Automatically set name if missing
+		if (!get<std::string>("name")) {
+			char hostname[1024] = {0};
+			#ifdef WIN32
+			DWORD size = 1024;
+			GetComputerName(hostname, &size);
+			#else
+			gethostname(hostname, 1024);
+			#endif
+
+			set("name", std::string(hostname));
+		}
+
+		active_ = true;
 		net_->broadcast("add_stream", uri_);
 
 		return true;
@@ -243,6 +288,8 @@ bool Net::begin() {
 	peer_ = *p;
 	tally_ = 30*kTallyScale;
 	for (size_t i=0; i<reqtally_.size(); ++i) reqtally_[i] = 0;
+
+	active_ = true;
 	
 	// Initially send a colour request just to create the connection
 	_sendRequest(Channel::Colour, kAllFramesets, kAllFrames, 30, 0);
@@ -270,23 +317,25 @@ bool Net::_sendRequest(Channel c, uint8_t frameset, uint8_t frames, uint8_t coun
 
 	Packet pkt = {
 		codec_t::Any,			// TODO: Allow specific codec requests
-		definition_t::Any,		// TODO: Allow specific definition requests
+		0,
 		count,
-		bitrate
+		bitrate,
+		0
 	};
 
 	StreamPacket spkt = {
-		4,
+		5,
 		ftl::timer::get_time(),
 		frameset,
 		frames,
 		c,
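+		// Explicit request flag: hosts only treat empty packets as channel requests when this is set.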
+		ftl::codecs::kFlagRequest,
 		0,
 		0,
 		0
 	};
 
-	net_->send(peer_, uri_, (short)0, spkt, pkt);
+	net_->send(peer_, base_uri_, (short)0, spkt, pkt);
 
 	// FIXME: Find a way to use this for correct stream latency info
 	if (false) { //if (c == Channel::Colour) {  // TODO: Not every time
@@ -353,6 +402,12 @@ bool Net::_processRequest(ftl::net::Peer &p, const ftl::codecs::Packet &pkt) {
 			client.quality = 255;  // TODO: Use quality given in packet
 			client.txcount = 0;
 			client.txmax = static_cast<int>(pkt.frame_count)*kTallyScale;
+
+			try {
+				connect_cb_.trigger(&p);
+			} catch (const ftl::exception &e) {
+				LOG(ERROR) << "Exception in stream connect callback: " << e.what();
+			}
 		}
 
 		// First connected peer (or reconnecting peer) becomes a time server
@@ -394,6 +449,13 @@ float Net::getRequiredBitrate() {
 
 bool Net::end() {
 	if (!active_) return false;
+
+	{
+		UNIQUE_LOCK(stream_mutex, lk);
+		auto i = std::find(net_streams.begin(), net_streams.end(), uri_);
+		if (i != net_streams.end()) net_streams.erase(i);
+	}
+
 	active_ = false;
 	net_->unbind(uri_);
 	return true;
diff --git a/components/streams/src/receiver.cpp b/components/streams/src/receiver.cpp
index a319d60e3..35b6675f7 100644
--- a/components/streams/src/receiver.cpp
+++ b/components/streams/src/receiver.cpp
@@ -2,6 +2,7 @@
 #include <ftl/codecs/depth_convert_cuda.hpp>
 #include <ftl/profiler.hpp>
 #include <ftl/audio/software_decoder.hpp>
+#include <ftl/rgbd/capabilities.hpp>
 
 #include <opencv2/cudaimgproc.hpp>
 #include <opencv2/highgui.hpp>
@@ -23,27 +24,27 @@ using ftl::stream::parsePose;
 using ftl::stream::parseConfig;
 using ftl::stream::injectCalibration;
 using ftl::stream::injectPose;
-using ftl::codecs::definition_t;
+using ftl::rgbd::Capability;
 
-Receiver::Receiver(nlohmann::json &config) : ftl::Configurable(config), stream_(nullptr) {
+Receiver::Receiver(nlohmann::json &config, ftl::data::Pool *p) : ftl::Configurable(config), stream_(nullptr), pool_(p) {
 	timestamp_ = 0;
 	second_channel_ = Channel::Depth;
 	frame_mask_ = value("frame_mask", 0xFFFFFFFFu);
 
-	size_t bsize = value("frameset_buffer_size", 3);
-	for (size_t i=0; i<ftl::stream::kMaxStreams; ++i) {
+	//size_t bsize = value("frameset_buffer_size", 3);
+	/*for (size_t i=0; i<ftl::stream::kMaxStreams; ++i) {
 		builder_[i].setID(i);
 		builder_[i].setBufferSize(bsize);
-	}
+	}*/
 
-	on("frameset_buffer_size", [this](const ftl::config::Event &e) {
+	on("frameset_buffer_size", [this]() {
 		size_t bsize = value("frameset_buffer_size", 3);
-		for (size_t i=0; i<ftl::stream::kMaxStreams; ++i) {
-			builder_[i].setBufferSize(bsize);
+		for (auto &i : builders_) {
+			i.second->setBufferSize(bsize);
 		}
 	});
 
-	on("frame_mask", [this](const ftl::config::Event &e) {
+	on("frame_mask", [this]() {
 		frame_mask_ = value("frame_mask", 0xFFFFFFFFu);
 	});
 }
@@ -53,13 +54,58 @@ Receiver::~Receiver() {
 	//	stream_->onPacket(nullptr);
 	//}
 
-	builder_[0].onFrameSet(nullptr);
+	//builder_[0].onFrameSet(nullptr);
+}
+
+void Receiver::loopback(ftl::data::Frame &f, ftl::codecs::Channel c) {
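+	// Feed a locally generated channel change back into the builder as if it had arrived from the stream.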
+	auto &build = builder(f.frameset());
+	auto fs = build.get(f.timestamp(), f.source());
+	fs->frames[f.source()].informChange(c, build.changeType(), f.getAnyMutable(c));
+	//f.remove(c);
+}
+
+ftl::streams::BaseBuilder &Receiver::builder(uint32_t id) {
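+	// Lazily create a ForeignBuilder for this frameset ID and forward its completed
+	// framesets to the receiver's frameset callback.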
+	auto i = builders_.find(id);
+	if (i == builders_.end()) {
+		auto fb = new ftl::streams::ForeignBuilder();
+		builders_[id] = std::shared_ptr<ftl::streams::BaseBuilder>(fb);
+		auto &b = builders_[id];
+		b->setID(id);
+		b->setPool(pool_);
+		fb->setBufferSize(value("frameset_buffer_size", 3));
+		handles_[id] = std::move(fb->onFrameSet([this](const ftl::data::FrameSetPtr& fs) {
+			callback_.trigger(fs);
+			return true;
+		}));
+		return *b;
+	} else {
+		return *(i->second);
+	}
+}
+
+void Receiver::removeBuilder(uint32_t id) {
+	UNIQUE_LOCK(mutex_, lk);
+	auto i = builders_.find(id);
+	if (i != builders_.end()) {
+		handles_.erase(id);
+		builders_.erase(i);
+	}
 }
 
-void Receiver::onAudio(const ftl::audio::FrameSet::Callback &cb) {
-	audio_cb_ = cb;
+void Receiver::registerBuilder(const std::shared_ptr<ftl::streams::BaseBuilder> &b) {
+	auto i = builders_.find(b->id());
+	if (i != builders_.end()) throw FTL_Error("Builder already exists");
+	builders_[b->id()] = b;
+	handles_[b->id()] = std::move(b->onFrameSet([this](const ftl::data::FrameSetPtr& fs) {
+		callback_.trigger(fs);
+		return true;
+	}));
 }
 
+//void Receiver::onAudio(const ftl::audio::FrameSet::Callback &cb) {
+//	audio_cb_ = cb;
+//}
+
 /*void Receiver::_processConfig(InternalStates &frame, const ftl::codecs::Packet &pkt) {
 	std::string cfg;
 	auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
@@ -98,15 +144,15 @@ Receiver::InternalVideoStates &Receiver::_getVideoFrame(const StreamPacket &spkt
 	while (video_frames_[spkt.streamID].size() <= fn) {
 		//frames_.resize(spkt.frameNumber()+1);
 		video_frames_[spkt.streamID].push_back(new InternalVideoStates);
-		video_frames_[spkt.streamID][video_frames_[spkt.streamID].size()-1]->state.set("name",std::string("Source ")+std::to_string(fn+1));
+		//video_frames_[spkt.streamID][video_frames_[spkt.streamID].size()-1]->state.set("name",std::string("Source ")+std::to_string(fn+1));
 	}
 	auto &f = *video_frames_[spkt.streamID][fn];
-	if (!f.frame.origin()) f.frame.setOrigin(&f.state);
+	//if (!f.frame.origin()) f.frame.setOrigin(&f.state);
 	return f;
 }
 
 Receiver::InternalAudioStates::InternalAudioStates() : decoder(nullptr) {
-	
+
 }
 
 Receiver::InternalAudioStates &Receiver::_getAudioFrame(const StreamPacket &spkt, int ix) {
@@ -116,40 +162,64 @@ Receiver::InternalAudioStates &Receiver::_getAudioFrame(const StreamPacket &spkt
 	while (audio_frames_[spkt.streamID].size() <= fn) {
 		//frames_.resize(spkt.frameNumber()+1);
 		audio_frames_[spkt.streamID].push_back(new InternalAudioStates);
-		audio_frames_[spkt.streamID][audio_frames_[spkt.streamID].size()-1]->state.set("name",std::string("Source ")+std::to_string(fn+1));
+		//audio_frames_[spkt.streamID][audio_frames_[spkt.streamID].size()-1]->state.set("name",std::string("Source ")+std::to_string(fn+1));
 	}
 	auto &f = *audio_frames_[spkt.streamID][fn];
 	//if (!f.frame.origin()) f.frame.setOrigin(&f.state);
 	return f;
 }
 
-void Receiver::_processState(const StreamPacket &spkt, const Packet &pkt) {
-	for (int i=0; i<pkt.frame_count; ++i) {
-		InternalVideoStates &frame = _getVideoFrame(spkt,i);
-
-		// Deal with the special channels...
-		switch (spkt.channel) {
-		case Channel::Configuration		: ftl::config::parseJSON(frame.state.getConfig(), parseConfig(pkt)); break;
-		case Channel::Calibration		: frame.state.getLeft() = parseCalibration(pkt); break;
-		case Channel::Calibration2		: frame.state.getRight() = parseCalibration(pkt); break;
-		//case Channel::Pose				: frame.state.getPose() = parsePose(pkt); break;
-		case Channel::Pose				: frame.state.setPose(parsePose(pkt)); break;
-		default: break;
-		}
-	}
-}
-
 void Receiver::_processData(const StreamPacket &spkt, const Packet &pkt) {
-	//InternalVideoStates &frame = _getVideoFrame(spkt);
-	if (spkt.frameNumber() == 255) {
-		auto *fs = builder_[spkt.streamID].get(spkt.timestamp);
-		if (fs) {
-			fs->createRawData(spkt.channel, pkt.data);
+	auto &build = builder(spkt.streamID);
+	auto fs = build.get(spkt.timestamp, spkt.frame_number);
+	auto &f = (spkt.frame_number == 255) ? **fs : fs->frames[spkt.frame_number];
+
+	// Remove LIVE capability if stream hints it is recorded
+	if (spkt.channel == Channel::Capabilities && (spkt.hint_capability & ftl::codecs::kStreamCap_Recorded)) {
+		std::any data;
+		ftl::data::decode_type<std::unordered_set<Capability>>(data, pkt.data);
+
+		auto &cap = *std::any_cast<std::unordered_set<Capability>>(&data);
+		if (cap.count(Capability::LIVE)) {
+			cap.erase(Capability::LIVE);
 		}
+		cap.emplace(Capability::STREAMED);
+
+		f.informChange(spkt.channel, build.changeType(), data);
+	} else if (spkt.channel == Channel::Pose && pkt.codec == ftl::codecs::codec_t::POSE) {
+		// TODO: Remove this eventually; it allows old FTL files to keep working
+		std::any data;
+		auto &pose = data.emplace<Eigen::Matrix4d>();
+		pose = Eigen::Map<Eigen::Matrix4d>((double*)pkt.data.data());
+		f.informChange(spkt.channel, build.changeType(), data);
 	} else {
-		auto &frame = builder_[spkt.streamID].get(spkt.timestamp, spkt.frame_number);
-		frame.createRawData(spkt.channel, pkt.data);
+		f.informChange(spkt.channel, build.changeType(), pkt);
 	}
+
+	if (spkt.channel == Channel::Calibration) {
+		const auto &calibration = std::get<0>(f.get<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(Channel::Calibration));
+		InternalVideoStates &ividstate = _getVideoFrame(spkt);
+		ividstate.width = calibration.width;
+		ividstate.height = calibration.height;
+	}
+
+	// TODO: Adjust metadata also for recorded streams
+
+	if (spkt.flags & ftl::codecs::kFlagCompleted) {
+		//UNIQUE_LOCK(vidstate.mutex, lk);
+		timestamp_ = spkt.timestamp;
+		fs->completed(spkt.frame_number);
+	}
+
+	fs->localTimestamp = spkt.localTimestamp;
+
+	/*const auto *cs = stream_;
+	const auto sel = stream_->selected(spkt.frameSetID()) & cs->available(spkt.frameSetID());
+
+	if (f.hasAll(sel)) {
+		timestamp_ = spkt.timestamp;
+		fs->completed(spkt.frame_number);
+	}*/
 }
 
 ftl::audio::Decoder *Receiver::_createAudioDecoder(InternalAudioStates &frame, const ftl::codecs::Packet &pkt) {
@@ -159,17 +229,24 @@ ftl::audio::Decoder *Receiver::_createAudioDecoder(InternalAudioStates &frame, c
 
 void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 	// Audio Data
-	InternalAudioStates &frame = _getAudioFrame(spkt);
+	InternalAudioStates &state = _getAudioFrame(spkt);
+
+	//frame.frame.reset();
+	state.timestamp = spkt.timestamp;
+
+	auto &build = builder(spkt.streamID);
+	auto fs = build.get(spkt.timestamp, spkt.frame_number+pkt.frame_count-1);
+	auto &frame = fs->frames[0];
+
+	auto &audiolist = frame.createChange<std::list<ftl::audio::Audio>>(spkt.channel, build.changeType(), pkt);
+	auto &audio = audiolist.emplace_back();
 
-	frame.frame.reset();
-	frame.timestamp = spkt.timestamp;
-	auto &audio = frame.frame.create<ftl::audio::Audio>(spkt.channel);
 	//size_t size = pkt.data.size()/sizeof(short);
 	//audio.data().resize(size);
 	//auto *ptr = (short*)pkt.data.data();
 	//for (size_t i=0; i<size; i++) audio.data()[i] = ptr[i];
 
-	ftl::audio::Decoder *dec = _createAudioDecoder(frame, pkt);
+	ftl::audio::Decoder *dec = _createAudioDecoder(state, pkt);
 	if (!dec) {
 		LOG(ERROR) << "Could get an audio decoder";
 		return;
@@ -179,21 +256,29 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 		return;
 	}
 
+	if (spkt.flags & ftl::codecs::kFlagCompleted) {
+		//UNIQUE_LOCK(vidstate.mutex, lk);
+		timestamp_ = spkt.timestamp;
+		fs->completed(spkt.frame_number);
+	}
+
+	fs->localTimestamp = spkt.localTimestamp;
+
 	// Generate settings from packet data
-	ftl::audio::AudioSettings settings;
+	/*ftl::audio::AudioSettings settings;
 	settings.channels = (spkt.channel == Channel::AudioStereo) ? 2 : 1;
 	settings.frame_size = 960;
-	
+
 	switch (pkt.definition) {
 	case definition_t::hz48000		: settings.sample_rate = 48000; break;
 	case definition_t::hz44100		: settings.sample_rate = 44100; break;
 	default: settings.sample_rate = 48000; break;
-	}
+	}*/
 
-	frame.state.setLeft(settings);
-	frame.frame.setOrigin(&frame.state);
+	//frame.state.setLeft(settings);
+	//frame.frame.setOrigin(&frame.state);
 
-	if (audio_cb_) {
+	/*if (audio_cb_) {
 		// Create an audio frameset wrapper.
 		ftl::audio::FrameSet fs;
 		fs.id = 0;
@@ -205,7 +290,7 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 		frame.frame.swapTo(fs.frames.emplace_back());
 
 		audio_cb_(fs);
-	}
+	}*/
 }
 
 namespace sgm {
@@ -217,17 +302,39 @@ namespace sgm {
 void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 	FTL_Profile("VideoPacket", 0.02);
 
-	const ftl::codecs::Channel rchan = spkt.channel;
+	//const ftl::codecs::Channel rchan = spkt.channel;
 	const unsigned int channum = (unsigned int)spkt.channel;
 	InternalVideoStates &ividstate = _getVideoFrame(spkt);
 
 	auto [tx,ty] = ftl::codecs::chooseTileConfig(pkt.frame_count);
 
-	int width = ividstate.state.getLeft().width;
-	int height = ividstate.state.getLeft().height;
+	// Get the frameset
+	auto &build = builder(spkt.streamID);
+	auto fs = build.get(spkt.timestamp, spkt.frame_number+pkt.frame_count-1);  // TODO: This is a hack
+
+	//if (!fs->frames[spkt.frame_number].has(Channel::Calibration)) {
+	//	LOG(WARNING) << "No calibration, skipping frame: " << spkt.timestamp;
+	//	return;
+	//}
+
+	//const auto &calibration = std::get<0>(fs->frames[spkt.frame_number].get<std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, int>>(Channel::Calibration));
+	int width = ividstate.width; //calibration.width;
+	int height = ividstate.height; //calibration.height;
 
 	if (width == 0 || height == 0) {
-		LOG(WARNING) << "No calibration, skipping frame";
+		// Attempt to retry the decode later
+		// Make a copy of the packets into a thread job
+		// FIXME: Check that thread pool does not explode
+		if (spkt.retry_count < 10) {
+			LOG(WARNING) << "No calibration, retrying: " << spkt.timestamp;
+			ftl::pool.push([this, spkt, pkt](int id) mutable {
+				++const_cast<StreamPacket&>(spkt).retry_count;
+				std::this_thread::sleep_for(std::chrono::milliseconds(10));
+				_processVideo(spkt, pkt);
+			});
+		} else {
+			LOG(WARNING) << "No calibration, failed frame: " << spkt.timestamp;
+		}
 		return;
 	}
 
@@ -281,17 +388,17 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 
 	// Mark a frameset as being partial
 	if (pkt.flags & ftl::codecs::kFlagPartial) {
-		builder_[spkt.streamID].markPartial(spkt.timestamp);
+		fs->markPartial();
 	}
 
 	// Now split the tiles from surface into frames, doing colour conversions
 	// at the same time.
 	// Note: Done in reverse to allocate correct number of frames first time round
 	for (int i=pkt.frame_count-1; i>=0; --i) {
-		InternalVideoStates &vidstate = _getVideoFrame(spkt,i);
-		auto &frame = builder_[spkt.streamID].get(spkt.timestamp, spkt.frame_number+i);
+		//InternalVideoStates &vidstate = _getVideoFrame(spkt,i);
+		auto &frame = fs->frames[spkt.frame_number+i];
 
-		if (!frame.origin()) frame.setOrigin(&vidstate.state);
+		//if (!frame.origin()) frame.setOrigin(&vidstate.state);
 
 		if (frame.hasChannel(spkt.channel)) {
 			// FIXME: Is this a corruption in recording or in playback?
@@ -301,11 +408,12 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 
 		// Add channel to frame and allocate memory if required
 		const cv::Size size = cv::Size(width, height);
-		frame.getBuffer<cv::cuda::GpuMat>(spkt.channel).create(size, ftl::codecs::type(spkt.channel)); //(isFloatChannel(rchan) ? CV_32FC1 : CV_8UC4));
+		auto &buf = frame.createChange<ftl::rgbd::VideoFrame>(spkt.channel, build.changeType(), pkt).createGPU();
+		buf.create(size, ftl::codecs::type(spkt.channel)); //(isFloatChannel(rchan) ? CV_32FC1 : CV_8UC4));
 
 		cv::Rect roi((i % tx)*width, (i / tx)*height, width, height);
 		cv::cuda::GpuMat sroi = surface(roi);
-		sroi.copyTo(frame.getBuffer<cv::cuda::GpuMat>(spkt.channel), cvstream);
+		sroi.copyTo(buf, cvstream);
 	}
 
 	// Must ensure all processing is finished before completing a frame.
@@ -313,108 +421,101 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 
 	for (int i=0; i<pkt.frame_count; ++i) {
 		InternalVideoStates &vidstate = _getVideoFrame(spkt,i);
-		auto &frame = builder_[spkt.streamID].get(spkt.timestamp, spkt.frame_number+i);
-
-		const auto *cs = stream_;
-		auto sel = stream_->selected(spkt.frameSetID()) & cs->available(spkt.frameSetID());
+		//auto &frame = builder(spkt.streamID).get(spkt.timestamp, spkt.frame_number+i);
+		//auto &frame = fs->frames[spkt.frame_number+i];
 
-		frame.create<cv::cuda::GpuMat>(spkt.channel);
+		/*if (spkt.version < 5) {
+			const auto *cs = stream_;
+			const auto sel = stream_->selected(spkt.frameSetID()) & cs->available(spkt.frameSetID());
 
-		if (i == 0) {
-			Packet tmppkt = pkt;
-			frame.pushPacket(spkt.channel, tmppkt);
-		}
-		
-		UNIQUE_LOCK(vidstate.mutex, lk);
-		//if (frame.timestamp == spkt.timestamp) {
-			//frame.completed += spkt.channel;
-			
-			// Complete if all requested channels are found
-			if ((frame.getChannels() & sel) == sel) {
+			UNIQUE_LOCK(vidstate.mutex, lk);
+			if (frame.availableAll(sel)) {
 				timestamp_ = spkt.timestamp;
-				//frame.reset.clear();
+				fs->completed(spkt.frame_number+i);
+			}
+		}*/
 
-				//LOG(INFO) << "BUILDER PUSH: " << timestamp_ << ", " << spkt.frameNumber() << ", " << (int)pkt.frame_count;
+		if (spkt.flags & ftl::codecs::kFlagCompleted) {
+			UNIQUE_LOCK(vidstate.mutex, lk);
+			timestamp_ = spkt.timestamp;
+			fs->completed(spkt.frame_number+i);
+		}
+	}
 
-				if (vidstate.state.getLeft().width == 0) {
-					LOG(WARNING) << "Missing calibration for frame";
-				}
+	fs->localTimestamp = spkt.localTimestamp;
+}
 
-				// TODO: Have multiple builders for different framesets.
-				//builder_.push(frame.timestamp, spkt.frameNumber()+i, frame.frame);
-				builder_[spkt.streamID].completed(spkt.timestamp, spkt.frame_number+i);
+void Receiver::processPackets(const StreamPacket &spkt, const Packet &pkt) {
+	const unsigned int channum = (unsigned int)spkt.channel;
 
-				// Check for any state changes and send them back
-				//if (vidstate.state.hasChanged(Channel::Pose)) injectPose(stream_, frame, spkt.timestamp, spkt.frameNumber()+i);
-				//if (vidstate.state.hasChanged(Channel::Calibration)) injectCalibration(stream_, frame, spkt.timestamp, spkt.streamID, spkt.frameNumber()+i);
-				//if (vidstate.state.hasChanged(Channel::Calibration2)) injectCalibration(stream_, frame, spkt.timestamp, spkt.streamID, spkt.frameNumber()+i, true);
+	// A packet with no data only signals that the channel is available.
+	if (pkt.data.size() == 0) {
+		if (spkt.streamID < 255 && !(spkt.flags & ftl::codecs::kFlagRequest)) {
+			// Get the frameset
+			auto fs = builder(spkt.streamID).get(spkt.timestamp, spkt.frame_number+pkt.frame_count-1);
+			const auto *cs = stream_;
+			const auto sel = stream_->selected(spkt.frameSetID()) & cs->available(spkt.frameSetID());
+
+			for (auto &frame : fs->frames) {
+				//LOG(INFO) << "MARK " << frame.source() << " " << (int)spkt.channel;
+				frame.markAvailable(spkt.channel);
+
+				if (spkt.flags & ftl::codecs::kFlagCompleted) {
+					//UNIQUE_LOCK(vidstate.mutex, lk);  // FIXME: Should have a lock here...
+					timestamp_ = spkt.timestamp;
+					fs->completed(frame.source());
+				}
 
-				//frame.reset();
-				//frame.completed.clear();
-				//frame.timestamp = -1;
+				//if (frame.availableAll(sel)) {
+					//LOG(INFO) << "FRAME COMPLETED " << frame.source();
+				//	fs->completed(frame.source());
+				//}
 			}
-		//} else {
-		//	LOG(ERROR) << "Frame timestamps mistmatch";
-		//}
-	}
-}
 
-void Receiver::setStream(ftl::stream::Stream *s) {
-	if (stream_) {
-		stream_->onPacket(nullptr);
+			fs->localTimestamp = spkt.localTimestamp;
+		}
+		return;
 	}
 
-	stream_ = s;
-
-	s->onPacket([this](const StreamPacket &spkt, const Packet &pkt) {	
-		const unsigned int channum = (unsigned int)spkt.channel;
-
-		//LOG(INFO) << "PACKET: " << spkt.timestamp << ", " << (int)spkt.channel << ", " << (int)pkt.codec << ", " << (int)pkt.definition;
-
-		// TODO: Allow for multiple framesets
-		//if (spkt.frameSetID() > 0) LOG(INFO) << "Frameset " << spkt.frameSetID() << " received: " << (int)spkt.channel;
-		if (spkt.frameSetID() >= ftl::stream::kMaxStreams) return;
+	//LOG(INFO) << "PACKET: " << spkt.timestamp << ", " << (int)spkt.channel << ", " << (int)pkt.codec << ", " << (int)pkt.definition;
 
-		// Frameset level data channels
-		if (spkt.frameNumber() == 255 && pkt.data.size() > 0) {
-			_processData(spkt,pkt);
-			return;
-		}
+	// TODO: Allow for multiple framesets
+	//if (spkt.frameSetID() > 0) LOG(INFO) << "Frameset " << spkt.frameSetID() << " received: " << (int)spkt.channel;
+	if (spkt.frameSetID() >= ftl::stream::kMaxStreams) return;
 
-		// Too many frames, so ignore.
-		//if (spkt.frameNumber() >= value("max_frames",32)) return;
-		if (spkt.frameNumber() >= 32 || ((1 << spkt.frameNumber()) & frame_mask_) == 0) return;
+	// Frameset level data channels
+	if (spkt.frameNumber() == 255 && pkt.data.size() > 0) {
+		_processData(spkt,pkt);
+		return;
+	}
 
-		// Dummy no data packet.
-		if (pkt.data.size() == 0) return;
+	// Too many frames, so ignore.
+	//if (spkt.frameNumber() >= value("max_frames",32)) return;
+	if (spkt.frameNumber() >= 32 || ((1 << spkt.frameNumber()) & frame_mask_) == 0) return;
 
 
-		if (channum >= 2048) {
-			_processData(spkt,pkt);
-		} else if (channum >= 64) {
-			_processState(spkt,pkt);
-		} else if (channum >= 32 && channum < 64) {
-			_processAudio(spkt,pkt);
-		} else {
-			_processVideo(spkt,pkt);
-		}
-	});
+	if (channum >= 64) {
+		_processData(spkt,pkt);
+	} else if (channum >= 32 && channum < 64) {
+		_processAudio(spkt,pkt);
+	} else {
+		_processVideo(spkt,pkt);
+	}
 }
 
-size_t Receiver::size() {
-	return builder_[0].size();
-}
+void Receiver::setStream(ftl::stream::Stream *s) {
+	handle_.cancel();
+	stream_ = s;
 
-ftl::rgbd::FrameState &Receiver::state(size_t ix) {
-	return builder_[0].state(ix);
+	handle_ = s->onPacket([this](const StreamPacket &spkt, const Packet &pkt) {
+		processPackets(spkt, pkt);
+		return true;
+	});
 }
 
-void Receiver::onFrameSet(const ftl::rgbd::VideoCallback &cb) {
-	for (size_t i=0; i<ftl::stream::kMaxStreams; ++i)
-		builder_[i].onFrameSet(cb);
+ftl::Handle Receiver::onFrameSet(const std::function<bool(const ftl::data::FrameSetPtr&)> &cb) {
+	//for (auto &b : builders_)
+	//	b.second.onFrameSet(cb);
+	return callback_.on(cb);
 }
 
-void Receiver::onFrameSet(size_t s, const ftl::rgbd::VideoCallback &cb) {
-	if (s >= 0 && s < ftl::stream::kMaxStreams)
-		builder_[s].onFrameSet(cb);
-}
diff --git a/components/streams/src/renderer.cpp b/components/streams/src/renderer.cpp
new file mode 100644
index 000000000..5da8355ed
--- /dev/null
+++ b/components/streams/src/renderer.cpp
@@ -0,0 +1,74 @@
+#include <ftl/streams/renderer.hpp>
+#include <ftl/rgbd/frame.hpp>
+#include <ftl/rgbd/frameset.hpp>
+#include <ftl/rgbd/capabilities.hpp>
+#include <loguru.hpp>
+
+#include "./renderers/screen_render.hpp"
+#include "./renderers/openvr_render.hpp"
+
+using ftl::render::Source;
+using ftl::codecs::Channel;
+using ftl::rgbd::Capability;
+using std::string;
+
+
+Source::Source(nlohmann::json &config, ftl::stream::Feed *feed)
+: ftl::Configurable(config), feed_(feed), impl_(nullptr) {
+	reset();
+
+	on("uri", [this]() {
+		LOG(INFO) << "URI change for renderer: " << getURI();
+		reset();
+	});
+}
+
+Source::~Source() {
+	if (impl_) delete impl_;
+}
+
+bool Source::supports(const std::string &puri) {
+	ftl::URI uri(puri);
+	if (!uri.isValid() || uri.getScheme() != ftl::URI::SCHEME_DEVICE) return false;
+
+	if (uri.getPathSegment(0) == "render") return true;
+	if (uri.getPathSegment(0) == "openvr") return ftl::render::OpenVRRender::supported();
+	return false;
+}
+
+void Source::reset() {
+	if (impl_) delete impl_;
+	impl_ = nullptr;
+
+	auto uristr = get<string>("uri");
+	if (!uristr) return;
+
+	restore(*uristr, {
+		"renderer",
+		"source",
+		"intrinsics",
+		"name"
+	});
+
+	ftl::URI uri(*uristr);
+	if (!uri.isValid()) return;
+	if (uri.getScheme() != ftl::URI::SCHEME_DEVICE) return;
+
+	if (uri.getPathSegment(0) == "render") {
+		impl_ = new ftl::render::ScreenRender(this, feed_);
+	} else if (uri.getPathSegment(0) == "openvr") {
+		impl_ = new ftl::render::OpenVRRender(this, feed_);
+	} else {
+		throw FTL_Error("Invalid render device: " << *uristr);
+	}
+}
+
+bool Source::capture(int64_t ts) {
+	if (impl_) return impl_->capture(ts);
+	else return false;
+}
+
+bool Source::retrieve(ftl::data::Frame &frame_out) {
+	if (impl_) return impl_->retrieve(frame_out);
+	else return false;
+}
diff --git a/components/streams/src/renderers/collisions.cpp b/components/streams/src/renderers/collisions.cpp
new file mode 100644
index 000000000..ddb5c2d5b
--- /dev/null
+++ b/components/streams/src/renderers/collisions.cpp
@@ -0,0 +1,73 @@
+#include "collisions.hpp"
+#include <ftl/codecs/touch.hpp>
+#include <ftl/utility/matrix_conversion.hpp>
+#include <ftl/rgbd/capabilities.hpp>
+#include <ftl/algorithms/dbscan.hpp>
+
+using ftl::codecs::Channel;
+using ftl::rgbd::Capability;
+
+void ftl::render::collision2touch(const ftl::rgbd::Frame &rgbdframe,
+ const std::vector<float4> &collisions,
+ const std::list<ftl::data::FrameSetPtr> &sets, uint32_t myid, float tmin, float tmax) {
+
+	std::vector<float4> clusters;
+	std::vector<short> labels;
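+	// Cluster nearby collision points with DBSCAN (minimum 5 points, 16 pixel radius); each cluster is a touch candidate.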
+	ftl::dbscan<float4>(collisions, [](const std::vector<float4> &pts, size_t idx, float radius) {
+		std::vector<size_t> neighbors;
+		for (auto i = 0u; i < pts.size(); i++) {
+			if (i == idx) {
+				continue;
+			}
+			float dx = pts[idx].x - pts[i].x;
+			float dy = pts[idx].y - pts[i].y;
+
+			if (dx*dx+dy*dy < radius*radius) {
+				neighbors.push_back(i);
+			}
+		}
+		return neighbors;
+	}, 5, 16.0f, labels, clusters);
+
+	// TODO: Support multi-touch
+	if (clusters.size() == 1) {
+		//LOG(INFO) << "Found " << clusters.size() << " collisions";
+		//LOG(INFO) << "  -- " << clusters[0].x << "," << clusters[0].y << " " << clusters[0].z;
+
+		// Find all frames that support touch
+		for (auto &s : sets) {
+			if (s->frameset() == myid) continue;
+
+			for (const auto &f : s->frames) {
+				if (f.has(Channel::Capabilities)) {
+					const auto &cap = f.get<std::unordered_set<Capability>>(Channel::Capabilities);
+
+					// If it supports touch, calculate the touch points in screen coordinates
+					if (cap.count(Capability::TOUCH)){
+						const auto &rgbdf = f.cast<ftl::rgbd::Frame>();
+
+						// TODO: Use Eigen directly.
+						auto pose = MatrixConversion::toCUDA((rgbdf.getPose().inverse() * rgbdframe.getPose()).cast<float>());
+						float3 campos = pose * rgbdframe.getLeft().screenToCam(clusters[0].x, clusters[0].y, clusters[0].z);
+						const auto &cam = rgbdf.getLeft();
+						int2 pt = cam.camToScreen<int2>(campos);
+						//LOG(INFO) << "TOUCH AT " << pt.x << "," << pt.y << " - " << campos.z;
+
+						{
+							// Send the touch data
+							auto response = f.response();
+							auto &touches = response.create<std::vector<ftl::codecs::Touch>>(Channel::Touch);
+							auto &touch = touches.emplace_back();
+							touch.id = 0;
+							touch.x = pt.x;
+							touch.y = pt.y;
+							touch.type = ftl::codecs::TouchType::COLLISION;
+							touch.strength = (std::abs(campos.z - cam.maxDepth) <= tmin) ? 255 : 0;
+							touch.d = campos.z;
+						}
+					}
+				}
+			}
+		}
+	}
+}
\ No newline at end of file
diff --git a/components/streams/src/renderers/collisions.hpp b/components/streams/src/renderers/collisions.hpp
new file mode 100644
index 000000000..f80d481ee
--- /dev/null
+++ b/components/streams/src/renderers/collisions.hpp
@@ -0,0 +1,18 @@
+#ifndef _FTL_RENDER_COLLISIONS_HPP_
+#define _FTL_RENDER_COLLISIONS_HPP_
+
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/rgbd/frame.hpp>
+
+namespace ftl {
+namespace render {
+
+void collision2touch(const ftl::rgbd::Frame &rgbdframe,
+	const std::vector<float4> &collisions,
+	const std::list<ftl::data::FrameSetPtr> &sets,
+	uint32_t myid, float tmin, float tmax);
+
+}
+}
+
+#endif
\ No newline at end of file
diff --git a/components/streams/src/renderers/openvr_render.cpp b/components/streams/src/renderers/openvr_render.cpp
new file mode 100644
index 000000000..6346cf244
--- /dev/null
+++ b/components/streams/src/renderers/openvr_render.cpp
@@ -0,0 +1,448 @@
+#include <ftl/streams/renderer.hpp>
+#include <ftl/rgbd/frame.hpp>
+#include <ftl/rgbd/frameset.hpp>
+#include <ftl/rgbd/capabilities.hpp>
+#include <ftl/cuda/transform.hpp>
+#include <ftl/operators/antialiasing.hpp>
+#include <ftl/operators/gt_analysis.hpp>
+#include <ftl/operators/poser.hpp>
+#include <ftl/codecs/shapes.hpp>
+
+#include <loguru.hpp>
+
+#include "openvr_render.hpp"
+#include "collisions.hpp"
+
+#include <GL/gl.h>
+
+using ftl::render::Source;
+using ftl::render::OpenVRRender;
+using ftl::codecs::Channel;
+using ftl::rgbd::Capability;
+using ftl::codecs::Shape3DType;
+
+#ifdef HAVE_OPENVR
+static vr::IVRSystem *HMD = nullptr;
+#endif
+
+
+OpenVRRender::OpenVRRender(ftl::render::Source *host, ftl::stream::Feed *feed)
+: ftl::render::BaseSourceImpl(host), feed_(feed), my_id_(0), post_pipe_(nullptr), baseline_(0.06f) {
+	#ifdef HAVE_OPENVR
+	if (HMD) throw FTL_Error("Can only have one OpenVR device");
+	#endif
+	initVR();
+
+	renderer_ = std::unique_ptr<ftl::render::CUDARender>(
+		ftl::create<ftl::render::CUDARender>(host_, "renderer")
+	);
+
+	renderer2_ = std::unique_ptr<ftl::render::CUDARender>(
+		ftl::create<ftl::render::CUDARender>(host_, "renderer2")
+	);
+
+	intrinsics_ = ftl::create<ftl::Configurable>(host_, "intrinsics");
+
+	filter_ = nullptr;
+	std::string source = host_->value("source", std::string(""));
+
+	if (source.size() > 0) {
+		filter_ = feed_->filter({source},{Channel::Colour, Channel::Depth});
+	} else {
+		filter_ = feed_->filter({Channel::Colour, Channel::Depth});
+	}
+
+	host_->on("source", [this]() {
+		std::string source = host_->value("source", std::string(""));
+
+		if (source.size() > 0) {
+			if (filter_) filter_->remove();
+			filter_ = feed_->filter({source},{Channel::Colour, Channel::Depth});
+		} else {
+			if (filter_) filter_->remove();
+			filter_ = feed_->filter({Channel::Colour, Channel::Depth});
+		}
+	});
+
+	eye_ = Eigen::Vector3d::Zero();
+	rotmat_.setIdentity();
+	initial_pose_.setIdentity();
+
+	host_->on("reset_pose", [this]() {
+		pose_calibrated_.clear();
+	});
+}
+
+OpenVRRender::~OpenVRRender() {
+	if (filter_) filter_->remove();
+	delete intrinsics_;
+	if (post_pipe_) delete post_pipe_;
+
+	#ifdef HAVE_OPENVR
+	if (HMD != nullptr) {
+		vr::VR_Shutdown();
+	}
+	#endif
+}
+
+bool OpenVRRender::initVR() {
+	#ifdef HAVE_OPENVR
+	if (!vr::VR_IsHmdPresent()) {
+		return false;
+	}
+
+	if (HMD) return true;
+
+	vr::EVRInitError eError = vr::VRInitError_None;
+	HMD = vr::VR_Init( &eError, vr::VRApplication_Scene );
+	
+	if (eError != vr::VRInitError_None)
+	{
+		HMD = nullptr;
+		LOG(ERROR) << "Unable to init VR runtime: " << vr::VR_GetVRInitErrorAsEnglishDescription(eError);
+		return false;
+	}
+
+	return true;
+	#else
+	return false;
+	#endif
+}
+
+bool OpenVRRender::supported() {
+	#ifdef HAVE_OPENVR
+	return vr::VR_IsHmdPresent();
+	#else
+	return false;
+	#endif
+}
+
+bool OpenVRRender::isReady() {
+	#ifdef HAVE_OPENVR
+	return HMD != nullptr;
+	#else
+	return false;
+	#endif
+}
+
+bool OpenVRRender::capture(int64_t ts) {
+	return true;
+}
+
+#ifdef HAVE_OPENVR
+static inline Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
+{
+	Eigen::Matrix4d matrixObj;
+	matrixObj <<
+		matPose.m[0][0], matPose.m[0][1], matPose.m[0][2], matPose.m[0][3],
+		matPose.m[1][0], matPose.m[1][1], matPose.m[1][2], matPose.m[1][3],
+		matPose.m[2][0], matPose.m[2][1], matPose.m[2][2], matPose.m[2][3],
+					0.0,			 0.0,			  0.0,			   1.0;
+	return matrixObj;
+}
+
+static inline Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix44_t &matPose )
+{
+	Eigen::Matrix4d matrixObj;
+	matrixObj <<
+		matPose.m[0][0], matPose.m[0][1], matPose.m[0][2], matPose.m[0][3],
+		matPose.m[1][0], matPose.m[1][1], matPose.m[1][2], matPose.m[1][3],
+		matPose.m[2][0], matPose.m[2][1], matPose.m[2][2], matPose.m[2][3],
+		matPose.m[3][0], matPose.m[3][1], matPose.m[3][2], matPose.m[3][3];
+	return matrixObj;
+}
+
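+// Build a pinhole intrinsic matrix from the raw projection extents (tangents of the
+// half view angles) and the target image size.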
+static Eigen::Matrix3d getCameraMatrix(const double tanx1,
+								const double tanx2,
+								const double tany1,
+								const double tany2,
+								const double size_x,
+								const double size_y) {
+	
+	Eigen::Matrix3d C = Eigen::Matrix3d::Identity();
+	
+	CHECK(tanx1 < 0 && tanx2 > 0 && tany1 < 0 && tany2 > 0);
+	CHECK(size_x > 0 && size_y > 0);
+
+	double fx = size_x / (-tanx1 + tanx2);
+	double fy = size_y / (-tany1 + tany2);
+	C(0,0) = fx;
+	C(1,1) = fy;
+	C(0,2) = tanx1 * fx;
+	C(1,2) = tany1 * fy;
+
+	// Sanity checks only; safe to remove
+	CHECK((int) (abs(tanx1 * fx) + abs(tanx2 * fx)) == (int) size_x);
+	CHECK((int) (abs(tany1 * fy) + abs(tany2 * fy)) == (int) size_y);
+
+	return C;
+}
+
+static Eigen::Matrix3d getCameraMatrix(vr::IVRSystem *vr, const vr::Hmd_Eye &eye) {
+	float tanx1, tanx2, tany1, tany2;
+	uint32_t size_x, size_y;
+	vr->GetProjectionRaw(eye, &tanx1, &tanx2, &tany1, &tany2);
+	vr->GetRecommendedRenderTargetSize(&size_x, &size_y);
+	return getCameraMatrix(tanx1, tanx2, tany1, tany2, size_x, size_y);
+}
+#endif
+
+bool OpenVRRender::retrieve(ftl::data::Frame &frame_out) {
+
+	#ifdef HAVE_OPENVR
+	//auto input = std::atomic_load(&input_);
+
+	my_id_ = frame_out.frameset();
+
+	auto sets = filter_->getLatestFrameSets();
+
+	if (sets.size() > 0) {
+		ftl::rgbd::Frame &rgbdframe = frame_out.cast<ftl::rgbd::Frame>();
+
+		if (!frame_out.has(Channel::Calibration)) {
+			auto &left = rgbdframe.setLeft();
+			auto &right = rgbdframe.setRight();
+
+			left = ftl::rgbd::Camera::from(intrinsics_);
+			right = ftl::rgbd::Camera::from(intrinsics_);
+
+			left.baseline = baseline_;
+			right.baseline = baseline_;
+			
+			unsigned int size_x, size_y;
+			HMD->GetRecommendedRenderTargetSize(&size_x, &size_y);
+			left.width = size_x;
+			left.height = size_y;
+			right.width = size_x;
+			right.height = size_y;
+
+			Eigen::Matrix3d intrinsic;
+
+			intrinsic = getCameraMatrix(HMD, vr::Eye_Left);
+			CHECK(intrinsic(0, 2) < 0 && intrinsic(1, 2) < 0);
+			left.fx = intrinsic(0,0);
+			left.fy = intrinsic(0,0);
+			left.cx = intrinsic(0,2);
+			left.cy = intrinsic(1,2);
+
+			intrinsic = getCameraMatrix(HMD, vr::Eye_Right);
+			CHECK(intrinsic(0, 2) < 0 && intrinsic(1, 2) < 0);
+			right.fx = intrinsic(0,0);
+			right.fy = intrinsic(0,0);
+			right.cx = intrinsic(0,2);
+			right.cy = intrinsic(1,2);
+
+			if (!frame_out.has(Channel::Capabilities)) {
+				auto &cap = frame_out.create<std::unordered_set<Capability>>(Channel::Capabilities);
+				cap.emplace(Capability::VIDEO);
+				cap.emplace(Capability::MOVABLE);
+				cap.emplace(Capability::ADJUSTABLE);
+				cap.emplace(Capability::VIRTUAL);
+				cap.emplace(Capability::LIVE);
+				cap.emplace(Capability::VR);
+			}
+
+			auto &meta = frame_out.create<std::map<std::string,std::string>>(Channel::MetaData);
+			meta["name"] = host_->value("name", host_->getID());
+			meta["id"] = host_->getID();
+			meta["uri"] = std::string("device:openvr");
+			meta["device"] = std::string("OpenVR Render");
+		}
+		//if (!frame_out.has(Channel::Pose)) {
+		//	rgbdframe.setPose() = Eigen::Matrix4d::Identity();
+		//}
+
+		int width = rgbdframe.getLeft().width;
+		int height = rgbdframe.getLeft().height;
+
+		vr::VRCompositor()->SetTrackingSpace(vr::TrackingUniverseStanding);
+		auto vrerr = vr::VRCompositor()->WaitGetPoses(rTrackedDevicePose_, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
+
+		if (vrerr != vr::VRCompositorError_None) {
+			frame_out.message(ftl::data::Message::Error_OPENVR, "Could not get VR pose");
+			LOG(ERROR) << "Error getting VR poses: " << (int)vrerr;
+		}
+
+		if (rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid ) {
+			Eigen::Matrix4d eye_l = ConvertSteamVRMatrixToMatrix4(
+				vr::VRSystem()->GetEyeToHeadTransform(vr::Eye_Left));
+
+			//Eigen::Matrix4d eye_r = ConvertSteamVRMatrixToMatrix4(
+			//	vr::VRSystem()->GetEyeToHeadTransform(vr::Eye_Left));
+
+			float baseline_in = 2.0 * eye_l(0, 3);
+
+			if (baseline_in != baseline_) {
+				baseline_ = baseline_in;
+				auto cur_left = rgbdframe.getLeft();
+				cur_left.baseline = baseline_;
+				rgbdframe.setLeft() = cur_left;
+
+				auto cur_right = rgbdframe.getRight();
+				cur_right.baseline = baseline_;
+				rgbdframe.setRight() = cur_right;
+			}
+			Eigen::Matrix4d pose = ConvertSteamVRMatrixToMatrix4(rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking);
+			Eigen::Vector3d ea = pose.block<3, 3>(0, 0).eulerAngles(0, 1, 2);
+
+			Eigen::Vector3d vreye;
+			vreye[0] = pose(0, 3);
+			vreye[1] = -pose(1, 3);
+			vreye[2] = -pose(2, 3);
+
+			// NOTE: If modified, should be verified with VR headset!
+			Eigen::Matrix3d R;
+			R =		Eigen::AngleAxisd(ea[0], Eigen::Vector3d::UnitX()) *
+					Eigen::AngleAxisd(-ea[1], Eigen::Vector3d::UnitY()) *
+					Eigen::AngleAxisd(-ea[2], Eigen::Vector3d::UnitZ());
+
+			//double rd = 180.0 / 3.141592;
+			//LOG(INFO) << "rotation x: " << ea[0] *rd << ", y: " << ea[1] * rd << ", z: " << ea[2] * rd;
+			// pose.block<3, 3>(0, 0) = R;
+
+			rotmat_.block(0, 0, 3, 3) = R;
+
+			// TODO: Apply a rotation to orient also
+
+			// TODO: Somehow allow adjustment in addition to the VR pose...
+			//eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
+			//eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
+			//eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
+
+			Eigen::Translation3d trans(eye_ + vreye);
+			Eigen::Affine3d t(trans);
+			auto viewPose = t.matrix() * rotmat_;
+
+			if (!pose_calibrated_.test_and_set()) {
+				initial_pose_ = viewPose.inverse();
+			}
+
+			rgbdframe.setPose() = initial_pose_*viewPose;
+
+		} else {
+			LOG(ERROR) << "No VR Pose";
+			frame_out.message(ftl::data::Message::Error_OPENVR, "Could not get VR pose");
+			rgbdframe.setPose().setIdentity();
+		}
+
+		// TODO: Get controller data if available...
+
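+		// Render targets are OpenGL textures mapped into CUDA so the renderers can
+		// write them directly; they are later submitted to the VR compositor as GL.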
+		texture1_.make(width, height, ftl::utility::GLTexture::Type::BGRA);
+		texture2_.make(width, height, ftl::utility::GLTexture::Type::BGRA);
+		
+		rgbdframe.create<cv::cuda::GpuMat>(Channel::Colour) = texture1_.map(renderer_->getCUDAStream());
+		rgbdframe.create<cv::cuda::GpuMat>(Channel::Colour2) = texture2_.map(renderer2_->getCUDAStream());
+		rgbdframe.create<cv::cuda::GpuMat>(Channel::Depth).create(height, width, CV_32F);
+		rgbdframe.createTexture<float>(Channel::Depth);
+
+		rgbdframe.set<ftl::rgbd::VideoFrame>(Channel::Colour).setOpenGL(texture1_.texture());
+		rgbdframe.set<ftl::rgbd::VideoFrame>(Channel::Colour2).setOpenGL(texture2_.texture());
+
+		auto shapes = rgbdframe.create<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
+
+		/*Eigen::Matrix4d origin;
+		origin.setIdentity();
+		std::string origin_name = host_->value("origin", std::string(""));
+		if (origin_name.size() > 0) {
+			ftl::operators::Poser::get(origin_name, origin);
+		}*/
+
+		try {
+			renderer_->begin(rgbdframe, ftl::codecs::Channel::Left);
+			renderer2_->begin(rgbdframe, Channel::Colour2);
+
+			for (auto &s : sets) {
+				if (s->frameset() == my_id_) continue;  // Skip self
+
+				Eigen::Matrix4d pose;
+				pose.setIdentity();
+				if (s->hasChannel(Channel::Pose)) pose = s->cast<ftl::rgbd::Frame>().getPose();
+
+				// TODO: Check frame has required channels?
+
+				// FIXME: Don't use identity transform, get from Poser somehow.
+				renderer_->submit(
+					s.get(),
+					ftl::codecs::Channels<0>(ftl::codecs::Channel::Colour),
+					pose);
+
+				renderer2_->submit(
+					s.get(),
+					ftl::codecs::Channels<0>(ftl::codecs::Channel::Colour),
+					pose);
+			}
+
+			renderer_->render();
+			renderer2_->render();
+
+			// Now do CPU-based render jobs
+			for (auto &s : sets) {
+				if (s->frameset() == my_id_) continue;  // Skip self
+
+				// TODO: Render audio also...
+				// Use another thread to merge all audio channels along with
+				// some degree of volume adjustment. Later, do 3D audio.
+
+				// Inject and copy data items
+				for (const auto &f : s->frames) {
+					// Add pose as a camera shape
+					auto &shape = shapes.list.emplace_back();
+					shape.id = f.id().id;
+					shape.type = Shape3DType::CAMERA;
+					shape.size = Eigen::Vector3f(0.2f,0.2f,0.2f);
+					shape.pose = f.cast<ftl::rgbd::Frame>().getPose().cast<float>();
+					shape.label = f.name();
+
+					// Copy all original shapes
+					if (f.hasChannel(Channel::Shapes3D)) {
+						const auto &fshapes = f.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
+						shapes.list.insert(shapes.list.end(), fshapes.begin(), fshapes.end());
+					}
+				}
+			}
+
+			// TODO: Blend option
+
+			renderer_->end();
+			renderer2_->end();
+		} catch (const ftl::exception &e) {
+			LOG(ERROR) << "Render exception: " << e.what();
+		}
+
+		if (!post_pipe_) {
+			post_pipe_ = ftl::config::create<ftl::operators::Graph>(host(), "post_filters");
+			post_pipe_->append<ftl::operators::FXAA>("fxaa");
+			post_pipe_->append<ftl::operators::GTAnalysis>("gtanalyse");
+		}
+
+		post_pipe_->apply(rgbdframe, rgbdframe, 0);
+
+		if (host_->value("enable_touch", false)) {
+			ftl::render::collision2touch(rgbdframe, renderer_->getCollisions(), sets, my_id_, host_->value("touch_min", 0.01f), host_->value("touch_max", 0.05f));
+		}
+
+		// FIXME: Use a stream
+		ftl::cuda::flip<uchar4>(rgbdframe.set<cv::cuda::GpuMat>(Channel::Colour), 0);
+		ftl::cuda::flip<uchar4>(rgbdframe.set<cv::cuda::GpuMat>(Channel::Colour2), 0);
+
+		texture1_.unmap(renderer_->getCUDAStream());
+		texture2_.unmap(renderer2_->getCUDAStream());
+		//return true;
+
+
+		// Send left and right textures to VR headset
+		vr::Texture_t leftEyeTexture = {(void*)(uintptr_t)texture1_.texture(), vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
+		vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture );
+		vr::Texture_t rightEyeTexture = {(void*)(uintptr_t)texture2_.texture(), vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
+		vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture );
+
+		glFlush();
+
+	}
+
+	return true;
+
+	#else
+	return false;
+	#endif
+}
diff --git a/components/streams/src/renderers/openvr_render.hpp b/components/streams/src/renderers/openvr_render.hpp
new file mode 100644
index 000000000..bdeadbeb7
--- /dev/null
+++ b/components/streams/src/renderers/openvr_render.hpp
@@ -0,0 +1,64 @@
+#ifndef _FTL_RENDER_OPENVR_SOURCE_HPP_
+#define _FTL_RENDER_OPENVR_SOURCE_HPP_
+
+#include <ftl/data/creators.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/render/renderer.hpp>
+#include <ftl/render/CUDARender.hpp>
+#include <ftl/streams/feed.hpp>
+#include <ftl/utility/gltexture.hpp>
+
+#include "../baserender.hpp"
+
+#include <ftl/config.h>
+
+#ifdef HAVE_OPENVR
+#include <openvr/openvr.h>
+#endif
+
+namespace ftl {
+namespace render {
+
+class OpenVRRender : public ftl::render::BaseSourceImpl {
+	public:
+	OpenVRRender(ftl::render::Source *host, ftl::stream::Feed *feed);
+	~OpenVRRender();
+
+	bool capture(int64_t ts) override;
+	bool retrieve(ftl::data::Frame &) override;
+
+	bool isReady() override;
+
+	static bool supported();
+
+	private:
+	ftl::stream::Feed *feed_;
+	ftl::stream::Feed::Filter *filter_;
+	ftl::data::FrameSetPtr input_;
+	std::unique_ptr<ftl::render::CUDARender> renderer_;
+	std::unique_ptr<ftl::render::CUDARender> renderer2_;
+	ftl::Configurable *intrinsics_;
+	uint32_t my_id_;
+
+	ftl::operators::Graph *post_pipe_;
+
+	std::atomic_flag pose_calibrated_;
+
+	float baseline_;
+	Eigen::Matrix4d initial_pose_;
+	Eigen::Matrix4d rotmat_;
+	Eigen::Vector3d eye_;
+	ftl::utility::GLTexture texture1_; // first channel (always left at the moment)
+	ftl::utility::GLTexture texture2_;
+
+	#ifdef HAVE_OPENVR
+	vr::TrackedDevicePose_t rTrackedDevicePose_[ vr::k_unMaxTrackedDeviceCount ];
+	#endif
+
+	bool initVR();
+};
+
+}
+}
+
+#endif
diff --git a/components/streams/src/renderers/screen_render.cpp b/components/streams/src/renderers/screen_render.cpp
new file mode 100644
index 000000000..d06d57f7f
--- /dev/null
+++ b/components/streams/src/renderers/screen_render.cpp
@@ -0,0 +1,205 @@
+#include <ftl/streams/renderer.hpp>
+#include <ftl/rgbd/frame.hpp>
+#include <ftl/rgbd/frameset.hpp>
+#include <ftl/rgbd/capabilities.hpp>
+#include <ftl/operators/antialiasing.hpp>
+#include <ftl/operators/gt_analysis.hpp>
+#include <ftl/utility/matrix_conversion.hpp>
+#include <ftl/codecs/shapes.hpp>
+
+#include <loguru.hpp>
+
+#include "screen_render.hpp"
+#include "collisions.hpp"
+
+using ftl::render::Source;
+using ftl::render::ScreenRender;
+using ftl::codecs::Channel;
+using ftl::rgbd::Capability;
+using ftl::codecs::Shape3DType;
+
+
+ScreenRender::ScreenRender(ftl::render::Source *host, ftl::stream::Feed *feed)
+: ftl::render::BaseSourceImpl(host), feed_(feed), my_id_(0), post_pipe_(nullptr) {
+	/*host->restore("device:render", {
+		"renderer",
+		"source",
+		"intrinsics",
+		"name"
+	});*/
+
+	renderer_ = std::unique_ptr<ftl::render::CUDARender>(
+		ftl::create<ftl::render::CUDARender>(host_, "renderer")
+	);
+
+	intrinsics_ = ftl::create<ftl::Configurable>(host_, "intrinsics");
+
+	intrinsics_->onAny({"focal","width","height"}, [this]() {
+		calibration_uptodate_.clear();
+	});
+
+	renderer_->value("projection", 0);
+	renderer_->onAny({"projection"}, [this]() {
+		calibration_uptodate_.clear();
+	});
+
+	filter_ = nullptr;
+	std::string source = host_->value("source", std::string(""));
+
+	if (source.size() > 0) {
+		filter_ = feed_->filter({source},{Channel::Colour, Channel::Depth});
+	} else {
+		filter_ = feed_->filter({Channel::Colour, Channel::Depth});
+	}
+
+	host_->on("source", [this]() {
+		std::string source = host_->value("source", std::string(""));
+
+		if (source.size() > 0) {
+			if (filter_) filter_->remove();
+			filter_ = feed_->filter({source},{Channel::Colour, Channel::Depth});
+		} else {
+			if (filter_) filter_->remove();
+			filter_ = feed_->filter({Channel::Colour, Channel::Depth});
+		}
+	});
+}
+
+ScreenRender::~ScreenRender() {
+	if (filter_) filter_->remove();
+	delete intrinsics_;
+	if (post_pipe_) delete post_pipe_;
+}
+
+bool ScreenRender::isReady() {
+	return true;
+}
+
+bool ScreenRender::capture(int64_t ts) {
+	return true;
+}
+
+bool ScreenRender::retrieve(ftl::data::Frame &frame_out) {
+	//auto input = std::atomic_load(&input_);
+
+	my_id_ = frame_out.frameset();
+	auto sets = filter_->getLatestFrameSets();
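+	// When data_only is set, GPU rendering is skipped; only the data items
+	// (camera shapes, copied 3D shapes) and a cleared colour image are produced.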
+	bool data_only = host_->value("data_only", false);
+
+	if (sets.size() > 0) {
+		ftl::rgbd::Frame &rgbdframe = frame_out.cast<ftl::rgbd::Frame>();
+
+		if (!frame_out.has(Channel::Calibration) || calibration_uptodate_.test_and_set()) {
+			rgbdframe.setLeft() = ftl::rgbd::Camera::from(intrinsics_);
+
+			auto &cap = frame_out.create<std::unordered_set<Capability>>(Channel::Capabilities);
+			cap.clear();
+			cap.emplace(Capability::VIDEO);
+			cap.emplace(Capability::MOVABLE);
+			cap.emplace(Capability::ADJUSTABLE);
+			cap.emplace(Capability::VIRTUAL);
+			cap.emplace(Capability::LIVE);
+			if (renderer_->value("projection", 0) == int(ftl::rgbd::Projection::EQUIRECTANGULAR)) cap.emplace(Capability::EQUI_RECT);
+
+			auto &meta = frame_out.create<std::map<std::string,std::string>>(Channel::MetaData);
+			meta["name"] = host_->value("name", host_->getID());
+			meta["id"] = host_->getID();
+			meta["uri"] = std::string("device:render");
+			meta["device"] = std::string("CUDA Render");
+		}
+		if (!frame_out.has(Channel::Pose)) {
+			rgbdframe.setPose() = Eigen::Matrix4d::Identity();
+		}
+
+		int width = rgbdframe.getLeft().width;
+		int height = rgbdframe.getLeft().height;
+		
+		// FIXME: Create opengl buffers here and map to cuda?
+		auto &colour = rgbdframe.create<cv::cuda::GpuMat>(Channel::Colour);
+		colour.create(height, width, CV_8UC4);
+		rgbdframe.create<cv::cuda::GpuMat>(Channel::Depth).create(height, width, CV_32F);
+		rgbdframe.createTexture<float>(Channel::Depth);
+
+		if (data_only) {
+			colour.setTo(cv::Scalar(0,0,0,0));
+		}
+
+		auto shapes = rgbdframe.create<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
+
+		try {
+			if (!data_only) renderer_->begin(rgbdframe, ftl::codecs::Channel::Left);
+
+			// Submit jobs to GPU first
+			for (auto &s : sets) {
+				if (s->frameset() == my_id_) continue;  // Skip self
+
+				Eigen::Matrix4d pose;
+				pose.setIdentity();
+				if (s->hasChannel(Channel::Pose)) pose = s->cast<ftl::rgbd::Frame>().getPose();
+
+				if (!data_only) renderer_->submit(
+					s.get(),
+					ftl::codecs::Channels<0>(ftl::codecs::Channel::Colour),
+					pose);
+			}
+
+			if (!data_only) renderer_->render();
+
+			// Blend another channel
+			int blend_channel = host_->value("blend_channel",0);
+			if (blend_channel > 0) {
+				if (!data_only) renderer_->blend(static_cast<Channel>(blend_channel));
+			}
+
+			// Now do CPU-based render jobs
+			for (auto &s : sets) {
+				if (s->frameset() == my_id_) continue;  // Skip self
+
+				// TODO: Render audio also...
+				// Use another thread to merge all audio channels along with
+				// some degree of volume adjustment. Later, do 3D audio.
+
+				// Inject and copy data items
+				for (const auto &f : s->frames) {
+					// Add pose as a camera shape
+					auto &shape = shapes.list.emplace_back();
+					shape.id = f.id().id;
+					shape.type = Shape3DType::CAMERA;
+					shape.size = Eigen::Vector3f(0.2f,0.2f,0.2f);
+					shape.pose = f.cast<ftl::rgbd::Frame>().getPose().cast<float>();
+					shape.label = f.name();
+
+					// Copy all original shapes
+					if (f.hasChannel(Channel::Shapes3D)) {
+						const auto &fshapes = f.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
+						shapes.list.insert(shapes.list.end(), fshapes.begin(), fshapes.end());
+					}
+				}
+			}
+
+			// This waits for GPU also
+			if (!data_only) renderer_->end();
+		} catch (const ftl::exception &e) {
+			LOG(ERROR) << "Render exception: " << e.what();
+		}
+
+		if (!data_only) {
+			if (!post_pipe_) {
+				post_pipe_ = ftl::config::create<ftl::operators::Graph>(host(), "post_filters");
+				post_pipe_->append<ftl::operators::FXAA>("fxaa");
+				post_pipe_->append<ftl::operators::GTAnalysis>("gtanalyse");
+			}
+
+			post_pipe_->apply(rgbdframe, rgbdframe, 0);
+
+			if (host_->value("enable_touch", false)) {
+				ftl::render::collision2touch(rgbdframe, renderer_->getCollisions(), sets, my_id_, host_->value("touch_min", 0.01f), host_->value("touch_max", 0.05f));
+			}
+		}
+
+		return true;
+	} else {
+		//LOG(INFO) << "Render fail";
+		return true;
+	}
+}
diff --git a/components/streams/src/renderers/screen_render.hpp b/components/streams/src/renderers/screen_render.hpp
new file mode 100644
index 000000000..e8a5635a5
--- /dev/null
+++ b/components/streams/src/renderers/screen_render.hpp
@@ -0,0 +1,43 @@
+#ifndef _FTL_RENDER_SCREEN_SOURCE_HPP_
+#define _FTL_RENDER_SCREEN_SOURCE_HPP_
+
+#include <ftl/data/creators.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/render/renderer.hpp>
+#include <ftl/render/CUDARender.hpp>
+#include <ftl/streams/feed.hpp>
+
+#include "../baserender.hpp"
+
+namespace ftl {
+namespace render {
+
+/**
+ * Wrap a renderer into a source entity that manages it. This obtains the
+ * relevant framesets and can be triggered by a builder to generate frames.
+ */
+class ScreenRender : public ftl::render::BaseSourceImpl {
+	public:
+	ScreenRender(ftl::render::Source *host, ftl::stream::Feed *feed);
+	~ScreenRender();
+
+	bool capture(int64_t ts) override;
+	bool retrieve(ftl::data::Frame &) override;
+
+	bool isReady() override;
+
+	private:
+	ftl::stream::Feed *feed_;
+	ftl::stream::Feed::Filter *filter_;
+	ftl::data::FrameSetPtr input_;
+	std::unique_ptr<ftl::render::CUDARender> renderer_;
+	ftl::Configurable *intrinsics_;
+	uint32_t my_id_;
+	ftl::operators::Graph *post_pipe_;
+	std::atomic_flag calibration_uptodate_;
+};
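+// Illustrative sketch only (not compiled): how the host side might drive a
+// ScreenRender through the BaseSourceImpl interface above. The `host`, `feed`,
+// `frame` and `ts` names are assumptions for the example, not part of this API.
+//
+//   ftl::render::ScreenRender render(host, feed);
+//   if (render.isReady() && render.capture(ts)) {
+//       render.retrieve(frame);  // fills Colour/Depth channels for display
+//   }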
+
+}
+}
+
+#endif
diff --git a/components/streams/src/sender.cpp b/components/streams/src/sender.cpp
index 37f23525c..b658349af 100644
--- a/components/streams/src/sender.cpp
+++ b/components/streams/src/sender.cpp
@@ -26,10 +26,11 @@ using ftl::stream::injectConfig;
 Sender::Sender(nlohmann::json &config) : ftl::Configurable(config), stream_(nullptr) {
 	do_inject_.test_and_set();
 	iframe_ = 1;
-	add_iframes_ = value("iframes", 0);
+	add_iframes_ = value("iframes", 50);
+	timestamp_ = -1;
 
-	on("iframes", [this](const ftl::config::Event &e) {
-		add_iframes_ = value("iframes", 0);
+	on("iframes", [this]() {
+		add_iframes_ = value("iframes", 50);
 	});
 }
 
@@ -42,9 +43,11 @@ Sender::~Sender() {
 }
 
 void Sender::setStream(ftl::stream::Stream*s) {
-	if (stream_) stream_->onPacket(nullptr);
+	//if (stream_) stream_->onPacket(nullptr);
 	stream_ = s;
-	stream_->onPacket([this](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+	handle_ = stream_->onPacket([this](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		if (pkt.data.size() > 0 || !(spkt.flags & ftl::codecs::kFlagRequest)) return true;
+
 		LOG(INFO) << "SENDER REQUEST : " << (int)spkt.channel;
 
 		//if (state_cb_) state_cb_(spkt.channel, spkt.streamID, spkt.frame_number);
@@ -53,6 +56,7 @@ void Sender::setStream(ftl::stream::Stream*s) {
 		// Inject state packets
 		//do_inject_ = true;
 		do_inject_.clear();
+		return true;
 	});
 }
 
@@ -74,69 +78,13 @@ ftl::audio::Encoder *Sender::_getAudioEncoder(int fsid, int sid, ftl::codecs::Ch
 	return state.encoder;
 }
 
-void Sender::post(const ftl::audio::FrameSet &fs) {
-	if (!stream_) return;
-
-	//if (fs.stale) return;
-	//fs.stale = true;
-
-	for (size_t i=0; i<fs.frames.size(); ++i) {
-		if (!(fs.frames[i].hasChannel(Channel::AudioMono) || fs.frames[i].hasChannel(Channel::AudioStereo))) continue;
-
-		auto &data = (fs.frames[i].hasChannel(Channel::AudioStereo)) ?
-			fs.frames[i].get<ftl::audio::Audio>(Channel::AudioStereo) :
-			fs.frames[i].get<ftl::audio::Audio>(Channel::AudioMono);
-
-		auto &settings = fs.frames[i].getSettings();
-
-		StreamPacket spkt;
-		spkt.version = 4;
-		spkt.timestamp = fs.timestamp;
-		spkt.streamID = fs.id;
-		spkt.frame_number = i;
-		spkt.channel = (fs.frames[i].hasChannel(Channel::AudioStereo)) ? Channel::AudioStereo : Channel::AudioMono;
-
-		ftl::codecs::Packet pkt;
-		pkt.codec = ftl::codecs::codec_t::OPUS;
-		pkt.definition = ftl::codecs::definition_t::Any;
-
-		switch (settings.sample_rate) {
-		case 48000		: pkt.definition = ftl::codecs::definition_t::hz48000; break;
-		case 44100		: pkt.definition = ftl::codecs::definition_t::hz44100; break;
-		default: break;
-		}
-
-		pkt.frame_count = 1;
-		pkt.flags = (fs.frames[i].hasChannel(Channel::AudioStereo)) ? ftl::codecs::kFlagStereo : 0;
-		pkt.bitrate = 180;
-
-		// Find encoder here ...
-		ftl::audio::Encoder *enc = _getAudioEncoder(fs.id, i, spkt.channel, pkt);
-
-		// Do encoding into pkt.data
-		if (!enc) {
-			LOG(ERROR) << "Could not find audio encoder";
-			return;
-		}
-		
-		if (!enc->encode(data.data(), pkt)) {
-			LOG(ERROR) << "Could not encode audio";
-			return;
-		}
-
-		stream_->post(spkt, pkt);
-
-		//LOG(INFO) << "SENT AUDIO: " << fs.timestamp << " - " << pkt.data.size();
-	}
-}
-
 template <typename T>
 static void writeValue(std::vector<unsigned char> &data, T value) {
 	unsigned char *pvalue_start = (unsigned char*)&value;
 	data.insert(data.end(), pvalue_start, pvalue_start+sizeof(T));
 }
 
-static void mergeNALUnits(const std::list<ftl::codecs::Packet> &pkts, ftl::codecs::Packet &pkt) {
+/*static void mergeNALUnits(const std::list<ftl::codecs::Packet> &pkts, ftl::codecs::Packet &pkt) {
 	size_t size = 0;
 	for (auto i=pkts.begin(); i!=pkts.end(); ++i) size += (*i).data.size();
 
@@ -156,60 +104,181 @@ static void mergeNALUnits(const std::list<ftl::codecs::Packet> &pkts, ftl::codec
 		//LOG(INFO) << "NAL Count = " << (*i).data.size();
 		pkt.data.insert(pkt.data.end(), (*i).data.begin(), (*i).data.end());
 	}
+}*/
+
+void Sender::_sendPersistent(ftl::data::Frame &frame) {
+	auto *session = frame.parent();
+	if (session) {
+		for (auto c : session->channels()) {
+			Sender::post(frame, c);
+		}
+	}
 }
 
-void Sender::post(ftl::rgbd::FrameSet &fs) {
+void Sender::fakePost(ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
 	if (!stream_) return;
 
-	Channels selected;
-	Channels available;  // but not selected and actually sent.
-	Channels needencoding;
+	for (size_t i=0; i<fs.frames.size(); ++i) {
+		auto &frame = fs.frames[i];
+		if (frame.hasOwn(c)) ++fs.flush_count;
+		
+	}
+}
 
-	if (stream_->size() > 0) selected = stream_->selected(0);
+bool Sender::_checkNeedsIFrame(int64_t ts, bool injecting) {
+	int mspf = ftl::timer::getInterval();
 
-	bool do_inject = !do_inject_.test_and_set();
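+	// When injecting, force I-frames for roughly the next two frame intervals.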
+	if (injecting) injection_timestamp_ = ts+2*mspf;
 
 	// Add an iframe at the requested frequency.
-	if (add_iframes_ > 0) iframe_ = (iframe_+1) % add_iframes_;
+	//if (add_iframes_ > 0 && ts != timestamp_) iframe_ = (iframe_+1) % add_iframes_;
+	//if (iframe_ == 0) injection_timestamp_ = ts+mspf;
+
+	// FIXME: This may need to be atomic in some cases?
+	//if (ts > timestamp_) timestamp_ = ts;
+	return injection_timestamp_ >= ts;
+}
+
+void Sender::post(ftl::data::FrameSet &fs, ftl::codecs::Channel c, bool noencode) {
+	if (!stream_) return;
+
+	std::unordered_set<ftl::codecs::Channel> selected;
+	if (stream_->size() > 0) selected = stream_->selected(fs.frameset());
+
+	bool do_inject = !do_inject_.test_and_set();
+	bool do_iframe = _checkNeedsIFrame(fs.timestamp(), do_inject);
 
 	FTL_Profile("SenderPost", 0.02);
 
-	// Send any frameset data channels
-	for (auto c : fs.getDataChannels()) {
+	bool available = false;
+	bool needs_encoding = true;
+
+	int valid_frames = 0;
+	int ccount = 0;
+	int forward_count = 0;
+
+	for (size_t i=0; i<fs.frames.size(); ++i) {
+		auto &frame = fs.frames[i];
+		if (!frame.has(c)) continue;
+
+		++valid_frames;
+		++fs.flush_count;
+
+		// TODO: Send entire persistent session on inject
+		if (do_inject) {
+			_sendPersistent(frame);
+		}
+
+		ccount += frame.changed().size();
+
+		if (selected.find(c) != selected.end() || (int)c >= 32) {
+			// FIXME: Sends high res colour, but receive end currently broken
+			//auto cc = (c == Channel::Colour && frame.hasChannel(Channel::ColourHighRes)) ? Channel::ColourHighRes : c;
+			auto cc = c;
+
+			// Check if there are existing encoded packets
+			const auto &packets = frame.getEncoded(cc);
+			if (packets.size() > 0) {
+				if (packets.size() > 1) {
+					// Multiple existing encodings for one channel cannot be forwarded as-is
+					LOG(WARNING) << "Multi packet send! - Channel = " << int(c) << ", count = " << packets.size();
+				}
+				forward_count += packets.back().frame_count;
+			}
+		} else {
+			needs_encoding = false;
+			available = true;
+		}
+	}
+
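+	// last_flush: true once the flush count has caught up with the number of
+	// changed channels; it sets kFlagCompleted on the outgoing packets.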
+	bool last_flush = ccount == fs.flush_count;
+
+	// Don't do anything if channel not in any frames.
+	if (valid_frames == 0) return;
+
+	// Can we just forward existing encoding?
+	// TODO: Test this code!
+	if (forward_count == valid_frames) {
+		needs_encoding = false;
+
+		for (size_t i=0; i<fs.frames.size(); ++i) {
+			auto &frame = fs.frames[i];
+			if (!frame.has(c)) continue;
+
+			const auto &packets = frame.getEncoded(c);
+			//if (packets.size() == 1) {
+				StreamPacket spkt;
+				spkt.version = 5;
+				spkt.timestamp = fs.timestamp();
+				spkt.streamID = fs.frameset(); //fs.id;
+				spkt.frame_number = i;
+				spkt.channel = c;
+				spkt.flags = (last_flush) ? ftl::codecs::kFlagCompleted : 0;
+
+				stream_->post(spkt, packets.back());
+			//} else if (packets.size() > 1) {
+				// PROBLEMS
+			//}
+		}
+	}
+
+	// Don't transmit if noencode and needs encoding
+	if (needs_encoding && noencode) {
+		needs_encoding = false;
+		available = true;
+	}
+
+	if (available) {
+		// Not selected so send an empty packet...
 		StreamPacket spkt;
-		spkt.version = 4;
-		spkt.timestamp = fs.timestamp;
-		spkt.streamID = 0; //fs.id;
+		spkt.version = 5;
+		spkt.timestamp = fs.timestamp();
+		spkt.streamID = fs.frameset();
 		spkt.frame_number = 255;
 		spkt.channel = c;
+		spkt.flags = (last_flush) ? ftl::codecs::kFlagCompleted : 0;
 
-		ftl::codecs::Packet pkt;
-		pkt.codec = ftl::codecs::codec_t::MSGPACK;
+		Packet pkt;
+		pkt.codec = codec_t::Any;
 		pkt.frame_count = 1;
-		pkt.flags = 0;
 		pkt.bitrate = 0;
-		pkt.data = fs.getRawData(c);
+		pkt.flags = 0;
 		stream_->post(spkt, pkt);
 	}
 
-	for (size_t i=0; i<fs.frames.size(); ++i) {
-		const auto &frame = fs.frames[i];
+	if (needs_encoding) {
+		_encodeChannel(fs, c, do_iframe, last_flush);
+	}
+}
 
-		if (do_inject) {
-			//LOG(INFO) << "Force inject calibration";
-			injectCalibration(stream_, fs, i);
-			injectCalibration(stream_, fs, i, true);
-			injectPose(stream_, fs, i);
-			injectConfig(stream_, fs, i);
-		} else {
-			if (frame.hasChanged(Channel::Pose)) injectPose(stream_, fs, i);
-			if (frame.hasChanged(Channel::Calibration)) injectCalibration(stream_, fs, i);
-			if (frame.hasChanged(Channel::Calibration2)) injectCalibration(stream_, fs, i, true);
-			if (frame.hasChanged(Channel::Configuration)) injectConfig(stream_, fs, i);
-		}
+void Sender::forceAvailable(ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
+	StreamPacket spkt;
+	spkt.version = 5;
+	spkt.timestamp = fs.timestamp();
+	spkt.streamID = fs.frameset();
+	spkt.frame_number = 255;
+	spkt.channel = c;
+
+	Packet pkt;
+	pkt.codec = codec_t::Any;
+	pkt.frame_count = 1;
+	pkt.bitrate = 0;
+	pkt.flags = 0;
+	stream_->post(spkt, pkt);
+}
+
+void Sender::post(ftl::data::Frame &frame, ftl::codecs::Channel c) {
+	if (!stream_) return;
+
+	FTL_Profile("SenderPost", 0.02);
+
+	bool available = false;
+	bool needs_encoding = true;
 
 		// FIXME: Allow data channel selection rather than always send
-		for (auto c : frame.getDataChannels()) {
+		/*for (auto c : frame.getDataChannels()) {
 			StreamPacket spkt;
 			spkt.version = 4;
 			spkt.timestamp = fs.timestamp;
@@ -224,51 +293,49 @@ void Sender::post(ftl::rgbd::FrameSet &fs) {
 			pkt.bitrate = 0;
 			pkt.data = frame.getRawData(c);
 			stream_->post(spkt, pkt);
-		}
+		}*/
 
-		for (auto c : frame.getChannels()) {
-			if (selected.has(c)) {
+		//for (auto ic : frame.changed()) {
+			//auto c = ic.first;
+			if (true) { //if (selected.has(c)) {
 				// FIXME: Sends high res colour, but receive end currently broken
 				//auto cc = (c == Channel::Colour && frame.hasChannel(Channel::ColourHighRes)) ? Channel::ColourHighRes : c;
 				auto cc = c;
 
 				StreamPacket spkt;
-				spkt.version = 4;
-				spkt.timestamp = fs.timestamp;
-				spkt.streamID = 0; //fs.id;
-				spkt.frame_number = i;
+				spkt.version = 5;
+				spkt.timestamp = frame.timestamp();
+				spkt.streamID = frame.frameset(); //fs.id;
+				spkt.frame_number = frame.source();
 				spkt.channel = c;
 
 				// Check if there are existing encoded packets
-				const auto &packets = frame.getPackets(cc);
+				const auto &packets = frame.getEncoded(cc);
 				if (packets.size() > 0) {
+					needs_encoding = false;
 					if (packets.size() > 1) {
 						LOG(WARNING) << "Multi-packet send: " << (int)cc;
 						ftl::codecs::Packet pkt;
-						mergeNALUnits(packets, pkt);
-						stream_->post(spkt, pkt);
+						//mergeNALUnits(packets, pkt);
+						//stream_->post(spkt, pkt);
 					} else {
 						// Send existing encoding instead of re-encoding
 						//for (auto &pkt : packets) {
 						stream_->post(spkt, packets.front());
 						//}
 					}
-				} else  {
-					needencoding += c;
 				}
 			} else {
-				available += c;
+				available = true;
 			}
-		}
-
-	}
+		//}
 
-	for (auto c : available) {
+	if (available) {
 		// Not selected so send an empty packet...
 		StreamPacket spkt;
-		spkt.version = 4;
-		spkt.timestamp = fs.timestamp;
-		spkt.streamID = 0; // FIXME: fs.id;
+		spkt.version = 5;
+		spkt.timestamp = frame.timestamp();
+		spkt.streamID = frame.frameset();
 		spkt.frame_number = 255;
 		spkt.channel = c;
 
@@ -279,17 +346,34 @@ void Sender::post(ftl::rgbd::FrameSet &fs) {
 		stream_->post(spkt, pkt);
 	}
 
-	for (auto c : needencoding) {
+	if (needs_encoding) {
 		// TODO: One thread per channel.
-		_encodeChannel(fs, c, do_inject || iframe_ == 0);
+		_encodeChannel(frame, c, false);
 	}
 
 	//do_inject_ = false;
 }
 
-void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
+void Sender::setActiveEncoders(uint32_t fsid, const std::unordered_set<Channel> &ec) {
+	for (auto &t : state_) {
+		if ((t.first >> 8) == static_cast<int>(fsid)) {
+			if (t.second.encoder[0] && ec.count(static_cast<Channel>(t.first & 0xFF)) == 0) {
+				// Remove unwanted encoder
+				ftl::codecs::free(t.second.encoder[0]);
+				t.second.encoder[0] = nullptr;
+				if (t.second.encoder[1]) {
+					ftl::codecs::free(t.second.encoder[1]);
+					t.second.encoder[1] = nullptr;
+				}
+				LOG(INFO) << "Removing encoder for channel " << (t.first & 0xFF);
+			}
+		}
+	}
+}
+
+void Sender::_encodeVideoChannel(ftl::data::FrameSet &fs, Channel c, bool reset, bool last_flush) {
 	bool lossless = value("lossless", false);
-	int max_bitrate = std::max(0, std::min(255, value("max_bitrate", 255)));
+	int max_bitrate = std::max(0, std::min(255, value("max_bitrate", 128)));
 	//int min_bitrate = std::max(0, std::min(255, value("min_bitrate", 0)));  // TODO: Use this
 	codec_t codec = static_cast<codec_t>(value("codec", static_cast<int>(codec_t::Any)));
 	device_t device = static_cast<device_t>(value("encoder_device", static_cast<int>(device_t::Any)));
@@ -309,14 +393,20 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 		//	fs.frames[offset].upload(cc);
 		//}
 
+		if (!fs.frames[offset].hasChannel(cc)) {
+			offset++;
+			continue;
+		}
+
 		StreamPacket spkt;
-		spkt.version = 4;
-		spkt.timestamp = fs.timestamp;
-		spkt.streamID = 0; // FIXME: fs.id;
+		spkt.version = 5;
+		spkt.timestamp = fs.timestamp();
+		spkt.streamID = fs.frameset();
 		spkt.frame_number = offset;
 		spkt.channel = c;
+		spkt.flags = (last_flush) ? ftl::codecs::kFlagCompleted : 0;
 
-		auto &tile = _getTile(fs.id, cc);
+		auto &tile = _getTile(fs.id(), cc);
 
 		ftl::codecs::Encoder *enc = tile.encoder[(offset==0)?0:1];
 		if (!enc) {
@@ -332,10 +422,12 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 
 		// Upload if in host memory
 		for (auto &f : fs.frames) {
-			if (!fs.hasFrame(f.id)) continue;
-			if (f.isCPU(c)) {
-				f.upload(Channels<0>(cc), cv::cuda::StreamAccessor::getStream(enc->stream()));
-			}
+			if (!fs.hasFrame(f.source())) continue;
+
+			// FIXME:
+			//if (f.isCPU(c)) {
+			//	f.upload(Channels<0>(cc), cv::cuda::StreamAccessor::getStream(enc->stream()));
+			//}
 		}
 
 		int count = _generateTiles(fs, offset, cc, enc->stream(), lossless, is_stereo);
@@ -374,7 +466,7 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 					cv::imshow("Test", tmp);
 					cv::waitKey(1);*/
 				} else {
-					LOG(ERROR) << "Encoding failed";
+					LOG(ERROR) << "Encoding failed for channel " << (int)cc;
 				}
 			} catch (std::exception &e) {
 				LOG(ERROR) << "Exception in encoder: " << e.what();
@@ -387,8 +479,130 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 	}
 }
 
+void Sender::_encodeAudioChannel(ftl::data::FrameSet &fs, Channel c, bool reset, bool last_flush) {
+	
+	// TODO: combine into multiple opus streams
+	for (size_t i=0; i<fs.frames.size(); ++i) {
+		if (!fs.frames[i].hasChannel(c)) continue;
+
+		const auto &listdata = fs.frames[i].get<std::list<ftl::audio::Audio>>(c);
+
+		//auto &settings = fs.frames[i].getSettings();
+
+		StreamPacket spkt;
+		spkt.version = 5;
+		spkt.timestamp = fs.timestamp();
+		spkt.streamID = fs.frameset();
+		spkt.frame_number = i;
+		spkt.channel = c;
+		spkt.flags = (last_flush) ? ftl::codecs::kFlagCompleted : 0;
+
+		ftl::codecs::Packet pkt;
+		pkt.codec = ftl::codecs::codec_t::OPUS;
+		pkt.frame_count = 1;
+		pkt.flags = (c == Channel::AudioStereo) ? ftl::codecs::kFlagStereo : 0;
+		pkt.bitrate = 180;
+
+		// Find encoder here ...
+		ftl::audio::Encoder *enc = _getAudioEncoder(fs.frameset(), i, c, pkt);
+
+		// Do encoding into pkt.data
+		if (!enc) {
+			LOG(ERROR) << "Could not find audio encoder";
+			return;
+		}
+		
+		for (auto &data : listdata) {
+			if (!enc->encode(data.data(), pkt)) {
+				LOG(ERROR) << "Could not encode audio";
+				return;
+			}
+		}
+
+		stream_->post(spkt, pkt);
+	}
+}
+
+void Sender::_encodeDataChannel(ftl::data::FrameSet &fs, Channel c, bool reset, bool last_flush) {
+	int i=0;
+
+	// TODO: Pack all frames into a single packet
+	for (auto &f : fs.frames) {
+		StreamPacket spkt;
+		spkt.version = 5;
+		spkt.timestamp = fs.timestamp();
+		spkt.streamID = fs.frameset();
+		spkt.frame_number = i++;
+		spkt.channel = c;
+		spkt.flags = (last_flush) ? ftl::codecs::kFlagCompleted : 0;
+
+		ftl::codecs::Packet pkt;
+		pkt.frame_count = 1;
+		pkt.codec = codec_t::MSGPACK;
+		pkt.bitrate = 255;
+		pkt.flags = 0;
+		
+		auto encoder = ftl::data::getTypeEncoder(f.type(c));
+		if (encoder) {
+			if (encoder(f, c, pkt.data)) {
+				stream_->post(spkt, pkt);
+			}
+		} else {
+			LOG(WARNING) << "Missing msgpack encoder";
+		}
+	}
+}
+
+void Sender::_encodeDataChannel(ftl::data::Frame &f, Channel c, bool reset) {
+	StreamPacket spkt;
+	spkt.version = 5;
+	spkt.timestamp = f.timestamp();
+	spkt.streamID = f.frameset();
+	spkt.frame_number = f.source();
+	spkt.channel = c;
+
+	ftl::codecs::Packet pkt;
+	pkt.frame_count = 1;
+	pkt.codec = codec_t::MSGPACK;
+	pkt.bitrate = 255;
+	pkt.flags = 0;
+	
+	auto encoder = ftl::data::getTypeEncoder(f.type(c));
+	if (encoder) {
+		if (encoder(f, c, pkt.data)) {
+			stream_->post(spkt, pkt);
+		}
+	} else {
+		LOG(WARNING) << "Missing msgpack encoder";
+	}
+}
+
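+// Channel id ranges assumed by this dispatch: 0-31 video, 32-63 audio,
+// 64 and above generic (msgpack-encoded) data.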
+void Sender::_encodeChannel(ftl::data::FrameSet &fs, Channel c, bool reset, bool last_flush) {
+	int ic = int(c);
+
+	if (ic < 32) {
+		_encodeVideoChannel(fs, c, reset, last_flush);
+	} else if (ic < 64) {
+		_encodeAudioChannel(fs, c, reset, last_flush);
+	} else {
+		_encodeDataChannel(fs, c, reset, last_flush);
+	}
+}
+
+void Sender::_encodeChannel(ftl::data::Frame &frame, Channel c, bool reset) {
+	int ic = int(c);
+
+	if (ic < 32) {
+		//_encodeVideoChannel(frame, c, reset);
+	} else if (ic < 64) {
+		//_encodeAudioChannel(frame, c, reset);
+	} else {
+		_encodeDataChannel(frame, c, reset);
+	}
+}
+
 cv::Rect Sender::_generateROI(const ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, int offset, bool stereo) {
-	const ftl::rgbd::Frame &cframe = fs.firstFrame();
+	const ftl::data::Frame &cframe = fs.firstFrame();
 	int rwidth = cframe.get<cv::cuda::GpuMat>(c).cols;
 	if (stereo) rwidth *= 2;
 	int rheight = cframe.get<cv::cuda::GpuMat>(c).rows;
@@ -425,9 +639,9 @@ float Sender::_selectFloatMax(Channel c) {
 }
 
 int Sender::_generateTiles(const ftl::rgbd::FrameSet &fs, int offset, Channel c, cv::cuda::Stream &stream, bool lossless, bool stereo) {
-	auto &surface = _getTile(fs.id, c);
+	auto &surface = _getTile(fs.id(), c);
 
-	const ftl::rgbd::Frame *cframe = nullptr; //&fs.frames[offset];
+	const ftl::data::Frame *cframe = nullptr; //&fs.frames[offset];
 
 	const auto &m = fs.firstFrame().get<cv::cuda::GpuMat>(c);
 
diff --git a/components/streams/src/stream.cpp b/components/streams/src/stream.cpp
index 43c53eae6..57c9c5cd9 100644
--- a/components/streams/src/stream.cpp
+++ b/components/streams/src/stream.cpp
@@ -8,26 +8,56 @@ using ftl::stream::Broadcast;
 using ftl::stream::Intercept;
 using ftl::stream::Stream;
 
-const ftl::codecs::Channels<0> &Stream::available(int fs) const {
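+// Set helpers providing the intersection, difference and inequality operations
+// that the previous Channels<0> bitset supplied, now over unordered_set<Channel>.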
+std::unordered_set<ftl::codecs::Channel> operator&(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b) {
+	std::unordered_set<ftl::codecs::Channel> result;
+	for (auto &i : a) {
+		if (b.find(i) != b.end()) result.insert(i);
+	}
+	return result;
+}
+
+std::unordered_set<ftl::codecs::Channel> operator-(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b) {
+	std::unordered_set<ftl::codecs::Channel> result;
+	for (auto &i : a) {
+		if (b.find(i) == b.end()) result.insert(i);
+	}
+	return result;
+}
+
+bool operator!=(const std::unordered_set<ftl::codecs::Channel> &a, const std::unordered_set<ftl::codecs::Channel> &b) {
+	if (a.size() != b.size()) return true;
+	for (auto &i : a) {
+		if (b.count(i) == 0) return true;
+	}
+	return false;
+}
+
+const std::unordered_set<ftl::codecs::Channel> &Stream::available(int fs) const {
 	SHARED_LOCK(mtx_, lk);
 	if (fs < 0 || static_cast<uint32_t>(fs) >= state_.size()) throw FTL_Error("Frameset index out-of-bounds: " << fs);
 	return state_[fs].available;
 }
 
-const ftl::codecs::Channels<0> &Stream::selected(int fs) const {
+const std::unordered_set<ftl::codecs::Channel> &Stream::selected(int fs) const {
 	SHARED_LOCK(mtx_, lk);
 	if (fs < 0 || static_cast<uint32_t>(fs) >= state_.size()) throw FTL_Error("Frameset index out-of-bounds: " << fs);
 	return state_[fs].selected;
 }
 
-void Stream::select(int fs, const ftl::codecs::Channels<0> &s, bool make) {
+std::unordered_set<ftl::codecs::Channel> Stream::selectedNoExcept(int fs) const {
+	SHARED_LOCK(mtx_, lk);
+	if (fs < 0 || static_cast<uint32_t>(fs) >= state_.size()) return {};
+	return state_[fs].selected;
+}
+
+void Stream::select(int fs, const std::unordered_set<ftl::codecs::Channel> &s, bool make) {
 	UNIQUE_LOCK(mtx_, lk);
 	if (fs < 0 || (!make && static_cast<uint32_t>(fs) >= state_.size())) throw FTL_Error("Frameset index out-of-bounds: " << fs);
 	if (static_cast<uint32_t>(fs) >= state_.size()) state_.resize(fs+1);
 	state_[fs].selected = s;
 }
 
-ftl::codecs::Channels<0> &Stream::available(int fs) {
+std::unordered_set<ftl::codecs::Channel> &Stream::available(int fs) {
 	UNIQUE_LOCK(mtx_, lk);
 	if (fs < 0) throw FTL_Error("Frameset index out-of-bounds: " << fs);
 	if (static_cast<uint32_t>(fs) >= state_.size()) state_.resize(fs+1);
@@ -45,7 +75,10 @@ Muxer::Muxer(nlohmann::json &config) : Stream(config), nid_{0} {
 }
 
 Muxer::~Muxer() {
-
+	UNIQUE_LOCK(mutex_,lk);
+	for (auto &se : streams_) {
+		se.handle.cancel();
+	}
 }
 
 
@@ -54,54 +87,80 @@ void Muxer::add(Stream *s, size_t fsid) {
 	if (fsid < 0u || fsid >= ftl::stream::kMaxStreams) return;
 
 	auto &se = streams_.emplace_back();
-	int i = streams_.size()-1;
+	//int i = streams_.size()-1;
 	se.stream = s;
+	ftl::stream::Muxer::StreamEntry *ptr = &se;
 
-	s->onPacket([this,s,i,fsid](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+	se.handle = std::move(s->onPacket([this,s,fsid,ptr](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 		//TODO: Allow input streams to have other streamIDs
 		// Same fsid means same streamIDs map together in the end
-	
+
+		/*ftl::stream::Muxer::StreamEntry *ptr = nullptr;
+		{
+			SHARED_LOCK(mutex_,lk);
+			ptr = &streams_[i];
+		}*/
+
 		ftl::codecs::StreamPacket spkt2 = spkt;
+		ptr->original_fsid = spkt.streamID;
 		spkt2.streamID = fsid;
 
 		if (spkt2.frame_number < 255) {
-			int id = _lookup(fsid, i, spkt.frame_number);
+			int id = _lookup(fsid, ptr, spkt.frame_number, pkt.frame_count);
 			spkt2.frame_number = id;
 		}
 
 		_notify(spkt2, pkt);
 		s->select(spkt.streamID, selected(fsid));
-	});
+		return true;
+	}));
 }
 
-bool Muxer::onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
+void Muxer::remove(Stream *s) {
 	UNIQUE_LOCK(mutex_,lk);
-	cb_ = cb;
-	return true;
+	for (auto i = streams_.begin(); i != streams_.end(); ++i) {
+		if (i->stream == s) {
+			i->handle.cancel();
+			auto *se = &(*i);
+
+			for (size_t j=0; j<kMaxStreams; ++j) {
+				for (auto &k : revmap_[j]) {
+					if (k.first == se) {
+						k.first = nullptr;
+					}
+				}
+			}
+
+			streams_.erase(i);
+			return;
+		}
+	}
 }
 
-int Muxer::originStream(size_t fsid, int fid) {
+ftl::stream::Stream *Muxer::originStream(size_t fsid, int fid) {
 	if (fsid < ftl::stream::kMaxStreams && static_cast<uint32_t>(fid) < revmap_[fsid].size()) {
-		return std::get<0>(revmap_[fsid][fid]);
+		return std::get<0>(revmap_[fsid][fid])->stream;
 	}
-	return -1;
+	return nullptr;
 }
 
 bool Muxer::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 	SHARED_LOCK(mutex_, lk);
-	available(spkt.frameSetID()) += spkt.channel;
-	
+	if (pkt.data.size() > 0 || !(spkt.flags & ftl::codecs::kFlagRequest)) available(spkt.frameSetID()) += spkt.channel;
+
 	if (spkt.streamID < ftl::stream::kMaxStreams && spkt.frame_number < revmap_[spkt.streamID].size()) {
-		auto [sid, ssid] = revmap_[spkt.streamID][spkt.frame_number];
-		auto &se = streams_[sid];
+		auto [se, ssid] = revmap_[spkt.streamID][spkt.frame_number];
+		//auto &se = streams_[sid];
+
+		if (!se) return false;
 
 		//LOG(INFO) << "POST " << spkt.frame_number;
 
 		ftl::codecs::StreamPacket spkt2 = spkt;
-		spkt2.streamID = 0;
+		spkt2.streamID = se->original_fsid;
 		spkt2.frame_number = ssid;
-		se.stream->select(0, selected(spkt.frameSetID()));
-		return se.stream->post(spkt2, pkt);
+		se->stream->select(spkt2.streamID, selected(spkt.frameSetID()));
+		return se->stream->post(spkt2, pkt);
 	} else {
 		return false;
 	}
@@ -137,22 +196,27 @@ void Muxer::reset() {
 	}
 }
 
-int Muxer::_lookup(size_t fsid, int sid, int ssid) {
+int Muxer::_lookup(size_t fsid, ftl::stream::Muxer::StreamEntry *se, int ssid, int count) {
 	SHARED_LOCK(mutex_, lk);
-	auto &se = streams_[sid];
-	if (static_cast<uint32_t>(ssid) >= se.maps.size()) {
+	//auto &se = streams_[sid];
+	if (static_cast<uint32_t>(ssid) >= se->maps.size()) {
 		lk.unlock();
 		{
 			UNIQUE_LOCK(mutex_, lk2);
-			if (static_cast<uint32_t>(ssid) >= se.maps.size()) {
+			while (static_cast<uint32_t>(ssid) >= se->maps.size()) {
 				int nid = nid_[fsid]++;
-				se.maps.push_back(nid);
-				revmap_[fsid].push_back({sid,ssid});
+				revmap_[fsid].push_back({se, static_cast<uint32_t>(se->maps.size())});
+				se->maps.push_back(nid);
+				for (int i=1; i<count; ++i) {
+					int nid = nid_[fsid]++;
+					revmap_[fsid].push_back({se, static_cast<uint32_t>(se->maps.size())});
+					se->maps.push_back(nid);
+				}
 			}
 		}
 		lk.lock();
 	}
-	return se.maps[ssid];
+	return se->maps[ssid];
 }
 
 void Muxer::_notify(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
@@ -160,7 +224,7 @@ void Muxer::_notify(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Pa
 	available(spkt.frameSetID()) += spkt.channel;
 
 	try {
-		if (cb_) cb_(spkt, pkt);  // spkt.frame_number < 255 && 
+		cb_.trigger(spkt, pkt);  // spkt.frame_number < 255 &&
 	} catch (std::exception &e) {
 		LOG(ERROR) << "Exception in packet handler: " << e.what();
 	}
@@ -180,35 +244,28 @@ void Broadcast::add(Stream *s) {
 	UNIQUE_LOCK(mutex_,lk);
 
 	streams_.push_back(s);
-	s->onPacket([this,s](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
-		LOG(INFO) << "BCAST Request: " << (int)spkt.streamID << " " << (int)spkt.channel << " " << spkt.timestamp;
+	handles_.push_back(std::move(s->onPacket([this,s](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		//LOG(INFO) << "BCAST Request: " << (int)spkt.streamID << " " << (int)spkt.channel << " " << spkt.timestamp;
 		SHARED_LOCK(mutex_, lk);
 		if (spkt.frameSetID() < 255) available(spkt.frameSetID()) += spkt.channel;
-		if (cb_) cb_(spkt, pkt);
+		cb_.trigger(spkt, pkt);
 		if (spkt.streamID < 255) s->select(spkt.streamID, selected(spkt.streamID));
-	});
+		return true;
+	})));
 }
 
 void Broadcast::remove(Stream *s) {
 	UNIQUE_LOCK(mutex_,lk);
-	s->onPacket(nullptr);
+	// TODO: Find and remove handle also
 	streams_.remove(s);
 }
 
 void Broadcast::clear() {
 	UNIQUE_LOCK(mutex_,lk);
-	for (auto s : streams_) {
-		s->onPacket(nullptr);
-	}
+	handles_.clear();
 	streams_.clear();
 }
 
-bool Broadcast::onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
-	UNIQUE_LOCK(mutex_,lk);
-	cb_ = cb;
-	return true;
-}
-
 bool Broadcast::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 	SHARED_LOCK(mutex_, lk);
 	if (spkt.frameSetID() < 255) available(spkt.frameSetID()) += spkt.channel;
@@ -266,22 +323,17 @@ void Intercept::setStream(Stream *s) {
 	UNIQUE_LOCK(mutex_,lk);
 
 	stream_ = s;
-	s->onPacket([this](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+	handles_.push_back(std::move(s->onPacket([this](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 		SHARED_LOCK(mutex_, lk);
 		available(spkt.frameSetID()) += spkt.channel;
-		if (cb_) cb_(spkt, pkt);
+		cb_.trigger(spkt, pkt);
 		if (intercept_) intercept_(spkt, pkt);
 		stream_->select(spkt.streamID, selected(spkt.streamID));
-	});
-}
-
-bool Intercept::onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
-	UNIQUE_LOCK(mutex_,lk);
-	cb_ = cb;
-	return true;
+		return true;
+	})));
 }
 
-bool Intercept::onIntercept(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
+bool Intercept::onIntercept(const std::function<bool(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
 	UNIQUE_LOCK(mutex_,lk);
 	intercept_ = cb;
 	return true;
diff --git a/components/streams/test/CMakeLists.txt b/components/streams/test/CMakeLists.txt
index 288f2ff07..272a87a24 100644
--- a/components/streams/test/CMakeLists.txt
+++ b/components/streams/test/CMakeLists.txt
@@ -8,6 +8,8 @@ target_include_directories(stream_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../in
 target_link_libraries(stream_unit
 	ftlcommon ftlcodecs ftlrgbd)
 
+target_precompile_headers(stream_unit REUSE_FROM ftldata)
+
 add_test(StreamUnitTest stream_unit)
 
 ### File Stream Unit ###########################################################
@@ -21,6 +23,8 @@ target_include_directories(filestream_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/.
 target_link_libraries(filestream_unit
 	ftlcommon ftlcodecs ftlrgbd)
 
+target_precompile_headers(filestream_unit REUSE_FROM ftldata)
+
 add_test(FileStreamUnitTest filestream_unit)
 
 ### Net Stream Unit ###########################################################
@@ -48,6 +52,8 @@ target_include_directories(sender_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../in
 target_link_libraries(sender_unit
 	ftlcommon ftlcodecs ftlrgbd ftlaudio)
 
+target_precompile_headers(sender_unit REUSE_FROM ftldata)
+
 add_test(SenderUnitTest sender_unit)
 
 ### Receiver Unit ##############################################################
@@ -58,9 +64,58 @@ $<TARGET_OBJECTS:CatchTest>
 	../src/stream.cpp
 	../src/injectors.cpp
 	../src/parsers.cpp
+	../src/builder.cpp
 )
 target_include_directories(receiver_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_link_libraries(receiver_unit
 	ftlcommon ftlcodecs ftlrgbd ftlaudio)
 
+target_precompile_headers(receiver_unit REUSE_FROM ftldata)
+
 add_test(ReceiverUnitTest receiver_unit)
+
+### Receiver Sender Unit #######################################################
+add_executable(recsend_unit
+$<TARGET_OBJECTS:CatchTest>
+	./recsend_unit.cpp
+	../src/receiver.cpp
+	../src/stream.cpp
+	../src/sender.cpp
+	../src/builder.cpp
+)
+target_include_directories(recsend_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(recsend_unit
+	ftlcommon ftlcodecs ftlrgbd ftlaudio)
+
+target_precompile_headers(recsend_unit REUSE_FROM ftldata)
+
+add_test(RecSendUnitTest recsend_unit)
+
+### Builder Unit ###############################################################
+add_executable(builder_unit
+$<TARGET_OBJECTS:CatchTest>
+	./builder_unit.cpp
+	../src/builder.cpp
+)
+target_include_directories(builder_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(builder_unit
+	ftlcommon ftldata)
+
+target_precompile_headers(builder_unit REUSE_FROM ftldata)
+
+add_test(BuilderUnitTest builder_unit)
+
+
+### Feed Unit ##################################################################
+add_executable(feed_unit
+	$<TARGET_OBJECTS:CatchTest>
+	./feed_unit.cpp
+	../src/feed.cpp
+)
+target_include_directories(feed_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(feed_unit
+	ftlrgbd ftlstreams ftloperators ftlcommon ftldata)
+
+target_precompile_headers(feed_unit REUSE_FROM ftldata)
+
+add_test(FeedUnitTest feed_unit)
diff --git a/components/streams/test/builder_unit.cpp b/components/streams/test/builder_unit.cpp
new file mode 100644
index 000000000..9d2d7c077
--- /dev/null
+++ b/components/streams/test/builder_unit.cpp
@@ -0,0 +1,201 @@
+#include "catch.hpp"
+
+#include <ftl/streams/builder.hpp>
+
+using ftl::data::Pool;
+using ftl::data::Frame;
+using ftl::data::FrameSet;
+using ftl::streams::ForeignBuilder;
+using ftl::streams::LocalBuilder;
+using ftl::codecs::Channel;
+
+TEST_CASE("ftl::streams::ForeignBuilder can obtain a frameset", "[]") {
+	SECTION("with one frame allocated") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.get(100, 0);
+		{
+			auto fs = builder.get(100);
+
+			REQUIRE( fs->frameset() == 44 );
+			REQUIRE( fs->source() == 255);
+			REQUIRE( fs->timestamp() == 100 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].status() == ftl::data::FrameStatus::CREATED );
+			REQUIRE( fs->frames[0].id() == (44<<8) );
+			REQUIRE( fs->frames[0].timestamp() == 100 );
+		}
+	}
+
+	SECTION("with five frames allocated") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.get(100, 4);
+		builder.get(100, 0);
+
+		{
+			auto fs = builder.get(100);
+
+			REQUIRE( fs->frameset() == 44 );
+			REQUIRE( fs->timestamp() == 100 );
+			REQUIRE( fs->frames.size() == 5 );
+			REQUIRE( fs->frames[3].status() == ftl::data::FrameStatus::CREATED );
+			REQUIRE( fs->frames[3].id() == (44<<8)+3 );
+			REQUIRE( fs->frames[3].timestamp() == 100 );
+		}
+	}
+}
+
+TEST_CASE("ftl::streams::ForeignBuilder can complete a frame", "[]") {
+	SECTION("with two frames allocated") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.get(100, 1);
+		builder.get(100, 0);
+
+		{
+			auto fs = builder.get(100);
+			fs->completed(0);
+
+			REQUIRE( fs->frameset() == 44 );
+			REQUIRE( fs->timestamp() == 100 );
+			REQUIRE( fs->frames.size() == 2 );
+			//REQUIRE( fs->frames[0].status() == ftl::data::FrameStatus::CREATED );
+			REQUIRE( fs->firstFrame().id() == (44<<8) );
+			REQUIRE( fs->firstFrame().timestamp() == 100 );
+		}
+	}
+}
+
+TEST_CASE("ftl::streams::ForeignBuilder can complete a frameset", "[]") {
+	SECTION("with one frame allocated and no buffering") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.setBufferSize(0);
+
+		builder.get(100, 0);
+
+		int fsid = 0;
+
+		auto h = builder.onFrameSet([&fsid](const ftl::data::FrameSetPtr& fs) {
+			fsid = fs->frameset();
+			return false;
+		});
+
+		{
+			auto fs = builder.get(100);
+			fs->completed(0);
+		}
+
+		// TODO: Find better way to wait...
+		std::this_thread::sleep_for(std::chrono::milliseconds(10));
+		REQUIRE( fsid == 44 );
+	}
+
+	SECTION("with two frames allocated and no buffering") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.setBufferSize(0);
+
+		builder.get(100, 1);
+
+		int fsid = 0;
+
+		auto h = builder.onFrameSet([&fsid](const ftl::data::FrameSetPtr& fs) {
+			fsid = fs->frameset();
+			return false;
+		});
+
+		{
+			auto fs = builder.get(100);
+			fs->completed(0);
+			fs->completed(1);
+		}
+
+		// TODO: Find better way to wait...
+		std::this_thread::sleep_for(std::chrono::milliseconds(10));
+		REQUIRE( fsid == 44 );
+	}
+
+	SECTION("does not complete a partial") {
+		Pool pool(2,5);
+		ForeignBuilder builder(&pool, 44);
+
+		builder.setBufferSize(0);
+
+		builder.get(100, 1);
+
+		int fsid = 0;
+
+		auto h = builder.onFrameSet([&fsid](const ftl::data::FrameSetPtr& fs) {
+			fsid = fs->frameset();
+			return false;
+		});
+
+		{
+			auto fs = builder.get(100);
+			fs->completed(1);
+		}
+
+		// TODO: Find better way to wait...
+		std::this_thread::sleep_for(std::chrono::milliseconds(10));
+		REQUIRE( fsid == 0 );
+	}
+}
+
+TEST_CASE("ftl::streams::LocalBuilder can provide empty frames", "[]") {
+	SECTION("a single empty frameset") {
+		Pool pool(2,5);
+		LocalBuilder builder(&pool, 45);
+
+		auto fs = builder.getNextFrameSet(100);
+
+		REQUIRE( fs );
+		REQUIRE( fs->timestamp() == 100 );
+		REQUIRE( fs->frames.size() == 1 );
+		REQUIRE( fs->hasFrame(0) );
+		REQUIRE( fs->count == 1 );
+	}
+
+	SECTION("multiple framesets") {
+		Pool pool(2,5);
+		LocalBuilder builder(&pool, 45);
+
+		auto fs = builder.getNextFrameSet(100);
+		fs->firstFrame().create<int>(Channel::Control) = 77;
+
+		fs = builder.getNextFrameSet(110);
+
+		REQUIRE( fs );
+		REQUIRE( fs->timestamp() == 110 );
+		REQUIRE( fs->frames.size() == 1 );
+		REQUIRE( fs->hasFrame(0) );
+		REQUIRE( fs->hasChannel(Channel::Control) == false );
+	}
+}
+
+TEST_CASE("ftl::streams::LocalBuilder can provide filled frames", "[]") {
+	SECTION("a single filled frameset") {
+		Pool pool(2,5);
+		LocalBuilder builder(&pool, 45);
+
+		// Fake some received data, as done by Receiver class.
+		{
+			auto pfs = builder.get(100);
+			pfs->firstFrame().createChange<int>(Channel::Control, ftl::data::ChangeType::FOREIGN) = 56;
+			pfs->completed(0);
+		}
+
+		auto fs = builder.getNextFrameSet(100);
+
+		REQUIRE( fs );
+		REQUIRE( fs->timestamp() == 100 );
+		REQUIRE( fs->frames.size() == 1 );
+		REQUIRE( fs->frames[0].get<int>(Channel::Control) == 56 );
+	}
+}
diff --git a/components/streams/test/feed_unit.cpp b/components/streams/test/feed_unit.cpp
new file mode 100644
index 000000000..d2df6e59b
--- /dev/null
+++ b/components/streams/test/feed_unit.cpp
@@ -0,0 +1,34 @@
+#include <catch.hpp>
+
+#include <nlohmann/json.hpp>
+#include <ftl/streams/feed.hpp>
+
+#include <ftl/operators/colours.hpp>
+
+using ftl::config::json_t;
+
+TEST_CASE("ftl::streams::Feed can obtain a frameset", "[]") {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	json_t cfg1 = json_t{
+		{"$id","ftl://test/1"}
+	};
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+
+	auto* net = ftl::create<ftl::net::Universe>(cfg1);
+	auto* feed = ftl::create<ftl::stream::Feed>(cfg2, net);
+
+	feed->add("./file.ftl");
+	feed->add("file:///absolutefile.ftl");
+	//feed->add("file://relativefile.ftl");  // This is not allowed
+	feed->add("file:/./relativefile.ftl");
+	feed->add("file:./relativefile.ftl");
+	feed->add("device:dev1");
+	feed->add("device:/dev2");
+	feed->add("device://dev3");
+
+}
diff --git a/components/streams/test/filestream_unit.cpp b/components/streams/test/filestream_unit.cpp
index 8962cede1..25c33b580 100644
--- a/components/streams/test/filestream_unit.cpp
+++ b/components/streams/test/filestream_unit.cpp
@@ -2,6 +2,7 @@
 
 #include <ftl/streams/filestream.hpp>
 #include <nlohmann/json.hpp>
+#include <ftl/timer.hpp>
 
 using ftl::stream::File;
 using ftl::stream::Stream;
@@ -29,19 +30,21 @@ TEST_CASE("ftl::stream::File write and read", "[stream]") {
 
 		REQUIRE( writer->begin() );
 
-		REQUIRE( writer->post({4,ftl::timer::get_time(),2,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,ftl::timer::get_time(),2,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
 
 		writer->end();
 
 		reader->set("filename", "/tmp/ftl_file_stream_test.ftl");
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		REQUIRE( reader->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = reader->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
-		}) );
+			return true;
+		});
 		REQUIRE( reader->begin(false) );
 
-		//reader->tick();
+		reader->tick(100);
+		reader->end();
 
 		//REQUIRE( tspkt.timestamp == 0 );
 		REQUIRE( tspkt.streamID == (uint8_t)2 );
@@ -54,9 +57,9 @@ TEST_CASE("ftl::stream::File write and read", "[stream]") {
 
 		REQUIRE( writer->begin() );
 
-		REQUIRE( writer->post({4,0,0,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
-		REQUIRE( writer->post({4,0,1,1,ftl::codecs::Channel::Depth},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
-		REQUIRE( writer->post({4,0,2,1,ftl::codecs::Channel::Screen},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,0,0,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,0,1,1,ftl::codecs::Channel::Depth},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,0,2,1,ftl::codecs::Channel::Screen},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
 
 		writer->end();
 
@@ -64,13 +67,15 @@ TEST_CASE("ftl::stream::File write and read", "[stream]") {
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
 		int count = 0;
-		REQUIRE( reader->onPacket([&tspkt,&count](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = reader->onPacket([&tspkt,&count](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
 			++count;
-		}) );
+			return true;
+		});
 		REQUIRE( reader->begin(false) );
 
-		//reader->tick();
+		reader->tick(100);
+		reader->end();
 
 		REQUIRE( count == 3 );
 		REQUIRE( tspkt.timestamp > 0 );
@@ -85,9 +90,9 @@ TEST_CASE("ftl::stream::File write and read", "[stream]") {
 		REQUIRE( writer->begin() );
 
 		auto time = ftl::timer::get_time();
-		REQUIRE( writer->post({4,time,0,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
-		REQUIRE( writer->post({4,time+ftl::timer::getInterval(),0,1,ftl::codecs::Channel::Depth},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
-		REQUIRE( writer->post({4,time+2*ftl::timer::getInterval(),0,1,ftl::codecs::Channel::Screen},{ftl::codecs::codec_t::Any, ftl::codecs::definition_t::Any, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,time,0,1,ftl::codecs::Channel::Confidence},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,time+ftl::timer::getInterval(),0,1,ftl::codecs::Channel::Depth},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
+		REQUIRE( writer->post({4,time+2*ftl::timer::getInterval(),0,1,ftl::codecs::Channel::Screen},{ftl::codecs::codec_t::Any, 0, 0, 0, 0, {'f'}}) );
 
 		writer->end();
 
@@ -95,27 +100,29 @@ TEST_CASE("ftl::stream::File write and read", "[stream]") {
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
 		int count = 0;
-		REQUIRE( reader->onPacket([&tspkt,&count](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = reader->onPacket([&tspkt,&count](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
 			++count;
-		}) );
+			return true;
+		});
 		REQUIRE( reader->begin(false) );
 
-		//reader->tick();
+		reader->tick(100);
+		std::this_thread::sleep_for(std::chrono::milliseconds(10));
 
 		REQUIRE( count == 1 );
 		//REQUIRE( tspkt.timestamp == 0 );
 		//auto itime = tspkt.timestamp;
 
 		count = 0;
-		reader->tick(0);
+		reader->tick(101);
 		std::this_thread::sleep_for(std::chrono::milliseconds(10));
 
 		REQUIRE( count == 1 );
 		//REQUIRE( tspkt.timestamp == itime+ftl::timer::getInterval() );
 
 		count = 0;
-		reader->tick(0);
+		reader->tick(102);
 		std::this_thread::sleep_for(std::chrono::milliseconds(10));
 
 		REQUIRE( count == 1 );
diff --git a/components/streams/test/receiver_unit.cpp b/components/streams/test/receiver_unit.cpp
index 0ed51f850..0f8b362af 100644
--- a/components/streams/test/receiver_unit.cpp
+++ b/components/streams/test/receiver_unit.cpp
@@ -3,9 +3,12 @@
 #include <ftl/streams/receiver.hpp>
 #include <ftl/codecs/nvidia_encoder.hpp>
 #include <ftl/streams/injectors.hpp>
+#include <ftl/rgbd/frame.hpp>
 
 #include <nlohmann/json.hpp>
 
+#include <loguru.hpp>
+
 using ftl::codecs::definition_t;
 using ftl::codecs::codec_t;
 using ftl::stream::Receiver;
@@ -14,17 +17,13 @@ using ftl::rgbd::FrameSet;
 using ftl::codecs::Channel;
 using ftl::codecs::Channels;
 using ftl::config::json_t;
+using ftl::data::FrameID;
 
 class TestStream : public ftl::stream::Stream {
 	public:
 	explicit TestStream(nlohmann::json &config) : ftl::stream::Stream(config) {};
 	~TestStream() {};
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
-		cb_ = cb;
-		return true;
-	}
-
 	bool post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 		available(spkt.streamID) += spkt.channel;
 		if (pkt.data.size() == 0) {
@@ -36,7 +35,7 @@ class TestStream : public ftl::stream::Stream {
 				select(spkt.frameSetID(), selected(spkt.frameSetID()) + spkt.channel);
 			}
 		}
-		if (cb_) cb_(spkt, pkt);
+		cb_.trigger(spkt, pkt);
 		return true;
 	}
 
@@ -44,19 +43,22 @@ class TestStream : public ftl::stream::Stream {
 	bool end() override { return true; }
 	bool active() override { return true; }
 
-	private:
-	std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
+	//private:
+	//std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
 };
 
 
 TEST_CASE( "Receiver generating onFrameSet" ) {
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT);
 	json_t global = json_t{{"$id","ftl://test"}};
 	ftl::config::configure(global);
 
+	ftl::data::Pool pool(5,7);
+
 	json_t cfg = json_t{
 		{"$id","ftl://test/1"}
 	};
-	auto *receiver = ftl::create<Receiver>(cfg);
+	auto *receiver = ftl::create<Receiver>(cfg, &pool);
 
 	json_t cfg2 = json_t{
 		{"$id","ftl://test/2"}
@@ -80,12 +82,15 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 	spkt.channel = Channel::Colour;
 	spkt.streamID = 0;
 
-	ftl::rgbd::Frame dummy;
-	ftl::rgbd::FrameState state;
-	state.getLeft().width = 1280;
-	state.getLeft().height = 720;
-	dummy.setOrigin(&state);
-	ftl::stream::injectCalibration(&stream, dummy, 0, 0, 0);
+	ftl::data::Frame dummy = pool.allocate(FrameID(0,0),10);
+	dummy.store();
+	ftl::rgbd::Frame &state = dummy.cast<ftl::rgbd::Frame>();
+	state.setLeft().width = 1280;
+	state.setLeft().height = 720;
+
+	// Must tell it to wait for colour before completing.
+	stream.select(0, {Channel::Colour}, true);
+	ftl::stream::injectCalibration(&stream, state, 10, 0, 0);
 
 	ftl::timer::start(false);
 
@@ -95,17 +100,18 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 		bool r = encoder.encode(m, pkt);
 		REQUIRE( r );
 
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		int count = 0;
-		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
 			++count;
 
-			REQUIRE( fs.timestamp == 10 );
-			REQUIRE( fs.frames.size() == 1 );
-			REQUIRE( fs.frames[0].hasChannel(Channel::Colour) );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Colour) );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
 
 			return true;
 		});
@@ -118,16 +124,19 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 
 	SECTION("multi-frameset") {
 		cv::cuda::GpuMat m(cv::Size(1280,720), CV_8UC4, cv::Scalar(0));
-		ftl::stream::injectCalibration(&stream, dummy, 1, 1, 0);
+
+		stream.select(1, {Channel::Colour}, true);
+		ftl::stream::injectCalibration(&stream, state, 10, 1, 0);
 
 		bool r = encoder.encode(m, pkt);
 		REQUIRE( r );
 
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		std::atomic<int> mask = 0;
-		receiver->onFrameSet([&mask](ftl::rgbd::FrameSet &fs) {
-			mask |= 1 << fs.id;
+		auto h = receiver->onFrameSet([&mask](const ftl::data::FrameSetPtr& fs) {
+			mask |= 1 << fs->frameset();
 			return true;
 		});
 
@@ -142,26 +151,27 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 
 	SECTION("a tiled colour frame") {
 		cv::cuda::GpuMat m(cv::Size(2560,720), CV_8UC4, cv::Scalar(0));
-		ftl::stream::injectCalibration(&stream, dummy, 0, 0, 1);
+		ftl::stream::injectCalibration(&stream, state, 10, 0, 1);
 
 		pkt.frame_count = 2;
 		bool r = encoder.encode(m, pkt);
 		REQUIRE( r );
 
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		int count = 0;
-		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
 			++count;
 
-			REQUIRE( fs.timestamp == 10 );
-			REQUIRE( fs.frames.size() == 2 );
-			REQUIRE( fs.frames[0].hasChannel(Channel::Colour) );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
-			REQUIRE( fs.frames[1].hasChannel(Channel::Colour) );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 2 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Colour) );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
+			REQUIRE( fs->frames[1].hasChannel(Channel::Colour) );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
 
 			return true;
 		});
@@ -174,7 +184,9 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 
 	SECTION("a tiled lossy depth frame") {
 		cv::cuda::GpuMat m(cv::Size(2560,720), CV_32F, cv::Scalar(0));
-		ftl::stream::injectCalibration(&stream, dummy, 0, 0, 1);
+		ftl::stream::injectCalibration(&stream, state, 10, 0, 1);
+
+		stream.select(0, {Channel::Depth}, true);
 
 		spkt.channel = Channel::Depth;
 		pkt.frame_count = 2;
@@ -182,20 +194,21 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 		bool r = encoder.encode(m, pkt);
 		REQUIRE( r );
 
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		int count = 0;
-		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
 			++count;
 
-			REQUIRE( fs.timestamp == 10 );
-			REQUIRE( fs.frames.size() == 2 );
-			REQUIRE( fs.frames[0].hasChannel(Channel::Depth) );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
-			REQUIRE( fs.frames[1].hasChannel(Channel::Depth) );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 2 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Depth) );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
+			REQUIRE( fs->frames[1].hasChannel(Channel::Depth) );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
 
 			return true;
 		});
@@ -208,7 +221,9 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 
 	SECTION("a tiled lossless depth frame") {
 		cv::cuda::GpuMat m(cv::Size(2560,720), CV_32F, cv::Scalar(0));
-		ftl::stream::injectCalibration(&stream, dummy, 0, 0, 1);
+		ftl::stream::injectCalibration(&stream, state, 10, 0, 1);
+
+		stream.select(0, {Channel::Depth}, true);
 
 		spkt.channel = Channel::Depth;
 		pkt.frame_count = 2;
@@ -217,20 +232,21 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 		bool r = encoder.encode(m, pkt);
 		REQUIRE( r );
 
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		int count = 0;
-		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
 			++count;
 
-			REQUIRE( fs.timestamp == 10 );
-			REQUIRE( fs.frames.size() == 2 );
-			REQUIRE( fs.frames[0].hasChannel(Channel::Depth) );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
-			REQUIRE( fs.frames[1].hasChannel(Channel::Depth) );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
-			REQUIRE( fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 2 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Depth) );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
+			REQUIRE( fs->frames[1].hasChannel(Channel::Depth) );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Depth).rows == 720 );
+			REQUIRE( fs->frames[1].get<cv::cuda::GpuMat>(Channel::Depth).type() == CV_32F );
 
 			return true;
 		});
@@ -245,16 +261,20 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 	//while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
 	delete receiver;
 	//ftl::config::cleanup();
+	//ftl::data::clearRegistry();
 }
 
 TEST_CASE( "Receiver sync bugs" ) {
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT);
 	json_t global = json_t{{"$id","ftl://test"}};
 	ftl::config::configure(global);
 
+	ftl::data::Pool pool(5,7);
+
 	json_t cfg = json_t{
 		{"$id","ftl://test/1"}
 	};
-	auto *receiver = ftl::create<Receiver>(cfg);
+	auto *receiver = ftl::create<Receiver>(cfg, &pool);
 
 	json_t cfg2 = json_t{
 		{"$id","ftl://test/2"}
@@ -278,16 +298,16 @@ TEST_CASE( "Receiver sync bugs" ) {
 	spkt.channel = Channel::Colour;
 	spkt.streamID = 0;
 
-	ftl::rgbd::Frame dummy;
-	ftl::rgbd::FrameState state;
-	state.getLeft().width = 1280;
-	state.getLeft().height = 720;
-	dummy.setOrigin(&state);
-	ftl::stream::injectCalibration(&stream, dummy, 0, 0, 0);
+	ftl::data::Frame dummy = pool.allocate(FrameID(0,0),10);
+	dummy.store();
+	ftl::rgbd::Frame &state = dummy.cast<ftl::rgbd::Frame>();
+	state.setLeft().width = 1280;
+	state.setLeft().height = 720;
 
-	ftl::timer::start(false);
+	stream.select(0, Channel::Colour + Channel::Colour2, true);
+	ftl::stream::injectCalibration(&stream, state, 10, 0, 0);
 
-	stream.select(0, Channel::Colour + Channel::Colour2);
+	ftl::timer::start(false);
 
 	SECTION("out of phase packets") {
 		cv::cuda::GpuMat m(cv::Size(1280,720), CV_8UC4, cv::Scalar(0));
@@ -298,11 +318,12 @@ TEST_CASE( "Receiver sync bugs" ) {
 		int count = 0;
 		int64_t ts = 0;
 		bool haswrongchan = false;
-		receiver->onFrameSet([&count,&ts,&haswrongchan](ftl::rgbd::FrameSet &fs) {
-			++count;
+		auto h = receiver->onFrameSet([&count,&ts,&haswrongchan](const ftl::data::FrameSetPtr& fs) {
 
-			ts = fs.timestamp;
-			haswrongchan = fs.frames[0].hasChannel(Channel::ColourHighRes);
+			ts = fs->timestamp();
+			haswrongchan = fs->frames[0].hasChannel(Channel::ColourHighRes);
+
+			++count;
 
 			return true;
 		});
@@ -310,12 +331,14 @@ TEST_CASE( "Receiver sync bugs" ) {
 		try { stream.post(spkt, pkt); } catch(...) {}
 		spkt.timestamp = 10;
 		spkt.channel = Channel::ColourHighRes;
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		try { stream.post(spkt, pkt); } catch(...) {}
 		spkt.timestamp = 20;
 		spkt.channel = Channel::Colour2;
 		try { stream.post(spkt, pkt); } catch(...) {}
 		spkt.timestamp = 20;
 		spkt.channel = Channel::Colour;
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		try { stream.post(spkt, pkt); } catch(...) {}
 
 		int i=10;
@@ -329,16 +352,20 @@ TEST_CASE( "Receiver sync bugs" ) {
 	ftl::timer::stop(true);
 	//while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
 	delete receiver;
+	//ftl::data::clearRegistry();
 }
 
 TEST_CASE( "Receiver non zero buffer" ) {
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT);
 	json_t global = json_t{{"$id","ftl://test"}};
 	ftl::config::configure(global);
 
+	ftl::data::Pool pool(5,7);
+
 	json_t cfg = json_t{
 		{"$id","ftl://test/1"}
 	};
-	auto *receiver = ftl::create<Receiver>(cfg);
+	auto *receiver = ftl::create<Receiver>(cfg, &pool);
 
 	json_t cfg2 = json_t{
 		{"$id","ftl://test/2"}
@@ -362,12 +389,12 @@ TEST_CASE( "Receiver non zero buffer" ) {
 	spkt.channel = Channel::Colour;
 	spkt.streamID = 0;
 
-	ftl::rgbd::Frame dummy;
-	ftl::rgbd::FrameState state;
-	state.getLeft().width = 1280;
-	state.getLeft().height = 720;
-	dummy.setOrigin(&state);
-	ftl::stream::injectCalibration(&stream, dummy, 0, 0, 0);
+	ftl::data::Frame dummy = pool.allocate(FrameID(0,0),10);
+	dummy.store();
+	ftl::rgbd::Frame &state = dummy.cast<ftl::rgbd::Frame>();
+	state.setLeft().width = 1280;
+	state.setLeft().height = 720;
+	ftl::stream::injectCalibration(&stream, state, 10, 0, 0);
 
 	ftl::timer::start(false);
 
@@ -378,20 +405,22 @@ TEST_CASE( "Receiver non zero buffer" ) {
 		REQUIRE( r );
 
 		int count = 0;
-		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
 			++count;
 
-			REQUIRE( fs.timestamp == 10 );
-			REQUIRE( fs.frames.size() == 1 );
-			REQUIRE( fs.frames[0].hasChannel(Channel::Colour) );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
-			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Colour) );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
+			REQUIRE( fs->frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
 
 			return true;
 		});
 
 		stream.post(spkt, pkt);
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		spkt.timestamp += 10;
+		spkt.flags |= ftl::codecs::kFlagCompleted;
 		stream.post(spkt, pkt);
 
 		int i=10;
@@ -403,4 +432,226 @@ TEST_CASE( "Receiver non zero buffer" ) {
 	ftl::timer::stop(true);
 	//while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
 	delete receiver;
+	//ftl::data::clearRegistry();
 }
+
+TEST_CASE( "Receiver for data channels" ) {
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT);
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	ftl::data::Pool pool(5,7);
+
+	json_t cfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *receiver = ftl::create<Receiver>(cfg, &pool);
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+	TestStream stream(cfg2);
+	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
+
+	ftl::codecs::Packet pkt;
+	pkt.codec = codec_t::MSGPACK;
+	pkt.bitrate = 255;
+	pkt.flags = 0;
+	pkt.frame_count = 1;
+
+	ftl::codecs::StreamPacket spkt;
+	spkt.version = 4;
+	spkt.timestamp = 10;
+	spkt.frame_number = 0;
+	spkt.channel = Channel::Data;
+	spkt.streamID = 0;
+
+	ftl::timer::start(false);
+
+	SECTION("a single data packet") {
+
+		pkt.data.resize(0);
+		ftl::util::FTLVectorBuffer buf(pkt.data);
+		msgpack::pack(buf, 5.0f);
+
+		spkt.flags |= ftl::codecs::kFlagCompleted;
+		stream.post(spkt, pkt);
+
+		int count = 0;
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
+			++count;
+
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Data) );
+			REQUIRE( fs->frames[0].get<float>(Channel::Data) == 5.0f );
+
+			return true;
+		});
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	SECTION("a single data packet in overall frameset") {
+
+		spkt.frame_number = 255;
+		pkt.data.resize(0);
+		ftl::util::FTLVectorBuffer buf(pkt.data);
+		msgpack::pack(buf, 5.0f);
+
+		stream.post(spkt, pkt);
+
+		// Need to have at least one frame for this to work
+		spkt.frame_number = 0;
+		spkt.flags |= ftl::codecs::kFlagCompleted;
+		stream.post(spkt, pkt);
+
+		int count = 0;
+		auto h = receiver->onFrameSet([&count](const std::shared_ptr<ftl::data::FrameSet>& fs) {
+			++count;
+
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->hasChannel(Channel::Data) );
+			REQUIRE( fs->get<float>(Channel::Data) == 5.0f );
+
+			return true;
+		});
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	SECTION("a single calibration packet") {
+
+		pkt.data.resize(0);
+		ftl::util::FTLVectorBuffer buf(pkt.data);
+		ftl::rgbd::Camera calib;
+		calib.width = 1024;
+		msgpack::pack(buf, calib);
+
+		spkt.flags |= ftl::codecs::kFlagCompleted;
+		stream.post(spkt, pkt);
+
+		int count = 0;
+		auto h = receiver->onFrameSet([&count](const ftl::data::FrameSetPtr& fs) {
+			++count;
+
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Data) );
+			REQUIRE( fs->frames[0].get<ftl::rgbd::Camera>(Channel::Data).width == 1024 );
+
+			return true;
+		});
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	SECTION("a single pose packet") {
+
+		pkt.data.resize(0);
+		ftl::util::FTLVectorBuffer buf(pkt.data);
+		Eigen::Matrix4d pose;
+		msgpack::pack(buf, pose);
+
+		spkt.flags |= ftl::codecs::kFlagCompleted;
+		stream.post(spkt, pkt);
+
+		int count = 0;
+		auto h = receiver->onFrameSet([&count](const std::shared_ptr<ftl::data::FrameSet>& fs) {
+			++count;
+
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Data) );
+			fs->frames[0].get<Eigen::Matrix4d>(Channel::Data);
+
+			return true;
+		});
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	ftl::timer::stop(true);
+	delete receiver;
+}
+
+// TODO: Hard to test because a valid audio encoding must be constructed first.
+/*TEST_CASE( "Receiver for audio channels" ) {
+	//ftl::data::make_channel<ftl::rgbd::Camera>(Channel::Calibration, "calibration", ftl::data::StorageMode::PERSISTENT);
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	ftl::data::Pool pool(5,7);
+
+	json_t cfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *receiver = ftl::create<Receiver>(cfg, &pool);
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+	TestStream stream(cfg2);
+	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
+
+	ftl::codecs::Packet pkt;
+	pkt.codec = codec_t::OPUS;
+	pkt.bitrate = 255;
+	pkt.flags = 0;
+	pkt.frame_count = 1;
+
+	ftl::codecs::StreamPacket spkt;
+	spkt.version = 4;
+	spkt.timestamp = 10;
+	spkt.frame_number = 0;
+	spkt.channel = Channel::AudioMono;
+	spkt.streamID = 0;
+
+	ftl::timer::start(false);
+
+	SECTION("a single data packet") {
+
+		pkt.data.resize(0);
+		ftl::util::FTLVectorBuffer buf(pkt.data);
+		msgpack::pack(buf, 5.0f);
+
+		stream.post(spkt, pkt);
+
+		int count = 0;
+		auto h = receiver->onFrameSet([&count](const std::shared_ptr<ftl::data::FrameSet>& fs) {
+			++count;
+
+			REQUIRE( fs->timestamp() == 10 );
+			REQUIRE( fs->frames.size() == 1 );
+			REQUIRE( fs->frames[0].hasChannel(Channel::Data) );
+			REQUIRE( fs->frames[0].get<float>(Channel::Data) == 5.0f );
+
+			return true;
+		});
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	
+
+	ftl::timer::stop(true);
+	delete receiver;
+}*/
diff --git a/components/streams/test/recsend_unit.cpp b/components/streams/test/recsend_unit.cpp
new file mode 100644
index 000000000..25ff480a7
--- /dev/null
+++ b/components/streams/test/recsend_unit.cpp
@@ -0,0 +1,179 @@
+#include "catch.hpp"
+
+#include <ftl/streams/receiver.hpp>
+#include <ftl/streams/sender.hpp>
+#include <ftl/rgbd/frame.hpp>
+
+#include <nlohmann/json.hpp>
+
+#include <loguru.hpp>
+
+using ftl::codecs::definition_t;
+using ftl::codecs::codec_t;
+using ftl::stream::Receiver;
+using ftl::stream::Sender;
+using ftl::data::Frame;
+using ftl::data::FrameSet;
+using ftl::codecs::Channel;
+using ftl::config::json_t;
+using ftl::data::FrameID;
+
+class TestStream : public ftl::stream::Stream {
+	public:
+	explicit TestStream(nlohmann::json &config) : ftl::stream::Stream(config) {};
+	~TestStream() {};
+
+	bool post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		available(spkt.streamID) += spkt.channel;
+		if (pkt.data.size() == 0) {
+			if (spkt.frameSetID() == 255) {
+				for (size_t i=0; i<size(); ++i) {
+					select(i, selected(i) + spkt.channel);
+				}
+			} else {
+				select(spkt.frameSetID(), selected(spkt.frameSetID()) + spkt.channel);
+			}
+		}
+		cb_.trigger(spkt, pkt);
+		return true;
+	}
+
+	bool begin() override { return true; }
+	bool end() override { return true; }
+	bool active() override { return true; }
+};
+
+class DummySource : public ftl::data::DiscreteSource {
+	public:
+
+	bool capture(int64_t ts) override { return true; }
+	bool retrieve(ftl::data::Frame &f) override { return true; }
+};
+
+
+TEST_CASE( "Send and receiver via encoding" ) {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	ftl::data::Pool pool(5,7);
+
+	json_t rcfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *receiver = ftl::create<Receiver>(rcfg, &pool);
+
+	json_t scfg = json_t{
+		{"$id","ftl://test/2"}
+	};
+	auto *sender = ftl::create<Sender>(scfg);
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/3"}
+	};
+
+	TestStream stream(cfg2);
+
+	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
+	sender->setStream(&stream);
+
+	ftl::timer::start(false);
+
+	SECTION("a single data only frame") {
+		stream.select(0, {Channel::Control}, true);
+
+		Frame f = pool.allocate(ftl::data::FrameID(0,0), 1000);
+		f.store();
+		auto fsptr = FrameSet::fromFrame(f);
+		FrameSet &fs = *fsptr;
+
+		fs.frames[0].create<int>(Channel::Control) = 57;
+
+		int count = 0;
+		ftl::data::FrameSetPtr result;
+		auto h = receiver->onFrameSet([&count,&result](const ftl::data::FrameSetPtr &fs) {
+			count++;
+			result = fs;
+			return true;
+		});
+
+		sender->post(fs, Channel::Control);
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+		REQUIRE( result->frames[0].has(Channel::Control) );
+		REQUIRE( result->frames[0].getChangeType(Channel::Control) == ftl::data::ChangeType::COMPLETED );
+		REQUIRE( result->frames[0].get<int>(Channel::Control) == 57 );
+	}
+
+	ftl::timer::stop(true);
+
+	delete receiver;
+	delete sender;
+}
+
+TEST_CASE( "Response via loopback" ) {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	ftl::data::Pool pool(5,7);
+
+	json_t rcfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *receiver = ftl::create<Receiver>(rcfg, &pool);
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/3"}
+	};
+
+	TestStream stream(cfg2);
+
+	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
+
+	auto hh = pool.onFlush([receiver](ftl::data::Frame &f, Channel c) {
+		receiver->loopback(f, c);
+		return true;
+	});
+
+	ftl::timer::start(false);
+
+	SECTION("a single data only frame") {
+		DummySource source;
+
+		stream.select(0, {Channel::Control}, true);
+
+		auto *builder = new ftl::streams::ManualSourceBuilder(&pool, 0, &source);
+		builder->setFrameRate(10000);
+		std::shared_ptr<ftl::streams::BaseBuilder> builderptr(builder);
+		receiver->registerBuilder(builderptr);
+
+		int count = 0;
+		ftl::data::FrameSetPtr result;
+		auto h = receiver->onFrameSet([&count,&result](const ftl::data::FrameSetPtr &fs) {
+			count++;
+			result = fs;
+			auto response = fs->frames[0].response();
+			response.create<int>(Channel::Control) = count;
+			return true;
+		});
+
+		builder->tick();
+		builder->tick();
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 2 );
+		REQUIRE( result->frames[0].has(Channel::Control) );
+		REQUIRE( result->frames[0].getChangeType(Channel::Control) == ftl::data::ChangeType::FOREIGN );
+		REQUIRE( result->frames[0].get<int>(Channel::Control) == 1 );
+	}
+
+	ftl::timer::stop(true);
+
+	delete receiver;
+}
diff --git a/components/streams/test/sender_unit.cpp b/components/streams/test/sender_unit.cpp
index 8e646957d..59c7f916a 100644
--- a/components/streams/test/sender_unit.cpp
+++ b/components/streams/test/sender_unit.cpp
@@ -2,14 +2,17 @@
 
 #include <ftl/streams/sender.hpp>
 #include <ftl/codecs/hevc.hpp>
+#include <ftl/data/framepool.hpp>
 
 #include <nlohmann/json.hpp>
 
+#include <loguru.hpp>
+
 using ftl::codecs::definition_t;
 using ftl::codecs::codec_t;
 using ftl::stream::Sender;
-using ftl::rgbd::Frame;
-using ftl::rgbd::FrameSet;
+using ftl::data::Frame;
+using ftl::data::FrameSet;
 using ftl::codecs::Channel;
 using ftl::codecs::Channels;
 using ftl::config::json_t;
@@ -19,11 +22,6 @@ class TestStream : public ftl::stream::Stream {
 	explicit TestStream(nlohmann::json &config) : ftl::stream::Stream(config) {};
 	~TestStream() {};
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
-		cb_ = cb;
-		return true;
-	}
-
 	bool onIntercept(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
 		icb_ = cb;
 		return true;
@@ -39,7 +37,7 @@ class TestStream : public ftl::stream::Stream {
 			} else {
 				select(spkt.frameSetID(), selected(spkt.frameSetID()) + spkt.channel);
 			}
-			if (cb_) cb_(spkt, pkt);
+			cb_.trigger(spkt, pkt);
 		}
 		if (icb_) icb_(spkt, pkt);
 		return true;
@@ -50,7 +48,7 @@ class TestStream : public ftl::stream::Stream {
 	bool active() override { return true; }
 
 	private:
-	std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
+	//std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
 	std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> icb_;
 };
 
@@ -64,9 +62,11 @@ TEST_CASE( "Sender::post() video frames" ) {
 	};
 	auto *sender = ftl::create<Sender>(cfg);
 	
-	FrameSet fs;
-	fs.frames.emplace_back();
-	fs.timestamp = 1000;
+	ftl::data::Pool pool(4,6);
+	Frame f = pool.allocate(ftl::data::FrameID(0,0), 1000);
+	f.store();
+	auto fsptr = FrameSet::fromFrame(f);
+	FrameSet &fs = *fsptr;
 
 	json_t cfg2 = json_t{
 		{"$id","ftl://test/2"}
@@ -74,32 +74,35 @@ TEST_CASE( "Sender::post() video frames" ) {
 	TestStream stream(cfg2);
 	sender->setStream(&stream);
 
+	ftl::codecs::StreamPacket prev_spkt;
 	ftl::codecs::StreamPacket spkt;
 	ftl::codecs::Packet pkt;
 	int count = 0;
 
-	stream.onIntercept([&count,&spkt,&pkt](const ftl::codecs::StreamPacket &pspkt, const ftl::codecs::Packet &ppkt) {
+	stream.onIntercept([&count,&spkt,&pkt,&prev_spkt](const ftl::codecs::StreamPacket &pspkt, const ftl::codecs::Packet &ppkt) {
+		prev_spkt = spkt;
 		spkt = pspkt;
 		pkt = ppkt;
 		++count;
 	});
 
 	SECTION("a single colour frame") {
-		stream.select(0, Channels(Channel::Colour), true);
+		stream.select(0, {Channel::Colour}, true);
 
 		fs.count = 1;
 		fs.mask = 1;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 
-		sender->post(fs);
+		sender->post(fs, Channel::Colour);
 
 		REQUIRE( count == 1 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
 		REQUIRE( spkt.channel == Channel::Colour );
+		REQUIRE( (spkt.flags & ftl::codecs::kFlagCompleted) );
 		REQUIRE( pkt.codec == codec_t::HEVC );
 		REQUIRE( pkt.data.size() > 0 );
 		REQUIRE( pkt.frame_count == 1 );
@@ -107,24 +110,27 @@ TEST_CASE( "Sender::post() video frames" ) {
 	}
 
 	SECTION("two colour frames tiled") {
-		stream.select(0, Channels(Channel::Colour), true);
+		stream.select(0, {Channel::Colour}, true);
+
+		fs.resize(2);
+		fs.frames[1].store();
 
 		fs.count = 2;
 		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
-		fs.frames.emplace_back();
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 		fs.frames[1].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
-		fs.frames[1].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
+		fs.frames[1].set<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 
-		sender->post(fs);
+		sender->post(fs, Channel::Colour);
 
 		REQUIRE( count == 1 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
 		REQUIRE( spkt.channel == Channel::Colour );
+		REQUIRE( (spkt.flags & ftl::codecs::kFlagCompleted) );
 		REQUIRE( pkt.codec == codec_t::HEVC );
 		REQUIRE( pkt.data.size() > 0 );
 		REQUIRE( pkt.frame_count == 2 );
@@ -132,20 +138,22 @@ TEST_CASE( "Sender::post() video frames" ) {
 	}
 
 	SECTION("two depth frames tiled") {
-		stream.select(0, Channels(Channel::Depth), true);
+		stream.select(0, {Channel::Depth}, true);
+
+		fs.resize(2);
+		fs.frames[1].store();
 
 		fs.count = 2;
 		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
-		fs.frames.emplace_back();
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 		fs.frames[1].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
+		fs.frames[1].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 
-		sender->post(fs);
+		sender->post(fs, Channel::Depth);
 
 		REQUIRE( count == 1 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
@@ -158,23 +166,25 @@ TEST_CASE( "Sender::post() video frames" ) {
 	}
 
 	SECTION("10 depth frames tiled") {
-		stream.select(0, Channels(Channel::Depth), true);
+		stream.select(0, {Channel::Depth}, true);
+
+		fs.resize(10);
 
 		fs.count = 10;
 		fs.mask = 0x3FF;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 
 		for (int i=1; i<10; ++i) {
-			fs.frames.emplace_back();
+			fs.frames[i].store();
 			fs.frames[i].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-			fs.frames[i].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
+			fs.frames[i].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 		}
 
-		sender->post(fs);
+		sender->post(fs, Channel::Depth);
 
 		REQUIRE( count == 2 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 9 );
 		REQUIRE( spkt.streamID == 0 );
@@ -187,21 +197,23 @@ TEST_CASE( "Sender::post() video frames" ) {
 	}
 
 	SECTION("two lossless depth frames tiled") {
-		stream.select(0, Channels(Channel::Depth), true);
+		stream.select(0, {Channel::Depth}, true);
+
+		fs.resize(2);
+		fs.frames[1].store();
 
 		fs.count = 2;
 		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
-		fs.frames.emplace_back();
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 		fs.frames[1].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[1].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
+		fs.frames[1].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 
 		sender->set("codec", (int)codec_t::HEVC_LOSSLESS);
-		sender->post(fs);
+		sender->post(fs, Channel::Depth);
 
 		REQUIRE( count == 1 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
@@ -219,18 +231,23 @@ TEST_CASE( "Sender::post() video frames" ) {
 		fs.count = 1;
 		fs.mask = 1;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 
-		sender->post(fs);
+		sender->post(fs, Channel::Colour);
+		sender->post(fs, Channel::Depth);
 
 		REQUIRE( count == 2 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
 		REQUIRE( spkt.channel == Channel::Depth );
+		REQUIRE( !(prev_spkt.flags & ftl::codecs::kFlagCompleted) );
+		REQUIRE( prev_spkt.channel == Channel::Colour );
+		REQUIRE( prev_spkt.timestamp == 1000 );
+		REQUIRE( (spkt.flags & ftl::codecs::kFlagCompleted) );
 		REQUIRE( pkt.codec == codec_t::HEVC );
 		REQUIRE( pkt.data.size() > 0 );
 		REQUIRE( pkt.frame_count == 1 );
@@ -250,9 +267,11 @@ TEST_CASE( "Sender request to control encoding" ) {
 	};
 	auto *sender = ftl::create<Sender>(cfg);
 	
-	FrameSet fs;
-	fs.frames.emplace_back();
-	fs.timestamp = 1000;
+	ftl::data::Pool pool(4,6);
+	Frame f = pool.allocate(ftl::data::FrameID(0,0), 1000);
+	f.store();
+	auto fsptr = FrameSet::fromFrame(f);
+	FrameSet &fs = *fsptr;
 
 	json_t cfg2 = json_t{
 		{"$id","ftl://test/2"}
@@ -276,19 +295,22 @@ TEST_CASE( "Sender request to control encoding" ) {
 		stream.post({
 			4, 1000, 0, 255, Channel::Colour
 		},{
-			codec_t::Any, definition_t::Any, 255, 255, 0, {}
+			codec_t::Any, 0, 255, 255, 0, {}
 		});
 
 		fs.count = 1;
 		fs.mask = 1;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
-		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
+		fs.frames[0].set<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
+
+		fs.frames[0].create<std::tuple<ftl::rgbd::Camera, Channel, int>>(Channel::Calibration);
+		fs.frames[0].create<Eigen::Matrix4d>(Channel::Pose);
 
 		count = 0;
-		sender->post(fs);
+		sender->post(fs, Channel::Colour);
 
-		REQUIRE( count == 5 );
-		REQUIRE( spkt.version == 4 );
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
 		REQUIRE( spkt.timestamp == 1000 );
 		REQUIRE( (int)spkt.frame_number == 0 );
 		REQUIRE( spkt.streamID == 0 );
@@ -299,3 +321,254 @@ TEST_CASE( "Sender request to control encoding" ) {
 		REQUIRE( ftl::codecs::hevc::validNAL(pkt.data.data(), pkt.data.size()) );
 	}
 }
+
+TEST_CASE( "Sender::post() data channels" ) {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	json_t cfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *sender = ftl::create<Sender>(cfg);
+	
+	ftl::data::Pool pool(4,6);
+	Frame f = pool.allocate(ftl::data::FrameID(0,0), 1000);
+	f.store();
+	auto fsptr = FrameSet::fromFrame(f);
+	FrameSet &fs = *fsptr;
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+	TestStream stream(cfg2);
+	sender->setStream(&stream);
+
+	ftl::codecs::StreamPacket spkt;
+	ftl::codecs::Packet pkt;
+	int count = 0;
+
+	stream.onIntercept([&count,&spkt,&pkt](const ftl::codecs::StreamPacket &pspkt, const ftl::codecs::Packet &ppkt) {
+		spkt = pspkt;
+		pkt = ppkt;
+		++count;
+	});
+
+	SECTION("a single calibration channel") {
+		stream.select(0, {Channel::Calibration}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto &calib = std::get<0>(fs.frames[0].create<std::tuple<ftl::rgbd::Camera, Channel, int>>(Channel::Calibration));
+		calib.width = 1024;
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::Calibration);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::Calibration );
+		REQUIRE( (spkt.flags & ftl::codecs::kFlagCompleted) );
+		REQUIRE( pkt.codec == codec_t::MSGPACK );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+	}
+
+	SECTION("a single pose channel") {
+		stream.select(0, {Channel::Pose}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		fs.frames[0].create<Eigen::Matrix4d>(Channel::Pose);
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::Pose);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::Pose );
+		REQUIRE( pkt.codec == codec_t::MSGPACK );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+	}
+
+	SECTION("a single custom channel") {
+		stream.select(0, {Channel::Data}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto &vf = fs.frames[0].create<std::vector<float>>(Channel::Data);
+		vf.push_back(5.0f);
+		vf.push_back(33.0f);
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::Data);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::Data );
+		REQUIRE( pkt.codec == codec_t::MSGPACK );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+		// TODO: Check decodes correctly.
+	}
+
+	SECTION("a single list channel") {
+		stream.select(0, {Channel::Data}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto vf = fs.frames[0].create<std::list<float>>(Channel::Data);
+		vf = 5.0f;
+		vf = 33.0f;
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::Data);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::Data );
+		REQUIRE( pkt.codec == codec_t::MSGPACK );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+		// TODO: Check decodes correctly.
+	}
+}
+
+TEST_CASE( "Sender::post() audio channels" ) {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	json_t cfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *sender = ftl::create<Sender>(cfg);
+	
+	ftl::data::Pool pool(4,6);
+	Frame f = pool.allocate(ftl::data::FrameID(0,0), 1000);
+	f.store();
+	auto fsptr = FrameSet::fromFrame(f);
+	FrameSet &fs = *fsptr;
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+	TestStream stream(cfg2);
+	sender->setStream(&stream);
+
+	ftl::codecs::StreamPacket spkt;
+	ftl::codecs::Packet pkt;
+	int count = 0;
+
+	stream.onIntercept([&count,&spkt,&pkt](const ftl::codecs::StreamPacket &pspkt, const ftl::codecs::Packet &ppkt) {
+		spkt = pspkt;
+		pkt = ppkt;
+		++count;
+	});
+
+	SECTION("a single mono audio channel") {
+		ftl::data::make_type<ftl::rgbd::Camera>();
+
+		stream.select(0, {Channel::AudioMono}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto audio = fs.frames[0].create<std::list<ftl::audio::AudioFrame>>(Channel::AudioMono);
+
+		// Fake 3 audio frames
+		ftl::audio::AudioFrame aframe;
+		aframe.data().resize(3*ftl::audio::kFrameSize);
+		audio = std::move(aframe);
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::AudioMono);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::AudioMono );
+		REQUIRE( pkt.codec == codec_t::OPUS );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+	}
+
+	SECTION("multi frame mono audio channel") {
+		ftl::data::make_type<ftl::rgbd::Camera>();
+
+		stream.select(0, {Channel::AudioMono}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto audio = fs.frames[0].create<std::list<ftl::audio::AudioFrame>>(Channel::AudioMono);
+
+		// Fake 3 audio frames
+		ftl::audio::AudioFrame aframe1;
+		aframe1.data().resize(3*ftl::audio::kFrameSize);
+		audio = std::move(aframe1);
+
+		sender->post(fs, Channel::AudioMono);
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::AudioMono );
+		REQUIRE( pkt.codec == codec_t::OPUS );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+
+		size_t firstsize = pkt.data.size();
+		pkt.data.clear();
+
+		ftl::audio::AudioFrame aframe2;
+		aframe2.data().resize(2*ftl::audio::kFrameSize);
+		audio = std::move(aframe2);
+
+		//fs.frames[0].flush();
+		sender->post(fs, Channel::AudioMono);
+
+		REQUIRE( count == 2 );
+		REQUIRE( pkt.data.size() > firstsize );
+	}
+
+	SECTION("a single stereo audio channel") {
+		ftl::data::make_type<ftl::rgbd::Camera>();
+
+		stream.select(0, {Channel::AudioStereo}, true);
+
+		fs.count = 1;
+		fs.mask = 1;
+		auto audio = fs.frames[0].create<std::list<ftl::audio::AudioFrame>>(Channel::AudioStereo);
+
+		// Fake 3 audio frames
+		ftl::audio::AudioFrame aframe;
+		aframe.data().resize(2*3*ftl::audio::kFrameSize);
+		audio = std::move(aframe);
+
+		fs.frames[0].flush();
+		sender->post(fs, Channel::AudioStereo);
+
+		REQUIRE( count == 1 );
+		REQUIRE( spkt.version == 5 );
+		REQUIRE( spkt.timestamp == 1000 );
+		REQUIRE( (int)spkt.frame_number == 0 );
+		REQUIRE( spkt.streamID == 0 );
+		REQUIRE( spkt.channel == Channel::AudioStereo );
+		REQUIRE( pkt.codec == codec_t::OPUS );
+		REQUIRE( pkt.data.size() > 0 );
+		REQUIRE( pkt.frame_count == 1 );
+	}
+}
diff --git a/components/streams/test/stream_unit.cpp b/components/streams/test/stream_unit.cpp
index aa2093a40..a9d67652a 100644
--- a/components/streams/test/stream_unit.cpp
+++ b/components/streams/test/stream_unit.cpp
@@ -13,14 +13,9 @@ class TestStream : public ftl::stream::Stream {
 	TestStream(nlohmann::json &config) : ftl::stream::Stream(config) {};
 	~TestStream() {};
 
-	bool onPacket(const std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> &cb) {
-		cb_ = cb;
-		return true;
-	}
-
 	bool post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 		available(spkt.streamID) += spkt.channel;
-		if (cb_) cb_(spkt, pkt);
+		cb_.trigger(spkt, pkt);
 		return true;
 	}
 
@@ -29,7 +24,7 @@ class TestStream : public ftl::stream::Stream {
 	bool active() override { return true; }
 
 	private:
-	std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
+	//std::function<void(const ftl::codecs::StreamPacket &, const ftl::codecs::Packet &)> cb_;
 };
 
 TEST_CASE("ftl::stream::Muxer()::write", "[stream]") {
@@ -55,8 +50,9 @@ TEST_CASE("ftl::stream::Muxer()::write", "[stream]") {
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};;
 
-		s->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = s->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( !mux->post({4,100,0,1,ftl::codecs::Channel::Colour},{}) );
@@ -80,8 +76,9 @@ TEST_CASE("ftl::stream::Muxer()::write", "[stream]") {
 		mux->add(s2);
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( s1->post({4,100,0,0,ftl::codecs::Channel::Colour},{}) );
@@ -94,11 +91,13 @@ TEST_CASE("ftl::stream::Muxer()::write", "[stream]") {
 
 		ftl::codecs::StreamPacket tspkt2 = {4,0,0,1,ftl::codecs::Channel::Colour};
 		ftl::codecs::StreamPacket tspkt3 = {4,0,0,1,ftl::codecs::Channel::Colour};
-		s1->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h2 = s1->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt2 = spkt;
+			return true;
 		});
-		s2->onPacket([&tspkt3](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h3 = s2->onPacket([&tspkt3](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt3 = spkt;
+			return true;
 		});
 
 		REQUIRE( mux->post({4,200,0,1,ftl::codecs::Channel::Colour},{}) );
@@ -135,8 +134,9 @@ TEST_CASE("ftl::stream::Muxer()::post multi-frameset", "[stream]") {
 		mux->add(s2,1);
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( s1->post({4,100,0,0,ftl::codecs::Channel::Colour},{}) );
@@ -149,11 +149,13 @@ TEST_CASE("ftl::stream::Muxer()::post multi-frameset", "[stream]") {
 
 		ftl::codecs::StreamPacket tspkt2 = {4,0,0,1,ftl::codecs::Channel::Colour};
 		ftl::codecs::StreamPacket tspkt3 = {4,0,0,1,ftl::codecs::Channel::Colour};
-		s1->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h2 = s1->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt2 = spkt;
+			return true;
 		});
-		s2->onPacket([&tspkt3](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h3 = s2->onPacket([&tspkt3](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt3 = spkt;
+			return true;
 		});
 
 		REQUIRE( mux->post({4,200,1,0,ftl::codecs::Channel::Colour},{}) );
@@ -190,8 +192,9 @@ TEST_CASE("ftl::stream::Muxer()::read", "[stream]") {
 		mux->add(s2);
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( s1->post({4,100,0,0,ftl::codecs::Channel::Colour},{}) );
@@ -228,8 +231,9 @@ TEST_CASE("ftl::stream::Muxer()::read", "[stream]") {
 		mux->add(s2);
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( s1->post({4,100,0,0,ftl::codecs::Channel::Colour},{}) );
@@ -290,8 +294,9 @@ TEST_CASE("ftl::stream::Muxer()::read multi-frameset", "[stream]") {
 		mux->add(s4,1);
 
 		ftl::codecs::StreamPacket tspkt = {4,0,0,1,ftl::codecs::Channel::Colour};
-		mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h = mux->onPacket([&tspkt](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt = spkt;
+			return true;
 		});
 
 		REQUIRE( s1->post({4,100,0,0,ftl::codecs::Channel::Colour},{}) );
@@ -342,11 +347,13 @@ TEST_CASE("ftl::stream::Broadcast()::write", "[stream]") {
 		ftl::codecs::StreamPacket tspkt1 = {4,0,0,1,ftl::codecs::Channel::Colour};
 		ftl::codecs::StreamPacket tspkt2 = {4,0,0,1,ftl::codecs::Channel::Colour};
 
-		s1->onPacket([&tspkt1](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h1 = s1->onPacket([&tspkt1](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt1 = spkt;
+			return true;
 		});
-		s2->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
+		auto h2 = s2->onPacket([&tspkt2](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
 			tspkt2 = spkt;
+			return true;
 		});
 
 		REQUIRE( mux->post({4,100,0,1,ftl::codecs::Channel::Colour},{}) );
diff --git a/components/structures/CMakeLists.txt b/components/structures/CMakeLists.txt
index b47288977..c389b1e5c 100644
--- a/components/structures/CMakeLists.txt
+++ b/components/structures/CMakeLists.txt
@@ -1,11 +1,21 @@
 
-add_library(ftldata ./src/new_frame.cpp ./src/pool.cpp)
+add_library(ftldata ./src/new_frame.cpp ./src/pool.cpp ./src/frameset.cpp ./src/creators.cpp)
 
 target_include_directories(ftldata PUBLIC
 	${CMAKE_CURRENT_SOURCE_DIR}/include)
 
 target_link_libraries(ftldata ftlcommon Eigen3::Eigen ftlcodecs)
 
+target_precompile_headers(ftldata
+	PRIVATE ../common/cpp/include/ftl/utility/msgpack_optional.hpp
+	PRIVATE ../common/cpp/include/ftl/cuda_common.hpp
+	PRIVATE ../common/cpp/include/loguru.hpp
+	PRIVATE include/ftl/data/new_frame.hpp
+	PRIVATE include/ftl/data/new_frameset.hpp
+)
+
+set_property(TARGET ftldata PROPERTY CUDA_ARCHITECTURES OFF)
+
 if (BUILD_TESTS)
 add_subdirectory(test)
 endif()
diff --git a/components/structures/include/ftl/data/channels.hpp b/components/structures/include/ftl/data/channels.hpp
index 341759b13..d01ad9a9b 100644
--- a/components/structures/include/ftl/data/channels.hpp
+++ b/components/structures/include/ftl/data/channels.hpp
@@ -4,10 +4,12 @@
 #include <string>
 #include <ftl/codecs/channels.hpp>
 #include <ftl/exception.hpp>
+#include <ftl/utility/vectorbuffer.hpp>
 
 namespace ftl {
 namespace data {
 
+class Frame;
 
 /** Kind of channel in terms of data persistence */
 enum class StorageMode {
@@ -19,7 +21,8 @@ enum class StorageMode {
 /** If a channel has changed, what is the current status of that change. */
 enum class ChangeType {
 	UNCHANGED,
-	LOCAL,			// Explicit local modification occurred
+	PRIMARY,		// Explicit local primary modification occurred
+	RESPONSE,		// Explicit local response change
 	FOREIGN,		// Received externally, to be forwarded
 	COMPLETED		// Received externally, not to be forwarded
 };
@@ -29,7 +32,8 @@ enum class ChannelStatus {
 	INVALID,		// Any data is stale and should not be referenced
 	VALID,			// Contains currently valid data
 	FLUSHED,		// Has already been transmitted, now read-only
-	DISPATCHED		// Externally received, can't be flushed but can be modified locally
+	DISPATCHED,		// Externally received, can't be flushed but can be modified locally
+	ENCODED			// Still in an encoded form
 };
 
 /* Internal structure for channel configurations. */
@@ -77,18 +81,28 @@ std::string getChannelName(ftl::codecs::Channel);
 /** Unsupported */
 ftl::codecs::Channel getChannelByName(const std::string &name);
 
+/**
+ * Attempts to get a msgpack encoder for this channel. Such encoders are
+ * registered on a typeid basis when creating channels.
+ */
+std::function<bool(const ftl::data::Frame &, ftl::codecs::Channel, std::vector<uint8_t> &)> getTypeEncoder(size_t type);
+
+void setTypeEncoder(size_t type, const std::function<bool(const ftl::data::Frame &, ftl::codecs::Channel, std::vector<uint8_t> &)> &e);
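+
+// Usage sketch (illustrative only, not part of the original API docs): register
+// a msgpack encoder for a hypothetical MyType, mirroring how the tests pack
+// values with ftl::util::FTLVectorBuffer. Assumes MyType is msgpack-packable.
+// setTypeEncoder(typeid(MyType).hash_code(),
+// 	[](const ftl::data::Frame &f, ftl::codecs::Channel c, std::vector<uint8_t> &data) {
+// 		ftl::util::FTLVectorBuffer buf(data);
+// 		msgpack::pack(buf, f.get<MyType>(c));
+// 		return true;
+// 	});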
+
 /**
  * Helper to register a channel using a template specified type.
  */
 template <typename T>
-void make_channel(ftl::codecs::Channel c, const std::string &name, StorageMode mode) {
+bool make_channel(ftl::codecs::Channel c, const std::string &name, StorageMode mode) {
 	// TODO: Generate packer + unpacker?
 	registerChannel(c, {name, mode, typeid(T).hash_code()});
+	return true;
 }
 
 template <>
-inline void make_channel<void>(ftl::codecs::Channel c, const std::string &name, StorageMode mode) {
+inline bool make_channel<void>(ftl::codecs::Channel c, const std::string &name, StorageMode mode) {
 	registerChannel(c, {name, mode, 0});
+	return true;
 }
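+
+// Example sketch: the bool return presumably allows static registration, as in
+// the commented usage in the receiver tests, e.g.
+// static bool reg = make_channel<ftl::rgbd::Camera>(
+// 	ftl::codecs::Channel::Calibration, "calibration", StorageMode::PERSISTENT);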
 
 }
diff --git a/components/structures/include/ftl/data/creators.hpp b/components/structures/include/ftl/data/creators.hpp
new file mode 100644
index 000000000..3cd28e4d3
--- /dev/null
+++ b/components/structures/include/ftl/data/creators.hpp
@@ -0,0 +1,67 @@
+#ifndef _FTL_DATA_FRAMECREATOR_HPP_
+#define _FTL_DATA_FRAMECREATOR_HPP_
+
+#include <ftl/handle.hpp>
+#include <ftl/data/new_frame.hpp>
+
+namespace ftl {
+namespace data {
+
+class Pool;
+
+/**
+ * Create frames on demand.
+ */
+class FrameCreator {
+	friend class Pool;
+
+	public:
+	Frame create();
+	Frame create(int64_t timestamp);
+
+	inline uint32_t id() const { return id_; }
+	inline Pool *pool() const { return pool_; }
+
+	protected:
+	FrameCreator(Pool *p_pool, FrameID p_id) : pool_(p_pool), id_(p_id) {}
+
+	private:
+	Pool *pool_;
+	FrameID id_;
+};
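+
+// Sketch of intended use (assumption; not exercised directly by the tests in
+// this patch): a FrameCreator obtained from a Pool stamps out frames on demand.
+// Frame f = creator.create(ftl::timer::get_time());
+// f.store();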
+
+/**
+ * Abstract class for discrete data sources involving a high-precision capture
+ * step and a slower retrieve step. This works for both camera and audio sources.
+ */
+class DiscreteSource {
+	public:
+	virtual bool capture(int64_t ts)=0;
+	virtual bool retrieve(ftl::data::Frame &)=0;
+};
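+
+// Minimal implementation sketch, mirroring DummySource in recsend_unit.cpp:
+// class NullSource : public DiscreteSource {
+// 	public:
+// 	bool capture(int64_t ts) override { return true; }
+// 	bool retrieve(ftl::data::Frame &f) override { return true; }
+// };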
+
+/**
+ * Create frames at the global frame rate with both capture and retrieve steps.
+ * A source should implement DiscreteSource and be driven by this creator.
+ */
+class IntervalFrameCreator : public ftl::data::FrameCreator {
+	friend class Pool;
+
+	private:
+	explicit IntervalFrameCreator(Pool *p_pool, FrameID p_id, DiscreteSource *src);
+
+	public:
+
+	void start();
+	void stop();
+
+	private:
+	ftl::Handle capture_;
+	ftl::Handle retrieve_;
+	DiscreteSource *src_;
+};
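+
+// Usage sketch (assumption: construction goes via the Pool, since the
+// constructor is private with Pool as a friend):
+// creator.start();  // begin timed capture()/retrieve() of the DiscreteSource
+// creator.stop();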
+
+}
+}
+
+#endif
\ No newline at end of file
diff --git a/components/structures/include/ftl/data/frame.hpp b/components/structures/include/ftl/data/frame.hpp
deleted file mode 100644
index c304e4e97..000000000
--- a/components/structures/include/ftl/data/frame.hpp
+++ /dev/null
@@ -1,567 +0,0 @@
-#pragma once
-#ifndef _FTL_DATA_FRAME_HPP_
-#define _FTL_DATA_FRAME_HPP_
-
-#include <ftl/configuration.hpp>
-#include <ftl/exception.hpp>
-
-#include <ftl/codecs/channels.hpp>
-#include <ftl/codecs/codecs.hpp>
-//#include <ftl/codecs/packet.hpp>
-#include <ftl/utility/vectorbuffer.hpp>
-
-#include <type_traits>
-#include <array>
-//#include <list>
-#include <unordered_map>
-
-#include <Eigen/Eigen>
-
-namespace ftl {
-namespace data {
-
-/**
- * Manage a set of channels corresponding to a single frame. There are three
- * kinds of channel in a frame: 1) the data type of interest (DoI)
- * (eg. audio, video, etc), 2) Persistent state and 3) Generic meta data.
- * The DoI is a template arg and could be in any form. Different DoIs will use
- * different frame instances, ie. audio and video frame types. Persistent state
- * may or may not change between frames but is always available. Finally,
- * generic data is a small amount of information about the primary data that may
- * or may not exist each frame, and is not required to exist.
- * 
- * There is no specification for frame rates, intervals or synchronisation at
- * this level. A frame is a quantum of data of any temporal size which can be
- * added to a FrameSet to be synchronised with other frames.
- * 
- * Use this template class either by inheriting it or just by providing the
- * template arguments. It is not abstract and can work directly.
- * 
- * The template DATA parameter must be a class or struct that implements three
- * methods: 1) `const T& at<T>()` to cast to const type, 2) `T& at<T>()` to cast
- * to non-const type, and 3) `T& make<T>() to create data as a type.
- * 
- * The STATE parameter must be an instance of `ftl::data::FrameState`.
- * 
- * @see ftl::data::FrameState
- * @see ftl::data::FrameSet
- * @see ftl::rgbd::FrameState
- * @see ftl::rgbd::Frame
- */
-template <int BASE, int N, typename STATE, typename DATA>
-class Frame {
-	static_assert(N <= ftl::codecs::Channels<BASE>::kMax, "Too many channels requested");
-
-public:
-	Frame() : origin_(nullptr) {}
-	Frame(Frame &&f) {
-		f.swapTo(*this);
-		f.reset();
-	}
-
-	Frame &operator=(Frame &&f) {
-		f.swapTo(*this);
-		f.reset();
-		return *this;
-	}
-
-	// Prevent frame copy, instead use a move.
-	Frame(const Frame &)=delete;
-	Frame &operator=(const Frame &)=delete;
-
-	/**
-	 * Perform a buffer swap of the selected channels. This is intended to be
-	 * a copy from `this` to the passed frame object but by buffer swap
-	 * instead of memory copy, meaning `this` may become invalid afterwards.
-	 * It is a complete frame swap.
-	 */
-	void swapTo(ftl::codecs::Channels<BASE>, Frame &);
-
-	void swapTo(Frame &);
-
-	/**
-	 * Swap only selected channels to another frame, without resetting or swapping
-	 * any other aspect of the frame. Unlike swapTo, this isn't intended to
-	 * be a complete frame swap.
-	 */
-	void swapChannels(ftl::codecs::Channels<BASE> channels, Frame<BASE,N,STATE,DATA> &);
-
-	void swapChannels(ftl::codecs::Channel, ftl::codecs::Channel);
-
-	/**
-	 * Does a host or device memory copy into the given frame.
-	 */
-	void copyTo(ftl::codecs::Channels<BASE>, Frame &);
-
-	/**
-	 * Create a channel but without any format.
-	 */
-	template <typename T> T &create(ftl::codecs::Channel c);
-
-	/**
-	 * Set the value of a channel. Some channels should not be modified via the
-	 * non-const get method, for example the data channels.
-	 */
-	template <typename T> void create(ftl::codecs::Channel channel, const T &value);
-
-	/**
-	 * Append encoded data for a channel. This will move the data, invalidating
-	 * the original packet structure. It is to be used to allow data that is
-	 * already encoded to be transmitted or saved again without re-encoding.
-	 * A called to `create` will clear all encoded data for that channel.
-	 */
-	//void pushPacket(ftl::codecs::Channel c, ftl::codecs::Packet &pkt);
-
-	/**
-	 * Obtain a list of any existing encodings for this channel.
-	 */
-	//const std::list<ftl::codecs::Packet> &getPackets(ftl::codecs::Channel c) const;
-
-	/**
-	 * Clear any existing encoded packets. Used when the channel data is
-	 * modified and the encodings are therefore out-of-date.
-	 */
-	//void clearPackets(ftl::codecs::Channel c);
-
-	/**
-	 * Reset all channels without releasing memory.
-	 */
-	void reset();
-
-	/**
-	 * Reset all channels and release memory.
-	 */
-	//void resetFull();
-
-	/**
-	 * Is there valid data in channel (either host or gpu). This does not
-	 * verify that any memory or data exists for the channel.
-	 */
-	bool hasChannel(ftl::codecs::Channel channel) const {
-		int c = static_cast<int>(channel);
-		if (c >= 64 && c <= 68) return true;
-		else if (c >= 2048) return data_channels_.has(channel);
-		else if (c < BASE || c >= BASE+N) return false;
-		else return channels_.has(channel);
-	}
-
-	/**
-	 * Obtain a mask of all available channels in the frame.
-	 */
-	inline ftl::codecs::Channels<BASE> getChannels() const { return channels_; }
-
-	inline ftl::codecs::Channels<2048> getDataChannels() const { return data_channels_; }
-
-	/**
-	 * Does this frame have new data for a channel. This is compared with a
-	 * previous frame and always returns true for image data. It may return
-	 * false for persistent state data (calibration, pose etc).
-	 */
-	inline bool hasChanged(ftl::codecs::Channel c) const {
-		return (static_cast<int>(c) < 64) ? true : state_.hasChanged(c);
-	}
-
-	/**
-	 * Method to get reference to the channel content.
-	 * @param	Channel type
-	 * @return	Const reference to channel data
-	 * 
-	 * Result is valid only if hasChannel() is true. Host/Gpu transfer is
-	 * performed, if necessary, but with a warning since an explicit upload or
-	 * download should be used.
-	 */
-	template <typename T> const T& get(ftl::codecs::Channel channel) const;
-
-	/**
-	 * Get the data from a data channel. This only works for the data channels
-	 * and will throw an exception with any others.
-	 */
-	template <typename T> void get(ftl::codecs::Channel channel, T &params) const;
-
-	/**
-	 * Method to get reference to the channel content. The channel must already
-	 * have been created of this will throw an exception. See `getBuffer` to
-	 * get access before creation.
-	 * 
-	 * @param	Channel type
-	 * @return	Reference to channel data
-	 * 
-	 * Result is valid only if hasChannel() is true.
-	 */
-	template <typename T> T& get(ftl::codecs::Channel channel);
-
-	/**
-	 * Method to get reference to the channel content. Unlike `get`, the channel
-	 * must not already exist as this is intended as a pre-create step that
-	 * allocates memory and populates the buffer. `create` must then be called
-	 * to make the channel available.
-	 * 
-	 * @param	Channel type
-	 * @return	Reference to channel data
-	 * 
-	 * Result is valid only if hasChannel() is true.
-	 */
-	template <typename T> T& getBuffer(ftl::codecs::Channel channel);
-
-	/**
-	 * Wrapper accessor function to get frame pose.
-	 */
-	const Eigen::Matrix4d &getPose() const;
-
-	/**
-	 * Change the pose of the origin state and mark as changed.
-	 */
-	void setPose(const Eigen::Matrix4d &pose, bool mark=true);
-
-	/**
-	 * Change the pose of the origin state and mark as changed.
-	 */
-	void patchPose(const Eigen::Matrix4d &pose);
-
-	/**
-	 * Wrapper to access left settings channel.
-	 */
-	const typename STATE::Settings &getSettings() const;
-
-	const typename STATE::Settings &getLeft() const;
-	const typename STATE::Settings &getRight() const;
-
-	void setLeft(const typename STATE::Settings &);
-	void setRight(const typename STATE::Settings &);
-
-	/**
-	 * Change left settings in the origin state. This should send
-	 * the changed parameters in reverse through a stream.
-	 */
-	void setSettings(const typename STATE::Settings &c);
-
-	/**
-	 * Dump the current frame config object to a json string.
-	 */
-	std::string getConfigString() const;
-
-	/**
-	 * Access the raw data channel vector object.
-	 */
-	const std::vector<unsigned char> &getRawData(ftl::codecs::Channel c) const;
-
-	/**
-	 * Provide raw data for a data channel.
-	 */
-	void createRawData(ftl::codecs::Channel c, const std::vector<unsigned char> &v);
-
-	/**
-	 * Wrapper to access a config property. If the property does not exist or
-	 * is not of the requested type then the returned optional is false.
-	 */
-	template <class T>
-	std::optional<T> get(const std::string &name) { return state_.template get<T>(name); }
-
-	/**
-	 * Modify a config property. This does not modify the origin config so
-	 * will not get transmitted over the stream.
-	 * @todo Modify origin to send backwards over a stream.
-	 */
-	template <typename T>
-	void set(const std::string &name, T value) { state_.set(name, value); }
-
-	/**
-	 * Set the persistent state for the frame. This can only be done after
-	 * construction or a reset. Multiple calls to this otherwise will throw
-	 * an exception. The pointer must remain valid for the life of the frame.
-	 */
-	void setOrigin(STATE *state);
-
-	/**
-	 * Get the original frame state object. This can be a nullptr in some rare
-	 * cases. When wishing to change state (pose, calibration etc) then those
-	 * changes must be done on this origin, either directly or via wrappers.
-	 */
-	STATE *origin() const { return origin_; }
-
-	//ftl::codecs::Channels<BASE> completed;
-
-	typedef STATE State;
-
-	int id;
-
-protected:
-	/* Lookup internal state for a given channel. */
-	inline DATA &getData(ftl::codecs::Channel c) { return data_[static_cast<unsigned int>(c)-BASE]; }
-	inline const DATA &getData(ftl::codecs::Channel c) const { return data_[static_cast<unsigned int>(c)-BASE]; }
-
-private:
-	std::array<DATA, N> data_;
-
-	std::unordered_map<int, std::vector<unsigned char>> data_data_;
-
-	ftl::codecs::Channels<BASE> channels_;	// Does it have a channel
-	ftl::codecs::Channels<2048> data_channels_;
-
-	// Persistent state
-	STATE state_;
-	STATE *origin_;
-};
-
-}
-}
-
-// ==== Implementations ========================================================
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::reset() {
-	origin_ = nullptr;
-	channels_.clear();
-	data_channels_.clear();
-	for (size_t i=0u; i<ftl::codecs::Channels<BASE>::kMax; ++i) {
-		data_[i].reset();
-	}
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::swapTo(ftl::codecs::Channels<BASE> channels, Frame<BASE,N,STATE,DATA> &f) {
-	f.reset();
-	f.origin_ = origin_;
-	f.state_ = state_;
-
-	// For all channels in this frame object
-	for (auto c : channels_) {
-		// Should we swap this channel?
-		if (channels.has(c)) {
-			f.channels_ += c;
-			// TODO: Make sure this does a move not copy
-			std::swap(f.getData(c),getData(c));
-		}
-	}
-
-	f.data_data_ = std::move(data_data_);
-	f.data_channels_ = data_channels_;
-	data_channels_.clear();
-	channels_.clear();
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::swapTo(Frame<BASE,N,STATE,DATA> &f) {
-	swapTo(ftl::codecs::Channels<BASE>::All(), f);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::swapChannels(ftl::codecs::Channel a, ftl::codecs::Channel b) {
-	auto &m1 = getData(a);
-	auto &m2 = getData(b);
-
-	auto temp = std::move(m2);
-	m2 = std::move(m1);
-	m1 = std::move(temp);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::swapChannels(ftl::codecs::Channels<BASE> channels, Frame<BASE,N,STATE,DATA> &f) {
-	// For all channels in this frame object
-	for (auto c : channels_) {
-		// Should we swap this channel?
-		if (channels.has(c)) {
-			f.channels_ += c;
-			// TODO: Make sure this does a move not copy
-			std::swap(f.getData(c),getData(c));
-			channels_ -= c;
-		}
-	}
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::copyTo(ftl::codecs::Channels<BASE> channels, Frame<BASE,N,STATE,DATA> &f) {
-	f.reset();
-	f.origin_ = origin_;
-	f.state_ = state_;
-
-	// For all channels in this frame object
-	for (auto c : channels_) {
-		// Should we copy this channel?
-		if (channels.has(c)) {
-			f.channels_ += c;
-			f.getData(c) = getData(c);
-		}
-	}
-
-	f.data_data_ = data_data_;
-	f.data_channels_ = data_channels_;
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-T& ftl::data::Frame<BASE,N,STATE,DATA>::get(ftl::codecs::Channel channel) {
-	if (channel == ftl::codecs::Channel::None) {
-		throw FTL_Error("Attempting to get channel 'None'");
-	}
-
-	// Add channel if not already there
-	if (!channels_.has(channel)) {
-		throw FTL_Error("Frame channel does not exist: " << (int)channel);
-	}
-
-	return getData(channel).template as<T>();
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-T& ftl::data::Frame<BASE,N,STATE,DATA>::getBuffer(ftl::codecs::Channel channel) {
-	if (channel == ftl::codecs::Channel::None) {
-		throw ftl::exception("Attempting to get channel 'None'");
-	}
-
-	if (channels_.has(channel)) {
-		throw ftl::exception(ftl::Formatter() << "Cannot getBuffer on existing channel: " << (int)channel);
-	}
-
-	if (static_cast<int>(channel) < BASE || static_cast<int>(channel) >= BASE+32) {
-		throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
-	}
-
-	return getData(channel).template make<T>();
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-const T& ftl::data::Frame<BASE,N,STATE,DATA>::get(ftl::codecs::Channel channel) const {
-	if (channel == ftl::codecs::Channel::None) {
-		throw FTL_Error("Attempting to get channel 'None'");
-	} else if (channel == ftl::codecs::Channel::Pose) {
-		return state_.template as<T,ftl::codecs::Channel::Pose>();
-	} else if (channel == ftl::codecs::Channel::Calibration) {
-		return state_.template as<T,ftl::codecs::Channel::Calibration>();
-	} else if (channel == ftl::codecs::Channel::Calibration2) {
-		return state_.template as<T,ftl::codecs::Channel::Calibration2>();
-	} else if (channel == ftl::codecs::Channel::Configuration) {
-		return state_.template as<T,ftl::codecs::Channel::Configuration>();
-	}
-
-	// Add channel if not already there
-	if (!channels_.has(channel)) {
-		throw FTL_Error("Frame channel does not exist: " << (int)channel);
-	}
-
-	return getData(channel).template as<T>();
-}
-
-// Default data channel implementation
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-void ftl::data::Frame<BASE,N,STATE,DATA>::get(ftl::codecs::Channel channel, T &params) const {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Cannot use generic type with non data channel");
-	if (!hasChannel(channel)) throw FTL_Error("Data channel does not exist");
-
-	const auto &i = data_data_.find(static_cast<int>(channel));
-	if (i == data_data_.end()) throw FTL_Error("Data channel does not exist");
-
-	auto unpacked = msgpack::unpack((const char*)(*i).second.data(), (*i).second.size());
-	unpacked.get().convert(params);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-T &ftl::data::Frame<BASE,N,STATE,DATA>::create(ftl::codecs::Channel c) {
-	if (c == ftl::codecs::Channel::None) {
-		throw FTL_Error("Cannot create a None channel");
-	}
-	channels_ += c;
-
-	auto &m = getData(c);
-	return m.template make<T>();
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-// cppcheck-suppress *
-template <typename T>
-void ftl::data::Frame<BASE,N,STATE,DATA>::create(ftl::codecs::Channel channel, const T &value) {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Cannot use generic type with non data channel");
-
-	data_channels_ += channel;
-
-	auto &v = *std::get<0>(data_data_.insert({static_cast<int>(channel),{}}));
-	v.second.resize(0);
-	ftl::util::FTLVectorBuffer buf(v.second);
-	msgpack::pack(buf, value);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::setOrigin(STATE *state) {
-	if (origin_ != nullptr) {
-		throw FTL_Error("Can only set origin once after reset");
-	}
-
-	origin_ = state;
-	state_ = *state;
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-const Eigen::Matrix4d &ftl::data::Frame<BASE,N,STATE,DATA>::getPose() const {
-	return get<Eigen::Matrix4d>(ftl::codecs::Channel::Pose);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getLeft() const {
-	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getSettings() const {
-	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getRight() const {
-	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration2);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::setPose(const Eigen::Matrix4d &pose, bool mark) {
-	if (origin_) {
-		if (mark) origin_->setPose(pose);
-		else origin_->getPose() = pose;
-	}
-	state_.setPose(pose);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::patchPose(const Eigen::Matrix4d &pose) {
-	state_.getPose() = pose * state_.getPose();
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::setLeft(const typename STATE::Settings &c) {
-	if (origin_) origin_->setLeft(c);
-	state_.setLeft(c);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::setRight(const typename STATE::Settings &c) {
-	if (origin_) origin_->setRight(c);
-	state_.setRight(c);
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-std::string ftl::data::Frame<BASE,N,STATE,DATA>::getConfigString() const {
-	return ftl::config::dumpJSON(get<nlohmann::json>(ftl::codecs::Channel::Configuration));
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-const std::vector<unsigned char> &ftl::data::Frame<BASE,N,STATE,DATA>::getRawData(ftl::codecs::Channel channel) const {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Non data channel");
-	if (!hasChannel(channel)) throw FTL_Error("Data channel does not exist");
-
-	return data_data_.at(static_cast<int>(channel));
-}
-
-template <int BASE, int N, typename STATE, typename DATA>
-void ftl::data::Frame<BASE,N,STATE,DATA>::createRawData(ftl::codecs::Channel c, const std::vector<unsigned char> &v) {
-	data_data_.insert({static_cast<int>(c), v});
-	data_channels_ += c;
-}
-
-#endif // _FTL_DATA_FRAME_HPP_
diff --git a/components/structures/include/ftl/data/framepool.hpp b/components/structures/include/ftl/data/framepool.hpp
index 204965ff7..ab619531e 100644
--- a/components/structures/include/ftl/data/framepool.hpp
+++ b/components/structures/include/ftl/data/framepool.hpp
@@ -2,6 +2,8 @@
 #define _FTL_DATA_FRAMEPOOL_HPP_
 
 #include <ftl/data/new_frame.hpp>
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/data/creators.hpp>
 #include <list>
 #include <unordered_map>
 
@@ -9,24 +11,38 @@ namespace ftl {
 namespace data {
 
 class Pool {
+	friend class Session;
+	friend class FrameSet;
+
 	public:
 	explicit Pool(size_t min_n, size_t max_n);
 	~Pool();
 
-	ftl::data::Frame allocate(uint32_t id, int64_t timestamp);
+	ftl::data::Frame allocate(FrameID id, int64_t timestamp);
 	void release(Frame &f);
 
-	ftl::data::Session &session(uint32_t id);
+	ftl::data::Session &session(FrameID id);
+	inline ftl::data::Session &group(FrameID id) { return session(id); }
+
+	inline ftl::Handle onFlush(const std::function<bool(ftl::data::Frame&,ftl::codecs::Channel)> &cb) { return flush_.on(cb); }
+
+	inline ftl::Handle onFlushSet(const std::function<bool(ftl::data::FrameSet&,ftl::codecs::Channel)> &cb) { return flush_fs_.on(cb); }
 
-	size_t size(uint32_t id);
+	size_t size(FrameID id);
 
 	size_t size();
 
+	template <typename T, typename ...ARGS>
+	T creator(FrameID id, ARGS ...args) {
+		static_assert(std::is_base_of<ftl::data::FrameCreator, T>::value, "A creator must inherit FrameCreator");
+		return T(this, id, args...);
+	}
+
 	private:
 	struct PoolData {
 		std::list<ftl::data::Frame*> pool;
 		ftl::data::Session session;
-		int64_t last_timestamp;
+		int64_t last_timestamp=0;
 	};
 
 	std::unordered_map<uint32_t, PoolData> pool_;
@@ -34,7 +50,12 @@ class Pool {
 	size_t max_n_;
 	size_t ideal_n_;
 
-	PoolData &_getPool(uint32_t);
+	ftl::Handler<ftl::data::Frame&,ftl::codecs::Channel> flush_;
+	ftl::Handler<ftl::data::FrameSet&,ftl::codecs::Channel> flush_fs_;
+
+	MUTEX mutex_;
+
+	PoolData &_getPool(FrameID);
 };
 
 }
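
A hedged usage sketch, not part of the patch, showing how `Pool`, `FrameID` and `IntervalFrameCreator` are expected to fit together. The pool sizes, the frameset/source numbers and the externally supplied `src` pointer are assumptions for the example.

#include <ftl/data/framepool.hpp>

void runSource(ftl::data::DiscreteSource *src) {
	ftl::data::Pool pool(2, 10);  // min/max pooled frames, values chosen arbitrarily

	// A creator bound to frameset 0, source index 0, driven at the global frame rate.
	auto creator = pool.creator<ftl::data::IntervalFrameCreator>(ftl::data::FrameID(0, 0), src);

	// Observe every frameset flushed through this pool.
	auto handle = pool.onFlushSet([](ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
		// Inspect fs or c here; returning true keeps the handler registered.
		return true;
	});

	creator.start();
	// ... run until shutdown ...
	creator.stop();
}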
diff --git a/components/structures/include/ftl/data/frameset.hpp b/components/structures/include/ftl/data/frameset.hpp
deleted file mode 100644
index 4de7035f5..000000000
--- a/components/structures/include/ftl/data/frameset.hpp
+++ /dev/null
@@ -1,244 +0,0 @@
-#ifndef _FTL_DATA_FRAMESET_HPP_
-#define _FTL_DATA_FRAMESET_HPP_
-
-#include <ftl/threads.hpp>
-#include <ftl/timer.hpp>
-#include <ftl/data/frame.hpp>
-#include <functional>
-
-//#include <opencv2/opencv.hpp>
-#include <vector>
-
-namespace ftl {
-namespace data {
-
-// Allows a latency of 20 frames maximum
-//static const size_t kMaxFramesets = 15;
-static const size_t kMaxFramesInSet = 32;
-
-enum class FSFlag : int {
-	STALE = 0,
-	PARTIAL = 1
-};
-
-/**
- * Represents a set of synchronised frames, each with two channels. This is
- * used to collect all frames from multiple computers that have the same
- * timestamp.
- */
-template <typename FRAME>
-class FrameSet {
-	public:
-
-	int id=0;
-	int64_t timestamp;				// Millisecond timestamp of all frames
-	int64_t originClockDelta;
-	std::vector<FRAME> frames;
-	std::atomic<int> count;				// Number of valid frames
-	std::atomic<unsigned int> mask;		// Mask of all sources that contributed
-	//bool stale;						// True if buffers have been invalidated
-	SHARED_MUTEX mtx;
-
-	Eigen::Matrix4d pose;  // Set to identity by default.
-
-	inline int64_t localTimestamp() const { return timestamp + originClockDelta; }
-
-	void set(FSFlag f) { flags_ |= (1 << static_cast<int>(f)); }
-	void clear(FSFlag f) { flags_ &= ~(1 << static_cast<int>(f)); }
-	bool test(FSFlag f) const { return flags_ & (1 << static_cast<int>(f)); }
-	void clearFlags() { flags_ = 0; }
-
-	/**
-	 * Move the entire frameset to another frameset object. This will
-	 * invalidate the current frameset object as all memory buffers will be
-	 * moved.
-	 */
-	void swapTo(ftl::data::FrameSet<FRAME> &);
-
-    typedef FRAME Frame;
-    typedef std::function<bool(ftl::data::FrameSet<FRAME> &)> Callback;
-
-	/**
-	 * Get the data from a data channel. This only works for the data channels
-	 * and will throw an exception with any others.
-	 */
-	template <typename T> void get(ftl::codecs::Channel channel, T &params) const;
-
-	/**
-	 * Set the value of a channel. Some channels should not be modified via the
-	 * non-const get method, for example the data channels.
-	 */
-	template <typename T> void create(ftl::codecs::Channel channel, const T &value);
-
-	/**
-	 * Access the raw data channel vector object.
-	 */
-	const std::vector<unsigned char> &getRawData(ftl::codecs::Channel c) const;
-
-	/**
-	 * Provide raw data for a data channel.
-	 */
-	void createRawData(ftl::codecs::Channel c, const std::vector<unsigned char> &v);
-
-	/**
-	 * Is there valid data in channel (either host or gpu). This does not
-	 * verify that any memory or data exists for the channel.
-	 */
-	inline bool hasChannel(ftl::codecs::Channel channel) const {
-		int c = static_cast<int>(channel);
-		if (c == 66) return true;
-		else if (c >= 2048) return data_channels_.has(channel);
-		return false;
-	}
-
-	/**
-	 * Check that a given frame is valid in this frameset.
-	 */
-	inline bool hasFrame(size_t ix) const { return (1 << ix) & mask; }
-
-	/**
-	 * Get the first valid frame in this frameset. No valid frames throws an
-	 * exception.
-	 */
-	FRAME &firstFrame();
-
-	const FRAME &firstFrame() const;
-
-	void clearData() {
-		data_.clear();
-		data_channels_.clear();
-	}
-
-	ftl::codecs::Channels<2048> getDataChannels() const { return data_channels_; }
-
-	private:
-	std::unordered_map<int, std::vector<unsigned char>> data_;
-	ftl::codecs::Channels<2048> data_channels_;
-	std::atomic<int> flags_;
-};
-
-/**
- * Callback type for receiving video frames.
- */
-//typedef std::function<bool(ftl::rgbd::FrameSet &)> VideoCallback;
-
-/**
- * Abstract class for any generator of FrameSet structures. A generator
- * produces (decoded) frame sets at regular frame intervals depending on the
- * global timer settings. The `onFrameSet` callback may be triggered from any
- * thread and also may drop frames and not be called for a given timestamp.
- */
-template <typename FRAMESET>
-class Generator {
-	public:
-	Generator() {}
-	virtual ~Generator() {}
-
-	/** Number of frames in last frameset. This can change over time. */
-	virtual size_t size()=0;
-
-	/**
-	 * Get the persistent state object for a frame. An exception is thrown
-	 * for a bad index.
-	 */
-	virtual typename FRAMESET::Frame::State &state(size_t ix)=0;
-
-	inline typename FRAMESET::Frame::State &operator[](int ix) { return state(ix); }
-
-	/** Register a callback to receive new frame sets. */
-	virtual void onFrameSet(const typename FRAMESET::Callback &)=0;
-};
-
-}
-}
-
-// === Implementations =========================================================
-
-template <typename FRAME>
-void ftl::data::FrameSet<FRAME>::swapTo(ftl::data::FrameSet<FRAME> &fs) {
-	//UNIQUE_LOCK(fs.mtx, lk);
-	std::unique_lock<std::shared_mutex> lk(fs.mtx);
-
-	//if (fs.frames.size() != frames.size()) {
-		// Assume "this" is correct and "fs" is not.
-		fs.frames.resize(frames.size());
-	//}
-
-	fs.timestamp = timestamp;
-	fs.count = static_cast<int>(count);
-	fs.flags_ = (int)flags_;
-	fs.mask = static_cast<unsigned int>(mask);
-	fs.id = id;
-	fs.pose = pose;
-
-	for (size_t i=0; i<frames.size(); ++i) {
-		frames[i].swapTo(ftl::codecs::Channels<0>::All(), fs.frames[i]);
-	}
-
-	std::swap(fs.data_, data_);
-	fs.data_channels_ = data_channels_;
-	data_channels_.clear();
-
-	set(ftl::data::FSFlag::STALE);
-}
-
-// Default data channel implementation
-template <typename FRAME>
-// cppcheck-suppress *
-template <typename T>
-void ftl::data::FrameSet<FRAME>::get(ftl::codecs::Channel channel, T &params) const {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Cannot use generic type with non data channel");
-	if (!hasChannel(channel)) throw FTL_Error("Data channel does not exist");
-
-	const auto &i = data_.find(static_cast<int>(channel));
-	if (i == data_.end()) throw FTL_Error("Data channel does not exist");
-
-	auto unpacked = msgpack::unpack((const char*)(*i).second.data(), (*i).second.size());
-	unpacked.get().convert(params);
-}
-
-template <typename FRAME>
-// cppcheck-suppress *
-template <typename T>
-void ftl::data::FrameSet<FRAME>::create(ftl::codecs::Channel channel, const T &value) {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Cannot use generic type with non data channel");
-
-	data_channels_ += channel;
-
-	auto &v = *std::get<0>(data_.insert({static_cast<int>(channel),{}}));
-	v.second.resize(0);
-	ftl::util::FTLVectorBuffer buf(v.second);
-	msgpack::pack(buf, value);
-}
-
-template <typename FRAME>
-const std::vector<unsigned char> &ftl::data::FrameSet<FRAME>::getRawData(ftl::codecs::Channel channel) const {
-	if (static_cast<int>(channel) < static_cast<int>(ftl::codecs::Channel::Data)) throw FTL_Error("Non data channel");
-	if (!hasChannel(channel)) throw FTL_Error("Data channel does not exist");
-
-	return data_.at(static_cast<int>(channel));
-}
-
-template <typename FRAME>
-void ftl::data::FrameSet<FRAME>::createRawData(ftl::codecs::Channel c, const std::vector<unsigned char> &v) {
-	data_.insert({static_cast<int>(c), v});
-	data_channels_ += c;
-}
-
-template <typename FRAME>
-FRAME &ftl::data::FrameSet<FRAME>::firstFrame() {
-	for (size_t i=0; i<frames.size(); ++i) {
-		if (hasFrame(i)) return frames[i];
-	}
-	throw FTL_Error("No frames in frameset");
-}
-
-template <typename FRAME>
-const FRAME &ftl::data::FrameSet<FRAME>::firstFrame() const {
-	for (size_t i=0; i<frames.size(); ++i) {
-		if (hasFrame(i)) return frames[i];
-	}
-	throw FTL_Error("No frames in frameset");
-}
-
-#endif  // _FTL_DATA_FRAMESET_HPP_
diff --git a/components/structures/include/ftl/data/framestate.hpp b/components/structures/include/ftl/data/framestate.hpp
deleted file mode 100644
index 378a37f3d..000000000
--- a/components/structures/include/ftl/data/framestate.hpp
+++ /dev/null
@@ -1,302 +0,0 @@
-#ifndef _FTL_DATA_FRAMESTATE_HPP_
-#define _FTL_DATA_FRAMESTATE_HPP_
-
-#include <ftl/configuration.hpp>
-#include <ftl/exception.hpp>
-#include <ftl/codecs/channels.hpp>
-#include <Eigen/Eigen>
-#include <array>
-#include <optional>
-#include <string>
-
-namespace ftl {
-namespace data {
-
-/**
- * Represent state that is persistent across frames. Such state may or may not
- * change from one frame to the next so a record of what has changed must be
- * kept. Changing state should be done at origin and not in the frame. State
- * that is marked as changed will then be send into a stream and the changed
- * status will be cleared, allowing data to only be sent/saved when actual
- * changes occur.
- * 
- * The provided SETTINGS type must support MsgPack and be copyable. An example
- * of settings is camera intrinsics.
- * 
- * COUNT is the number of settings channels available. For example, video state
- * has two settings channels, one for left camera and one for right camera.
- */
-template <typename SETTINGS, int COUNT>
-class FrameState {
-	public:
-	typedef SETTINGS Settings;
-
-	FrameState();
-	FrameState(FrameState &);
-	FrameState(FrameState &&);
-	~FrameState();
-
-	/**
-	 * Update the pose and mark as changed.
-	 */
-	void setPose(const Eigen::Matrix4d &pose) {
-		pose_ = pose;
-		changed_ += ftl::codecs::Channel::Pose;
-	}
-
-	/**
-	 * Update the left settings and mark as changed.
-	 */
-	void setLeft(const SETTINGS &p) {
-		static_assert(COUNT > 0, "No settings channel");
-		settings_[0] = p;
-		changed_ += ftl::codecs::Channel::Settings1;
-	}
-
-	/**
-	 * Update the right settings and mark as changed.
-	 */
-	void setRight(const SETTINGS &p) {
-		static_assert(COUNT > 1, "No second settings channel");
-		settings_[1] = p;
-		changed_ += ftl::codecs::Channel::Settings2;
-	}
-
-	/**
-	 * Change settings using ID number. Necessary when more than 2 settings
-	 * channels exist, otherwise use `setLeft` and `setRight`.
-	 */
-	template <int I>
-	void set(const SETTINGS &p) {
-		static_assert(I < COUNT, "Settings channel too large");
-		settings_[I] = p;
-		changed_ += __idToChannel(I);
-	}
-
-	/**
-	 * Get the current pose.
-	 */
-	inline const Eigen::Matrix4d &getPose() const { return pose_; }
-
-	/**
-	 * Get the left settings.
-	 */
-	inline const SETTINGS &getLeft() const { return settings_[0]; }
-
-	/**
-	 * Get the right settings.
-	 */
-	inline const SETTINGS &getRight() const { return settings_[1]; }
-
-	/**
-	 * Get a modifiable pose reference that does not change the changed status.
-	 * @attention Should only be used internally.
-	 * @todo Make private eventually.
-	 */
-	inline Eigen::Matrix4d &getPose() { return pose_; }
-
-	/**
-	 * Get a modifiable left settings reference that does not change
-	 * the changed status. Modifications made using this will not be propagated.
-	 * @attention Should only be used internally.
-	 * @todo Make private eventually.
-	 */
-	inline SETTINGS &getLeft() { return settings_[0]; }
-
-	/**
-	 * Get a modifiable right settings reference that does not change
-	 * the changed status. Modifications made using this will not be propagated.
-	 * @attention Should only be used internally.
-	 * @todo Make private eventually.
-	 */
-	inline SETTINGS &getRight() { return settings_[1]; }
-
-	/**
-	 * Get a named config property.
-	 */
-	template <typename T>
-	std::optional<T> get(const std::string &name) {
-		return ftl::config::getJSON<T>(config_, name);
-	}
-
-	/**
-	 * Helper class to specialising channel based state access.
-	 * @private
-	 */
-	template <typename T, ftl::codecs::Channel C, typename S, int N> struct As {
-		static const T &func(const ftl::data::FrameState<S,N> &t) {
-			throw FTL_Error("Type not supported for state channel");
-		}
-
-		static T &func(ftl::data::FrameState<S,N> &t) {
-			throw FTL_Error("Type not supported for state channel");
-		}
-	};
-
-	// Specialise for pose
-	template <typename S, int N>
-	struct As<Eigen::Matrix4d,ftl::codecs::Channel::Pose,S,N> {
-		static const Eigen::Matrix4d &func(const ftl::data::FrameState<S,N> &t) {
-			return t.pose_;
-		}
-
-		static Eigen::Matrix4d &func(ftl::data::FrameState<S,N> &t) {
-			return t.pose_;
-		}
-	};
-
-	// Specialise for settings 1
-	template <typename S, int N>
-	struct As<S,ftl::codecs::Channel::Settings1,S,N> {
-		static const S &func(const ftl::data::FrameState<S,N> &t) {
-			return t.settings_[0];
-		}
-
-		static S &func(ftl::data::FrameState<S,N> &t) {
-			return t.settings_[0];
-		}
-	};
-
-	// Specialise for settings 2
-	template <typename S, int N>
-	struct As<S,ftl::codecs::Channel::Settings2,S,N> {
-		static const S &func(const ftl::data::FrameState<S,N> &t) {
-			return t.settings_[1];
-		}
-
-		static S &func(ftl::data::FrameState<S,N> &t) {
-			return t.settings_[1];
-		}
-	};
-
-	// Specialise for config
-	template <typename S, int N>
-	struct As<nlohmann::json,ftl::codecs::Channel::Configuration,S,N> {
-		static const nlohmann::json &func(const ftl::data::FrameState<S,N> &t) {
-			return *t.config_;
-		}
-
-		static nlohmann::json &func(ftl::data::FrameState<S,N> &t) {
-			return *t.config_;
-		}
-	};
-
-	/**
-	 * Allow access to state items using a known channel number. By default
-	 * these throw an exception unless specialised to accept a particular type
-	 * for a particular channel. The specialisations are automatic for pose,
-	 * config and SETTINGS items.
-	 */
-	template <typename T, ftl::codecs::Channel C>
-	T &as() { return As<T,C,SETTINGS,COUNT>::func(*this); }
-
-	/**
-	 * Allow access to state items using a known channel number. By default
-	 * these throw an exception unless specialised to accept a particular type
-	 * for a particular channel. The specialisations are automatic for pose,
-	 * config and SETTINGS items.
-	 */
-	template <typename T, ftl::codecs::Channel C>
-	const T &as() const {
-		return As<T,C,SETTINGS,COUNT>::func(*this);
-	}
-
-	/**
-	 * Set a named config property. Also makes state as changed to be resent.
-	 */
-	template <typename T>
-	void set(const std::string &name, T value) {
-		ftl::config::setJSON<T>(config_, name, value);
-		changed_ += ftl::codecs::Channel::Configuration;
-	}
-
-	inline const nlohmann::json &getConfig() const { return *config_; }
-
-	inline nlohmann::json &getConfig() { return *config_; }
-
-	/**
-	 * Check if pose or settings have been modified and not yet forwarded.
-	 * Once forwarded through a pipeline / stream the changed status is cleared.
-	 */
-	inline bool hasChanged(ftl::codecs::Channel c) const { return changed_.has(c); }
-
-	/**
-	 * Copy assignment will clear the changed status of the original.
-	 */
-	FrameState &operator=(FrameState &);
-
-	FrameState &operator=(FrameState &&);
-
-	/**
-	 * Clear the changed status to unchanged.
-	 */
-	inline void clear() { changed_.clear(); }
-
-	private:
-	Eigen::Matrix4d pose_;
-	std::array<SETTINGS,COUNT> settings_;
-	nlohmann::json *config_;
-	ftl::codecs::Channels<64> changed_;  // Have the state channels changed?
-
-	static inline ftl::codecs::Channel __idToChannel(int id) {
-		return (id == 0) ? ftl::codecs::Channel::Settings1 : (id == 1) ?
-			ftl::codecs::Channel::Settings2 :
-			static_cast<ftl::codecs::Channel>(static_cast<int>(ftl::codecs::Channel::Settings3)+(id-2));
-	}
-};
-
-}
-}
-
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT>::FrameState() : settings_({{0}}), config_(ftl::config::createJSON()) {
-	pose_ = Eigen::Matrix4d::Identity();
-}
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT>::~FrameState() {
-	ftl::config::destroyJSON(config_);
-}
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT>::FrameState(ftl::data::FrameState<SETTINGS,COUNT> &f) {
-	pose_ = f.pose_;
-	settings_ = f.settings_;
-	changed_ = f.changed_;
-	ftl::config::copyJSON(config_, f.config_);
-	f.changed_.clear();
-}
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT>::FrameState(ftl::data::FrameState<SETTINGS,COUNT> &&f) {
-	pose_ = f.pose_;
-	settings_ = f.settings_;
-	changed_ = f.changed_;
-	config_ = f.config_;
-	f.config_ = nullptr;
-	f.changed_.clear();
-}
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT> &ftl::data::FrameState<SETTINGS,COUNT>::operator=(ftl::data::FrameState<SETTINGS,COUNT> &f) {
-	pose_ = f.pose_;
-	settings_ = f.settings_;
-	changed_ = f.changed_;
-	ftl::config::copyJSON(config_, f.config_);
-	f.changed_.clear();
-	return *this;
-}
-
-template <typename SETTINGS, int COUNT>
-ftl::data::FrameState<SETTINGS,COUNT> &ftl::data::FrameState<SETTINGS,COUNT>::operator=(ftl::data::FrameState<SETTINGS,COUNT> &&f) {
-	pose_ = f.pose_;
-	settings_ = f.settings_;
-	changed_ = f.changed_;
-	config_ = f.config_;
-	f.changed_.clear();
-	f.config_ = nullptr;
-	return *this;
-}
-
-#endif  // _FTL_DATA_FRAMESTATE_HPP_
diff --git a/components/structures/include/ftl/data/messages.hpp b/components/structures/include/ftl/data/messages.hpp
new file mode 100644
index 000000000..9f90263fd
--- /dev/null
+++ b/components/structures/include/ftl/data/messages.hpp
@@ -0,0 +1,30 @@
+#ifndef _FTL_DATA_MESSAGES_HPP_
+#define _FTL_DATA_MESSAGES_HPP_
+
+#include <msgpack.hpp>
+
+namespace ftl {
+namespace data {
+
+// Note: On Windows, ERROR_* sometimes collides with an existing macro and fails to compile, hence the use of Error_ rather than ERROR_
+enum class Message : int {
+	Error_UNKNOWN = 0,
+	Error_OPERATOR_EXCEPTION,
+	Error_FRAME_GRAB,
+	Error_BAD_FORMAT,
+	Error_OPENVR,
+	Warning_UNKNOWN = 1024,
+	Warning_FRAME_DROP,
+	Warning_PIPELINE_DROP,
+	Warning_MISSING_CHANNEL,
+	Warning_INCOMPLETE_FRAME,
+	INFORMATION_UNKNOWN = 2046,
+	OTHER_UNKNOWN = 3072
+};
+
+}
+}
+
+MSGPACK_ADD_ENUM(ftl::data::Message);
+
+#endif
\ No newline at end of file
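
These message codes are intended to be attached to frames through the `Frame::message` wrapper introduced later in this patch. A brief hedged sketch of that pattern follows; the function name and message text are illustrative.

#include <ftl/data/messages.hpp>
#include <ftl/data/new_frame.hpp>

// Hypothetical example: record a non-fatal error against a frame. Consumers can
// read it back via frame.messages() when Channel::Messages is present.
void reportGrabFailure(ftl::data::Frame &frame) {
	frame.message(ftl::data::Message::Error_FRAME_GRAB, "device returned no image");
}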
diff --git a/components/structures/include/ftl/data/new_frame.hpp b/components/structures/include/ftl/data/new_frame.hpp
index 5a45662de..9fa92ed14 100644
--- a/components/structures/include/ftl/data/new_frame.hpp
+++ b/components/structures/include/ftl/data/new_frame.hpp
@@ -1,16 +1,24 @@
 #ifndef _FTL_DATA_NEWFRAME_HPP_
 #define _FTL_DATA_NEWFRAME_HPP_
 
+// Remove pointless warning
+#ifdef _MSC_VER
+#pragma warning(disable : 4544)
+#endif
+
 #include <map>
 #include <unordered_set>
 #include <any>
 #include <optional>
 #include <list>
 #include <unordered_map>
+#include <functional>
 #include <ftl/codecs/channels.hpp>
+#include <ftl/codecs/packet.hpp>
 #include <ftl/data/channels.hpp>
 #include <ftl/exception.hpp>
 #include <ftl/handle.hpp>
+#include <ftl/data/messages.hpp>
 
 template<typename T> struct is_list : public std::false_type {};
 
@@ -23,16 +31,70 @@ namespace data {
 
 class Session;
 class Pool;
+class FrameSet;
+
+/**
+ * Unique identifier for a single frame. A frameset ID and a frame (source)
+ * index are packed into a single 32-bit integer. Every frame has a FrameID,
+ * as does every frameset. FrameID and timestamp together uniquely identify a
+ * piece of data within the system, since frames cannot be duplicated.
+ */
+struct FrameID {
+	uint32_t id;
+
+	/**
+	 * Frameset ID for this frame.
+	 */
+	inline unsigned int frameset() const { return id >> 8; }
+
+	/**
+	 * Frame index within the frameset. This will correspond to the vector
+	 * index in the frameset object.
+	 */
+	inline unsigned int source() const { return id & 0xff; }
+
+	/**
+	 * The packed int with both frameset ID and index.
+	 */
+	operator uint32_t() const { return id; }
 
+	/**
+	 * Create a frame ID using a frameset id and a source number.
+	 * @param fs Frameset id
+	 * @param s Source number inside frameset
+	 */
+	FrameID(unsigned int fs, unsigned int s) : id((fs << 8) + (s & 0xff) ) {}
+	FrameID() : id(0) {}
+};
+
+/**
+ * A frame object can be used in 3 different scenarios. A frame mode cannot be
+ * changed after construction and so each mode is constructed differently.
+ */
+enum FrameMode {
+	PRIMARY,		/// A normal frame generated by a builder
+	RESPONSE,		/// A frame that acts as a reply to a primary frame
+	STANDALONE		/// Not associated with a source or stream, used for storage
+};
+
+/**
+ * The life cycle of a frame goes through all of these frame status stages.
+ * From the `Pool` it is created. After the frame is populated with initial data
+ * it is `stored`. New data is inserted to the frame before being `flushed`.
+ * Finally, when the frame is destroyed the data is transferred to the `Pool`
+ * for memory reuse and the frame is `released`.
+ */
 enum FrameStatus {
-	CREATED,   // Initial state, before store
-	STORED,    // Changed to this after call to `store`
-	FLUSHED,   // Changed to this after call to `flush`
-	RELEASED   // Destroyed or moved
+	CREATED,   /// Initial state, before store
+	STORED,    /// Changed to this after call to `store`
+	FLUSHED,   /// Changed to this after call to `flush`
+	RELEASED   /// Destroyed or moved
 };
 
 /**
- * Helper class to enable aggregation of aggregate channels.
+ * Helper class to enable aggregation of aggregate channels. Assignment acts to
+ * append data to a list rather than replace that data. It allows all data
+ * changes to be recorded. It is not thread-safe, however.
  */
 template <typename T>
 struct Aggregator {
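
Referring back to the `FrameID` struct documented above, a small hedged example of how the packed identifier decomposes; the particular numbers are arbitrary.

#include <ftl/data/new_frame.hpp>
#include <cassert>
#include <cstdint>

void frameIdExample() {
	ftl::data::FrameID id(3, 7);       // frameset 3, source index 7
	assert(id.frameset() == 3);        // upper bits of the packed integer
	assert(id.source() == 7);          // low 8 bits of the packed integer
	uint32_t packed = id;              // implicit conversion to the packed form
	assert(packed == ((3u << 8) | 7u));
}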
@@ -45,6 +107,16 @@ struct Aggregator {
 		return *this;
 	}
 
+	Aggregator &operator=(const typename T::value_type &v) {
+		list.push_back(v);
+		return *this;
+	}
+
+	Aggregator &operator=(typename T::value_type &&v) {
+		list.push_back(std::move(v));
+		return *this;
+	}
+
 	Aggregator &operator=(T &&l) {
 		if (aggregate) list.splice(list.end(), l, l.begin(), l.end());
 		else list = std::move(l);
@@ -55,24 +127,84 @@ struct Aggregator {
 	operator T() const { return list; }
 };
 
+/**
+ * A `Frame` is the primary unit of data within the system. A data source
+ * generates discrete blocks of data with a timestamp, these blocks are
+ * encapsulated in a frame that has any number of channels. A `Frame` must be
+ * constructed from a `Pool` object so that memory can be reused.
+ *
+ * It can be moved around but not copied since the quantity of data involved in
+ * a frame is huge.
+ *
+ * A frame goes through the following stages:
+ *   1) Creation from reused memory in `Pool`
+ *   2) Populate with incoming initial data/changes (from stream)
+ *   3) Store of changes to persistent memory
+ *   4) Create any new data such as new video frames
+ *   5) Flush the data to transmit or save, becomes readonly
+ *   6) Release memory to `Pool`
+ *
+ * A channel stores one particular element of data of a specified type. To write
+ * to a channel the `create` or `set` methods must be used; this will mark the
+ * channel as changed but can only occur before the frame is flushed and
+ * readonly. A `get` method allows const access to the data as long as the
+ * channel exists.
+ *
+ * On-change events are triggered when `store` occurs, whereas on-flush events
+ * occur after flush. Both of these may occur on destruction if the frame was
+ * not stored or flushed before destruction.
+ *
+ * Some channels may fail `hasChannel` but still be marked as `available`. This
+ * will be due to the data not being transmitted or encoded until requested.
+ *
+ * Each frame is also associated with a `Session` object which stores all
+ * persistent data. Persistent data can then be accessed via any `Frame` with
+ * the same ID since they share a `Session`.
+ *
+ * A `Frame` provides some basic methods, however, it can be cast to other
+ * frame types using the cast method which provides additional wrapper
+ * functions. An example is `ftl::rgbd::Frame`.
+ *
+ * @see https://gitlab.utu.fi/nicolas.pope/ftl/-/wikis/Design/Frames
+ */
 class Frame {
 	friend class Session;
 	friend class ftl::data::Pool;
-	friend class ftl::streams::Feed;
+	friend class ftl::data::FrameSet;
 
-	private:
+	protected:
 	// Only Feed class should construct
-	Frame(Pool *ppool, Session *parent, uint32_t pid, int64_t ts) : timestamp_(ts), id(pid), pool_(ppool), parent_(parent), status_(FrameStatus::CREATED) {};
+	Frame(Pool *ppool, Session *parent, FrameID pid, int64_t ts);
 	int64_t timestamp_=0;
+	FrameID id_;
 
 	public:
-	const uint32_t id=0;
 
+	/**
+	 * Millisecond timestamp when the frame was originally constructed and which
+	 * was the instant the data contents were captured.
+	 */
 	inline int64_t timestamp() const { return timestamp_; }
 
+	/**
+	 * Unique identification of the data source. Combined with the timestamp it
+	 * becomes a unique item of data and a singleton in the system.
+	 */
+	inline FrameID id() const { return id_; }
+
+	/**
+	 * Access the frameset ID for this frame.
+	 */
+	inline unsigned int frameset() const { return id_.frameset(); }
+
+	/**
+	 * Access the index of the frame within the frameset.
+	 */
+	inline unsigned int source() const { return id_.source(); }
+
 	public:
 	Frame()=delete;
-	
+
 	~Frame();
 
 	Frame(Frame &&f) {
@@ -88,55 +220,197 @@ class Frame {
 	Frame(const Frame &)=delete;
 	Frame &operator=(const Frame &)=delete;
 
+	/**
+	 * Obtain the current life-cycle status of the frame. This determines what
+	 * operations are permitted and what the behaviour of the frame is.
+	 */
 	inline FrameStatus status() const { return status_; }
 
+	/**
+	 * Number of data channels in the frame. Excluding previous persistent data.
+	 */
 	inline size_t size() const { return data_.size(); }
 
+	/**
+	 * Is there data in this frame or in the persistent store for the given
+	 * channel number?
+	 */
 	bool has(ftl::codecs::Channel c) const;
 
+	/**
+	 * Check that either this frame or the persistent store has all the
+	 * channels in the set.
+	 */
+	bool hasAll(const std::unordered_set<ftl::codecs::Channel> &cs);
+
+	/**
+	 * @see has(Channel)
+	 */
+	inline bool hasChannel(ftl::codecs::Channel c) const { return has(c); }
+
+	/**
+	 * Does this frame have data for a given channel? This excludes any data
+	 * that may be in the persistent store.
+	 */
 	inline bool hasOwn(ftl::codecs::Channel c) const;
 
+	/**
+	 * Is the channel potentially available if requested via a stream? Not all
+	 * channels are encoded and transmitted automatically; some must be requested.
+	 * This indicates whether such a request can be fulfilled.
+	 */
+	inline bool available(ftl::codecs::Channel c) const;
+
+	/**
+	 * A complete set of all channels that are potentially available but may
+	 * not currently have their data stored within this object. It means the
+	 * source of the frame can provide the data but has not been requested to
+	 * do so, or cannot due to resource constraints.
+	 */
+	std::unordered_set<ftl::codecs::Channel> available() const;
+
+	bool availableAll(const std::unordered_set<ftl::codecs::Channel> &cs) const;
+
+	/**
+	 * Used by a receiver to mark potential availability. Should not be used
+	 * elsewhere.
+	 */
+	inline void markAvailable(ftl::codecs::Channel c);
+
+	/**
+	 * Has a given channel been marked as changed?
+	 */
 	inline bool changed(ftl::codecs::Channel c) const;
 
+	/**
+	 * A channel is readonly if it has been flushed. An exception is thrown if
+	 * a write is attempted.
+	 */
 	inline bool readonly(ftl::codecs::Channel c) const;
 
+	/**
+	 * @see readonly(Channel)
+	 */
 	inline bool flushed(ftl::codecs::Channel c) const;
 
+	/**
+	 * Changes can occur from different sources for different reasons; this
+	 * obtains the cause of the change. For example, it can be a primary local
+	 * change or it can be received from a remote source. The change type
+	 * influences behaviour during store and flush actions.
+	 */
 	inline ftl::data::ChangeType getChangeType(ftl::codecs::Channel c) const;
 
+	/**
+	 * Obtain the map of all changes.
+	 */
 	inline const std::unordered_map<ftl::codecs::Channel, ChangeType> &changed() const { return changed_; }
 
+	/**
+	 * Obtains a set of all available channels. This excludes channels in the
+	 * persistent store.
+	 */
+	std::unordered_set<ftl::codecs::Channel> channels() const;
+
+	/**
+	 * All channels including those in the persistent store.
+	 */
+	std::unordered_set<ftl::codecs::Channel> allChannels() const;
+
+	/**
+	 * Test if the type of the channel matches the template type. Other
+	 * functions throw exceptions if wrong type is used, but this will not. It
+	 * will also return false if the channel is missing.
+	 */
 	template <typename T>
 	bool isType(ftl::codecs::Channel c) const;
 
+	/**
+	 * Get a readonly const reference to the content of a channel. If the
+	 * channel does not exist or if the template type does not match the content
+	 * then it throws an exception. The data can either be within this frame,
+	 * or if not in the frame then it checks the persistent store.
+	 *
+	 * The data may internally still be encoded and will only be decoded on the
+	 * first call to `get`. It is therefore strongly advised that any initial
+	 * calls to `get` are not concurrent, as the lazy decode is not thread-safe.
+	 */
 	template <typename T>
 	const T &get(ftl::codecs::Channel c) const;
 
+	/**
+	 * Should not be used directly, but allows direct access to the data for
+	 * a channel without any attempt to cast it to a type. Throws an exception if
+	 * the channel does not exist, but will also look in the persistent
+	 * store.
+	 */
 	const std::any &getAny(ftl::codecs::Channel c) const;
 
+	/**
+	 * Get the hash code from the C++ `type_info` structure that corresponds to
+	 * the current data contents.
+	 */
+	inline size_t type(ftl::codecs::Channel c) const { return getAny(c).type().hash_code(); }
+
+	/**
+	 * Should not be used. Allows modification without marking as changed.
+	 */
 	std::any &getAnyMutable(ftl::codecs::Channel c);
 
+	/**
+	 * Should not be used. Does not throw exceptions but can return a nullptr
+	 * instead if the channel does not exist or the type does not match.
+	 * Currently, this does not do lazy decode of data so may fail.
+	 */
 	template <typename T>
 	const T *getPtr(ftl::codecs::Channel c) const noexcept;
 
+	/**
+	 * Should not be used.
+	 */
 	template <typename T>
 	T *getMutable(ftl::codecs::Channel c);
 
+	/**
+	 * Mark a channel as changed even if there is no data. It can result in
+	 * `hasChannel` giving false but `changed` giving true. Intended to be used
+	 * internally.
+	 */
 	inline void touch(ftl::codecs::Channel c) {
-		changed_[c] = ChangeType::LOCAL;
+		markAvailable(c);
+		changed_[c] = (mode_ == FrameMode::PRIMARY) ? ChangeType::PRIMARY : ChangeType::RESPONSE;
 	}
 
+	/**
+	 * Should not be used.
+	 */
 	inline void touch(ftl::codecs::Channel c, ChangeType type) {
+		markAvailable(c);
 		changed_[c] = type;
 	}
 
+	/**
+	 * Mark the channel as unchanged. This will mean it will not be flushed,
+	 * transmitted or saved but will still return true with `hasChannel`.
+	 */
 	inline void untouch(ftl::codecs::Channel c) {
 		changed_.erase(c);
 	}
 
+	/**
+	 * Create a new channel with the given template type. It will mark the
+	 * channel as changed and return a mutable reference of the correct data
+	 * type. It is not possible to create a channel after it has been flushed;
+	 * doing so will throw an exception. The channel may have existing data from
+	 * the memory pool which can be overwritten, but this is not true for
+	 * every channel number (only video frames currently).
+	 */
 	template <typename T, std::enable_if_t<!is_list<T>::value,int> = 0>
 	T &create(ftl::codecs::Channel c);
 
+	/**
+	 * Create method used for aggregate channels. @see create.
+	 */
 	template <typename T, std::enable_if_t<is_list<T>::value,int> = 0>
 	ftl::data::Aggregator<T> create(ftl::codecs::Channel c);
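
To make the create/get contract described above more tangible, a hedged sketch of typical channel access follows. The channels are passed in by the caller rather than hard-coded, and the value types are assumed to match whatever the channels were registered with.

#include <ftl/data/new_frame.hpp>
#include <list>
#include <string>

void useChannels(ftl::data::Frame &frame, ftl::codecs::Channel scalar_c, ftl::codecs::Channel list_c) {
	// Writing: create() marks the channel as changed and returns a mutable reference.
	frame.create<int>(scalar_c) = 5;

	// Aggregate (list) channels append on assignment instead of replacing.
	frame.create<std::list<std::string>>(list_c) = std::string("first entry");

	// Reading: get() is const and lazily decodes if the data arrived encoded.
	int value = frame.get<int>(scalar_c);

	// Changed channels will be flushed, and become readonly, when flush() is called.
	bool was_changed = frame.changed(scalar_c);
	(void)value; (void)was_changed;
}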
 
@@ -145,8 +419,8 @@ class Frame {
 	 * changes the channel status to `DISPATCHED`. If the storage mode is
 	 * `persistent` this adds to session store instead of local frame store,
 	 * although the change status is added to the local frame.
-	 * 
-	 * To be used by receive, no one else.
+	 *
+	 * To be used by the receiver only. Currently unused.
 	 */
 	template <typename T, std::enable_if_t<!is_list<T>::value,int> = 0>
 	T &createChange(ftl::codecs::Channel c, ftl::data::ChangeType t);
@@ -154,14 +428,59 @@ class Frame {
 	template <typename T, std::enable_if_t<is_list<T>::value,int> = 0>
 	ftl::data::Aggregator<T> createChange(ftl::codecs::Channel c, ftl::data::ChangeType t);
 
+	/**
+	 * Create a change but with encoded data provided. This allows for both
+	 * lazy decode and for subsequent data forwarding without encoding.
+	 *
+	 * Currently unused.
+	 */
 	template <typename T>
-	T &createChange(ftl::codecs::Channel c, ftl::data::ChangeType t, std::vector<uint8_t> &data);
+	T &createChange(ftl::codecs::Channel c, ftl::data::ChangeType t, const ftl::codecs::Packet &data);
+
+	/**
+	 * Create a channel, mark it with the given change type and store the
+	 * provided encoded data. The data is not decoded because the actual data
+	 * type of the channel is not known at this time.
+	 *
+	 * To be used by `receiver`.
+	 * @see ftl::stream::Receiver
+	 */
+	inline void informChange(ftl::codecs::Channel c, ftl::data::ChangeType t, const ftl::codecs::Packet &data) {
+		createAnyChange(c, t, data);
+	}
+
+	/**
+	 * Create a channel, mark it with a given change type, but do not provide
+	 * any data or type information.
+	 */
+	inline void informChange(ftl::codecs::Channel c, ftl::data::ChangeType t) {
+		createAnyChange(c, t);
+	}
+
+	/**
+	 * Create a channel, mark with a given change type and provided unencoded
+	 * data. The data is moved into the channel. This is used by `Receiver` to
+	 * provide a loopback functionality.
+	 */
+	inline void informChange(ftl::codecs::Channel c, ftl::data::ChangeType t, std::any &data) {
+		createAnyChange(c, t) = std::move(data);
+	}
 
-	const std::vector<uint8_t> &getEncoded(ftl::codecs::Channel c) const;
+	/**
+	 * Retrieve all encoded data packets for a channel, if any. Note that
+	 * encoded data is removed if the channel is modified.
+	 */
+	const std::list<ftl::codecs::Packet> &getEncoded(ftl::codecs::Channel c) const;
 
+	/** Do not use. */
 	template <typename T, typename ...ARGS>
 	T &emplace(ftl::codecs::Channel, ARGS...);
 
+	/**
+	 * Can be used instead of `create` to modify channel contents. It has the
+	 * same rules as `create`, except that if the channel does not exist then
+	 * it will throw an exception instead of creating the channel.
+	 */
 	template <typename T, std::enable_if_t<!is_list<T>::value,int> = 0>
 	T &set(ftl::codecs::Channel c);
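
A short hedged sketch of the difference between `set` and `create`, and of the per-channel flush described further below; as before, the channel is supplied by the caller rather than assumed.

#include <ftl/data/new_frame.hpp>

void updateAndFlush(ftl::data::Frame &frame, ftl::codecs::Channel c) {
	frame.create<float>(c) = 1.0f;  // create() makes the channel if it is missing

	// set() follows the same rules but throws if the channel does not already exist.
	frame.set<float>(c) = 2.0f;

	// Flush only this channel: it becomes readonly and a single flush event fires.
	frame.flush(c);

	// Any further write to this channel would now throw.
}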
 
@@ -178,10 +497,29 @@ class Frame {
 	 */
 	void hardRemove(ftl::codecs::Channel);
 
+	/**
+	 * Add a callback to a channel to watch for change events. These are
+	 * triggered by the `store` operation. Note that `Receiver` will call
+	 * `store` on a frame before generating a frameset callback, therefore
+	 * these events always occur and complete before the frameset is generated.
+	 */
 	inline ftl::Handle onChange(ftl::codecs::Channel c, const std::function<bool(Frame&,ftl::codecs::Channel)> &cb);
 
+	/**
+	 * Add a callback to listen for any and all changes to the frame.
+	 * @see onChange(Channel, cb).
+	 */
 	inline ftl::Handle onChange(const std::function<bool(Frame&,ftl::codecs::Channel)> &cb);
 
+	/**
+	 * All changed channels generate a flush event when the frame is flushed
+	 * explicitly or on destruction. There is one exception: forwarded changes
+	 * do generate a change event but do not subsequently generate a flush event,
+	 * as they are considered completed changes. This prevents loops whilst
+	 * ensuring everyone has a copy of the change.
+	 *
+	 * @see changeType
+	 */
 	inline ftl::Handle onFlush(const std::function<bool(Frame&,ftl::codecs::Channel)> &cb);
 
 	/**
@@ -196,6 +534,8 @@ class Frame {
 
 	void swapChannel(ftl::codecs::Channel, Frame &);
 
+	void swapChannels(ftl::codecs::Channel, ftl::codecs::Channel);
+
 	/**
 	 * Discard all change status without removing the data.
 	 */
@@ -218,35 +558,111 @@ class Frame {
 	 */
 	void release();
 
-	/** Send changes back through origin stream. */
+	/**
+	 * Send changes back through the origin stream. Each changed channel is
+	 * flushed individually, generating a flush event and making that channel
+	 * readonly. Note: a frame cannot be flushed more than once, and the entire
+	 * frame becomes readonly afterwards.
+	 */
 	bool flush();
 
+	/**
+	 * Force a flush of only a single channel, allowing the frame to continue
+	 * to be modified (except this channel). This will generate a single
+	 * flush event.
+	 */
 	bool flush(ftl::codecs::Channel c);
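+
+	// Flush sketch (illustrative only; assumes the frame has been stored):
+	// a single channel can be flushed early, the rest go with the full flush.
+	//   frame.create<float>(Channel::Pose) = 6.0f;
+	//   frame.flush(Channel::Pose);   // Pose becomes readonly, one flush event
+	//   frame.create<int>(Channel::Control) = 3456;
+	//   frame.flush();                // flush remaining changes, frame readonly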
 
 	/** Copy persistent changes to session. To be called before dispatch. */
 	void store();
 
+	/**
+	 * Should only be used by the Feed class. Ignores storage rules and saves
+	 * to the session anyway. Currently unused.
+	 */
+	void forceStore();
+
+	/**
+	 * Iterate over all channel data owned by this frame.
+	 */
 	inline auto begin() const { return data_.begin(); }
 	inline auto end() const { return data_.end(); }
 
-	// onBeginFlush
-	// onEndFlush
-	// onError
-
 	inline MUTEX &mutex();
 
+	/**
+	 * Generate a new frame to respond to this one. The destruction of this
+	 * new frame will flush the changes, resulting in those response changes
+	 * being transmitted back to the original source of the frame. The original
+	 * source will then see these changes in the next frame it attempts to
+	 * generate.
+	 */
+	Frame response() const;
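+
+	// Response sketch, mirroring the unit tests: build a reply frame, fill it,
+	// and let flush (or destruction) send it back to the source.
+	//   Frame nf = f.response();
+	//   nf.create<int>(Channel::Control) = 3456;
+	//   nf.flush();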
+
+	/**
+	 * Convert this frame to another type. That type must not have any
+	 * additional member variables, only wrapper methods.
+	 */
+	template <typename T>
+	T &cast();
+
+	template <typename T>
+	const T &cast() const;
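+
+	// Cast sketch (`MyFrameWrapper` is a hypothetical type that only adds
+	// methods, no data members, so the size check in cast<T>() holds):
+	//   auto &wrapped = frame.cast<MyFrameWrapper>();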
+
+	/**
+	 * Used to create isolated frame objects for buffering purposes. This is
+	 * deliberately separate from the default constructor to force explicit use.
+	 */
+	static Frame make_standalone();
+
+	/**
+	 * The memory pool associated with this frame. Note: the pool class also
+	 * provides `onFlush` events, allowing an event handler to respond to any
+	 * frame that is flushed.
+	 */
+	inline Pool *pool() const { return pool_; }
+
+	/**
+	 * The persistent data store for this frame. It is also a frame object and
+	 * can be used in the same manner.
+	 */
+	inline Session *parent() const { return parent_; }
+
+	inline FrameMode mode() const { return mode_; }
+
+	// ==== Wrapper functions ==================================================
+
+	void message(ftl::data::Message code, const std::string &msg);
+
+	void message(ftl::data::Message code, const ftl::Formatter &msg);
+
+	/** Note: throws an exception if `Channel::Messages` is missing. */
+	const std::map<ftl::data::Message,std::string> &messages() const;
+
+	inline bool hasMessages() const { return hasChannel(ftl::codecs::Channel::Messages); }
+
+	/**
+	 * Get or generate a name for this frame.
+	 */
+	std::string name() const;
+
+	/** Throws an exception if missing; check `hasChannel(Channel::MetaData)` first. */
+	const std::map<std::string,std::string> &metadata() const;
+
+	// =========================================================================
+
 	protected:
 	std::any &createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t);
 
-	std::any &createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t, std::vector<uint8_t> &data);
+	std::any &createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t, const ftl::codecs::Packet &data);
 
 	std::any &createAny(ftl::codecs::Channel c);
 
 	private:
 	struct ChannelData {
-		ChannelStatus status=ChannelStatus::INVALID;
-		std::any data;
-		std::vector<uint8_t> encoded={};
+		mutable ChannelStatus status=ChannelStatus::INVALID;
+		mutable std::any data;
+		std::list<ftl::codecs::Packet> encoded={};
 	};
 
 	ChannelData &_getData(ftl::codecs::Channel);
@@ -257,18 +673,25 @@ class Frame {
 	Pool *pool_;
 	Session *parent_;
 	FrameStatus status_;
+	FrameMode mode_ = FrameMode::PRIMARY;
+	uint64_t available_ = 0;
 
 	inline void restart(int64_t ts) {
 		timestamp_ = ts;
 		status_ = FrameStatus::CREATED;
 	}
+
+	/**
+	 * Primary frames also store on flush.
+	 */
+	void _primaryStore();
 };
 
 class Session : public Frame {
 	friend class Frame;
 
 	public:
-	Session() : Frame(nullptr, nullptr,0,0) {
+	Session() : Frame(nullptr, nullptr,FrameID(0,0),0) {
 		status_ = FrameStatus::STORED;
 	}
 
@@ -290,7 +713,7 @@ class Session : public Frame {
 	void flush(Frame &f, ftl::codecs::Channel c);
 
 	inline MUTEX &mutex() { return mutex_; }
-	
+
 	private:
 	std::unordered_map<uint64_t, ftl::Handler<Frame&,ftl::codecs::Channel>> change_channel_;
 	ftl::Handler<Frame&,ftl::codecs::Channel> change_;
@@ -299,6 +722,25 @@ class Session : public Frame {
 	MUTEX mutex_;
 };
 
+/**
+ * Register a msgpack encoder for type T, used when channel data of this type
+ * needs to be encoded (for example when forwarding changes). Safe to call
+ * repeatedly; `create` and `createChange` invoke it for the channel type.
+ */
+template <typename T>
+bool make_type() {
+	setTypeEncoder(typeid(T).hash_code(), [](const ftl::data::Frame &f, ftl::codecs::Channel c, std::vector<uint8_t> &data) {
+		data.resize(0);
+		ftl::util::FTLVectorBuffer buf(data);
+		msgpack::pack(buf, f.get<T>(c));
+		return true;
+	});
+	return true;
+}
+
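+/**
+ * Decode a msgpack-encoded buffer into `a` as type T. Used by `get` to lazily
+ * decode channels that currently hold only encoded packet data.
+ */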
+template <typename T>
+bool decode_type(std::any &a, const std::vector<uint8_t> &data) {
+	auto unpacked = msgpack::unpack((const char*)data.data(), data.size());
+	T &t = a.emplace<T>();
+	unpacked.get().convert<T>(t);
+	return true;
+}
+
 }
 }
 
@@ -306,11 +748,34 @@ class Session : public Frame {
 
 MUTEX &ftl::data::Frame::mutex() { return parent_->mutex(); }
 
+template <typename T>
+T &ftl::data::Frame::cast() {
+	static_assert(std::is_base_of<Frame, T>::value, "Can only cast to type inheriting Frame");
+	static_assert(sizeof(T) == sizeof(Frame), "Casting type must not have additional data members");
+	return *reinterpret_cast<T*>(this);
+}
+
+template <typename T>
+const T &ftl::data::Frame::cast() const {
+	static_assert(std::is_base_of<Frame, T>::value, "Can only cast to type inheriting Frame");
+	static_assert(sizeof(T) == sizeof(Frame), "Casting type must not have additional data members");
+	return *reinterpret_cast<const T*>(this);
+}
+
 bool ftl::data::Frame::hasOwn(ftl::codecs::Channel c) const {
 	const auto &i = data_.find(c);
 	return (i != data_.end() && i->second.status != ftl::data::ChannelStatus::INVALID);
 }
 
+bool ftl::data::Frame::available(ftl::codecs::Channel c) const {
+	const int ic = static_cast<int>(c);
+	return (ic >= 64) ? has(c) : (0x1ull << ic) & available_;
+}
+
+void ftl::data::Frame::markAvailable(ftl::codecs::Channel c) {
+	if ((int)c < 64) available_ |= (0x1ull << (int)c);
+}
+
 bool ftl::data::Frame::changed(ftl::codecs::Channel c) const {
 	return changed_.find(c) != changed_.end();
 }
@@ -330,7 +795,7 @@ bool ftl::data::Frame::readonly(ftl::codecs::Channel c) const {
 }
 
 ftl::Handle ftl::data::Frame::onChange(ftl::codecs::Channel c, const std::function<bool(Frame&,ftl::codecs::Channel)> &cb) {
-	return parent_->onChange(id, c, cb);
+	return parent_->onChange(id(), c, cb);
 }
 
 ftl::Handle ftl::data::Frame::onChange(const std::function<bool(Frame&,ftl::codecs::Channel)> &cb) {
@@ -347,7 +812,7 @@ bool ftl::data::Frame::isType(ftl::codecs::Channel c) const {
 	if (i != data_.end() && i->second.status != ftl::data::ChannelStatus::INVALID) {
 		return typeid(T) == i->second.data.type();
 	} else {
-		return (parent_ && parent_->isType<T>(c)); 
+		return (parent_ && parent_->isType<T>(c));
 	}
 }
 
@@ -362,11 +827,27 @@ const T *ftl::data::Frame::getPtr(ftl::codecs::Channel c) const noexcept {
 template <typename T>
 const T &ftl::data::Frame::get(ftl::codecs::Channel c) const {
 	const auto &d = _getData(c);
+
+	if (d.status != ftl::data::ChannelStatus::INVALID && !d.data.has_value() && d.encoded.size() > 0) {
+		UNIQUE_LOCK(parent_->mutex(), lk);
+		if (!d.data.has_value()) {
+			// Do a decode now and change the status
+			d.status = ftl::data::ChannelStatus::DISPATCHED;
+
+			try {
+				decode_type<T>(d.data, d.encoded.front().data);
+			} catch (...) {
+				throw FTL_Error("Decode failure for channel " << int(c));
+			}
+		}
+	}
+
 	if (d.status != ftl::data::ChannelStatus::INVALID) {
+		if (!d.data.has_value()) throw FTL_Error("'get' does not have value (" << static_cast<unsigned int>(c) << ")");
 		auto *p = std::any_cast<T>(&d.data);
 		if (!p) throw FTL_Error("'get' wrong type for channel (" << static_cast<unsigned int>(c) << ")");
 		return *p;
-	} else throw FTL_Error("Missing channel (" << static_cast<unsigned int>(c) << ")");
+	} else throw FTL_Error("Missing channel (" << static_cast<unsigned int>(c) << ") for (" << frameset() << "," << source() << ")");
 }
 
 // Non-list version
@@ -375,6 +856,7 @@ T &ftl::data::Frame::create(ftl::codecs::Channel c) {
 	if (isAggregate(c)) throw FTL_Error("Aggregate channels must be of list type");
 
 	ftl::data::verifyChannelType<T>(c);
+	ftl::data::make_type<T>();
 
 	std::any &a = createAny(c);
 	if (!isType<T>(c)) return a.emplace<T>();
@@ -385,6 +867,7 @@ T &ftl::data::Frame::create(ftl::codecs::Channel c) {
 template <typename T, std::enable_if_t<is_list<T>::value,int> = 0>
 ftl::data::Aggregator<T> ftl::data::Frame::create(ftl::codecs::Channel c) {
 	ftl::data::verifyChannelType<T>(c);
+	ftl::data::make_type<T>();
 
 	std::any &a = createAny(c);
 	if (!isType<T>(c)) a.emplace<T>();
@@ -392,10 +875,11 @@ ftl::data::Aggregator<T> ftl::data::Frame::create(ftl::codecs::Channel c) {
 }
 
 template <typename T>
-T &ftl::data::Frame::createChange(ftl::codecs::Channel c, ftl::data::ChangeType type, std::vector<uint8_t> &data) {
+T &ftl::data::Frame::createChange(ftl::codecs::Channel c, ftl::data::ChangeType type, const ftl::codecs::Packet &data) {
 	if (!bool(is_list<T>{}) && isAggregate(c)) throw FTL_Error("Aggregate channels must be of list type");
 
 	ftl::data::verifyChannelType<T>(c);
+	//ftl::data::make_type<T>();
 
 	std::any &a = createAnyChange(c, type, data);
 	if (!isType<T>(c)) return a.emplace<T>();
@@ -408,6 +892,7 @@ T &ftl::data::Frame::createChange(ftl::codecs::Channel c, ftl::data::ChangeType
 	if (isAggregate(c)) throw FTL_Error("Aggregate channels must be of list type");
 
 	ftl::data::verifyChannelType<T>(c);
+	ftl::data::make_type<T>();
 
 	std::any &a = createAnyChange(c, type);
 	if (!isType<T>(c)) return a.emplace<T>();
@@ -418,6 +903,7 @@ T &ftl::data::Frame::createChange(ftl::codecs::Channel c, ftl::data::ChangeType
 template <typename T, std::enable_if_t<is_list<T>::value,int> = 0>
 ftl::data::Aggregator<T> ftl::data::Frame::createChange(ftl::codecs::Channel c, ftl::data::ChangeType type) {
 	ftl::data::verifyChannelType<T>(c);
+	ftl::data::make_type<T>();
 
 	std::any &a = createAnyChange(c, type);
 	if (!isType<T>(c)) a.emplace<T>();
@@ -474,4 +960,4 @@ ftl::data::Aggregator<T> ftl::data::Frame::set(ftl::codecs::Channel c) {
 	}
 }
 
-#endif
\ No newline at end of file
+#endif
diff --git a/components/structures/include/ftl/data/new_frameset.hpp b/components/structures/include/ftl/data/new_frameset.hpp
new file mode 100644
index 000000000..ad75c990b
--- /dev/null
+++ b/components/structures/include/ftl/data/new_frameset.hpp
@@ -0,0 +1,151 @@
+#ifndef _FTL_DATA_NFRAMESET_HPP_
+#define _FTL_DATA_NFRAMESET_HPP_
+
+#include <ftl/threads.hpp>
+#include <ftl/timer.hpp>
+#include <ftl/data/new_frame.hpp>
+#include <functional>
+
+//#include <opencv2/opencv.hpp>
+#include <vector>
+
+namespace ftl {
+namespace data {
+
+// Allows a latency of 20 frames maximum
+//static const size_t kMaxFramesets = 15;
+static const size_t kMaxFramesInSet = 32;
+
+enum class FSFlag : int {
+	STALE = 0,
+	PARTIAL = 1
+};
+
+/**
+ * Represents a set of synchronised frames, each with two channels. This is
+ * used to collect all frames from multiple computers that have the same
+ * timestamp.
+ */
+class FrameSet : public ftl::data::Frame {
+	private:
+	//FrameSet(Pool *ppool, Session *parent, uint32_t pid, int64_t ts) :
+	//	Frame(ppool, parent, pid | 0xFF, ts) {};
+
+
+	public:
+	FrameSet(Pool *ppool, FrameID pid, int64_t ts, size_t psize=1);
+	~FrameSet();
+
+	//int id=0;
+	//int64_t timestamp;				// Millisecond timestamp of all frames
+	int64_t localTimestamp;
+	std::vector<Frame> frames;
+	std::atomic<int> count;				// Number of valid frames
+	std::atomic<unsigned int> mask;		// Mask of all sources that contributed
+	std::atomic<int> flush_count;		// How many channels have been flushed
+	SHARED_MUTEX smtx;
+
+	//Eigen::Matrix4d pose;  // Set to identity by default.
+
+	inline void set(FSFlag f) { flags_ |= (1 << static_cast<int>(f)); }
+	inline void clear(FSFlag f) { flags_ &= ~(1 << static_cast<int>(f)); }
+	inline bool test(FSFlag f) const { return flags_ & (1 << static_cast<int>(f)); }
+	inline void clearFlags() { flags_ = 0; }
+
+	std::unordered_set<ftl::codecs::Channel> channels();
+
+	/**
+	 * Move the entire frameset to another frameset object. This will
+	 * invalidate the current frameset object as all memory buffers will be
+	 * moved.
+	 */
+	void moveTo(ftl::data::FrameSet &);
+
+	/**
+	 * Mark a frame as being completed. This modifies the mask and count
+	 * members.
+	 */
+	void completed(size_t ix);
+
+	inline void markPartial() {
+		set(ftl::data::FSFlag::PARTIAL);
+	}
+
+	/**
+	 * Are all frames complete within this frameset?
+	 */
+	inline bool isComplete() { return static_cast<unsigned int>(count) == frames.size(); }
+
+	/**
+	 * Check that a given frame is valid in this frameset.
+	 */
+	inline bool hasFrame(size_t ix) const { return (1 << ix) & mask; }
+
+	/**
+	 * Get the first valid frame in this frameset. Throws an exception if
+	 * there are no valid frames.
+	 */
+	Frame &firstFrame();
+
+	const Frame &firstFrame() const;
+
+	inline Frame &operator[](int ix) { return frames[ix]; }
+	inline const Frame &operator[](int ix) const { return frames[ix]; }
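+
+	// Iteration sketch: visit only the frames that actually arrived
+	// (`process` is a hypothetical callback):
+	//   for (size_t i = 0; i < fs.frames.size(); ++i) {
+	//       if (fs.hasFrame(i)) process(fs[i]);
+	//   }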
+
+	/**
+	 * Flush all frames in the frameset.
+	 */
+	void flush();
+
+	/**
+	 * Store all frames.
+	 */
+	void store();
+
+	/**
+	 * Flush a channel for all frames in the frameset.
+	 */
+	void flush(ftl::codecs::Channel);
+
+	void resize(size_t s);
+
+	/**
+	 * Force a change to all frame timestamps. This is generally used internally
+	 * to allow frameset buffering in advance of knowing an exact timestamp.
+	 * The method will update the timestamps of all contained frames and the
+	 * frameset itself.
+	 */
+	void changeTimestamp(int64_t ts);
+
+	/**
+	 * Make a frameset from a single frame. It borrows the pool, id and
+	 * timestamp from the frame and creates a wrapping frameset instance.
+	 */
+	static std::shared_ptr<FrameSet>  fromFrame(Frame &);
+
+	/**
+	 * Check if channel has changed in any frames.
+	 */
+	bool hasAnyChanged(ftl::codecs::Channel) const;
+
+	private:
+	std::atomic<int> flags_;
+};
+
+using FrameSetPtr = std::shared_ptr<ftl::data::FrameSet>;
+using FrameSetCallback = std::function<bool(const FrameSetPtr&)>;
+
+class Generator {
+	public:
+	virtual ftl::Handle onFrameSet(const FrameSetCallback &)=0;
+};
+
+/**
+ * Callback type for receiving video frames.
+ */
+//typedef std::function<bool(ftl::rgbd::FrameSet &)> VideoCallback;
+
+}
+}
+
+#endif  // _FTL_DATA_NFRAMESET_HPP_
diff --git a/components/structures/src/creators.cpp b/components/structures/src/creators.cpp
new file mode 100644
index 000000000..21f4789bf
--- /dev/null
+++ b/components/structures/src/creators.cpp
@@ -0,0 +1,44 @@
+#include <ftl/data/creators.hpp>
+#include <ftl/data/framepool.hpp>
+#include <ftl/timer.hpp>
+
+#include <loguru.hpp>
+
+using ftl::data::Frame;
+using ftl::data::Pool;
+using ftl::data::FrameCreator;
+using ftl::data::IntervalFrameCreator;
+
+Frame FrameCreator::create() {
+	Frame f = pool_->allocate(id_, ftl::timer::get_time());
+	return f;
+}
+
+Frame FrameCreator::create(int64_t timestamp) {
+	Frame f = pool_->allocate(id_, timestamp);
+	return f;
+}
+
+IntervalFrameCreator::IntervalFrameCreator(Pool *p_pool, FrameID p_id, DiscreteSource *src)
+	: FrameCreator(p_pool, p_id), src_(src) {}
+
+void IntervalFrameCreator::start() {
+	capture_ = std::move(ftl::timer::add(ftl::timer::timerlevel_t::kTimerHighPrecision, [this](int64_t ts) {
+		src_->capture(ts);
+		return true;
+	}));
+
+	retrieve_ = std::move(ftl::timer::add(ftl::timer::timerlevel_t::kTimerMain, [this](int64_t ts) {
+		Frame f = create(ts);
+		f.store();
+		if (!src_->retrieve(f)) {
+			LOG(WARNING) << "Frame was skipped";
+		}
+		return true;
+	}));
+}
+
+void IntervalFrameCreator::stop() {
+	capture_.cancel();
+	retrieve_.cancel();
+}
diff --git a/components/structures/src/frameset.cpp b/components/structures/src/frameset.cpp
new file mode 100644
index 000000000..4d4cbc88b
--- /dev/null
+++ b/components/structures/src/frameset.cpp
@@ -0,0 +1,144 @@
+#include <ftl/data/new_frameset.hpp>
+#include <ftl/data/framepool.hpp>
+
+using ftl::data::Frame;
+using ftl::data::FrameSet;
+
+FrameSet::FrameSet(Pool *ppool, FrameID pid, int64_t ts, size_t psize) :
+	Frame(ppool->allocate(FrameID(pid.frameset(),255), ts)), mask(0) {
+	
+	flush_count = 0; // Reset flush on store...
+	frames.reserve(psize);
+}
+
+FrameSet::~FrameSet() {
+	if (status() == ftl::data::FrameStatus::CREATED) store();
+	if (status() == ftl::data::FrameStatus::STORED) flush();
+}
+
+void ftl::data::FrameSet::completed(size_t ix) {
+	if (ix == 255) {
+		// Index 255 refers to the frameset's own frame; nothing to mark.
+	} else if (ix < frames.size()) {
+		// If already completed for given frame, then skip
+		if (mask & (1 << ix)) return;
+
+		mask |= (1 << ix);
+		++count;
+	} else {
+		throw FTL_Error("Completing frame that does not exist: " << timestamp() << ":" << ix);
+	}
+}
+
+void ftl::data::FrameSet::changeTimestamp(int64_t ts) {
+	timestamp_ = ts;
+	for (auto &f : frames) {
+		f.timestamp_ = ts;
+	}
+}
+
+void ftl::data::FrameSet::resize(size_t s) {
+	while (frames.size() < s) {
+		frames.push_back(std::move(pool()->allocate(FrameID(frameset(), frames.size()), timestamp())));
+	}
+	while (frames.size() > s) frames.pop_back();
+}
+
+void ftl::data::FrameSet::moveTo(ftl::data::FrameSet &fs) {
+	UNIQUE_LOCK(fs.mutex(), lk);
+	Frame::moveTo(fs);
+	fs.count = static_cast<int>(count);
+	fs.flags_ = (int)flags_;
+	fs.mask = static_cast<unsigned int>(mask);
+	fs.frames = std::move(frames);
+
+	count = 0;
+	mask = 0;
+	set(ftl::data::FSFlag::STALE);
+}
+
+ftl::data::Frame &ftl::data::FrameSet::firstFrame() {
+	for (size_t i=0; i<frames.size(); ++i) {
+		if (hasFrame(i)) return frames[i];
+	}
+	throw FTL_Error("No frames in frameset");
+}
+
+const ftl::data::Frame &ftl::data::FrameSet::firstFrame() const {
+	for (size_t i=0; i<frames.size(); ++i) {
+		if (hasFrame(i)) return frames[i];
+	}
+	throw FTL_Error("No frames in frameset");
+}
+
+bool ftl::data::FrameSet::hasAnyChanged(ftl::codecs::Channel c) const {
+	for (size_t i=0; i<frames.size(); ++i) {
+		if (frames[i].changed(c)) return true;
+	}
+	return false;
+}
+
+void FrameSet::store() {
+	if (status() != ftl::data::FrameStatus::CREATED) throw FTL_Error("Cannot store frameset multiple times");
+
+	{
+		//UNIQUE_LOCK(smtx, lk);
+		for (auto &f : frames) if (f.status() == ftl::data::FrameStatus::CREATED) f.store();
+		ftl::data::Frame::store();
+	}
+}
+
+void FrameSet::flush() {
+	if (status() == ftl::data::FrameStatus::FLUSHED) throw FTL_Error("Cannot flush frameset multiple times");
+
+	// Build list of all changed but unflushed channels.
+	std::unordered_set<ftl::codecs::Channel> unflushed;
+
+	{
+		UNIQUE_LOCK(smtx, lk);
+		for (auto &f : frames) {
+			for (auto &c : f.changed()) {
+				if (!f.flushed(c.first)) {
+					unflushed.emplace(c.first);
+				}
+			}
+		}
+
+		for (auto &f : frames) if (f.status() == ftl::data::FrameStatus::STORED) f.flush();
+		ftl::data::Frame::flush();
+	}
+
+	for (auto c : unflushed) {
+		pool()->flush_fs_.trigger(*this, c);
+	}
+}
+
+void FrameSet::flush(ftl::codecs::Channel c) {
+	{
+		UNIQUE_LOCK(smtx, lk);
+		for (auto &f : frames) if (f.hasOwn(c)) f.flush(c);
+	}
+	
+	pool()->flush_fs_.trigger(*this, c);
+}
+
+/**
+ * Make a frameset from a single frame. It borrows the pool, id and
+ * timestamp from the frame and creates a wrapping frameset instance.
+ */
+std::shared_ptr<FrameSet> FrameSet::fromFrame(Frame &f) {
+	auto sptr = std::make_shared<FrameSet>(f.pool(), f.id(), f.timestamp());
+	sptr->frames.push_back(std::move(f));
+	sptr->count = 1;
+	sptr->mask = 1;
+	return sptr;
+}
+
+std::unordered_set<ftl::codecs::Channel> FrameSet::channels() {
+	std::unordered_set<ftl::codecs::Channel> res{};
+	for (auto& f : frames) {
+		auto c = f.channels();
+		res.insert(c.begin(), c.end());
+	}
+	return res;
+}
diff --git a/components/structures/src/new_frame.cpp b/components/structures/src/new_frame.cpp
index 7ed685d0c..2b45a62a3 100644
--- a/components/structures/src/new_frame.cpp
+++ b/components/structures/src/new_frame.cpp
@@ -1,20 +1,27 @@
 #include <ftl/data/new_frame.hpp>
 #include <ftl/data/framepool.hpp>
+#include <ftl/timer.hpp>
 
 using ftl::data::Frame;
 using ftl::data::Session;
 using ftl::data::ChannelConfig;
 using ftl::data::StorageMode;
 using ftl::data::FrameStatus;
+using ftl::codecs::Channel;
+using ftl::data::Message;
 
 #define LOGURU_REPLACE_GLOG 1
 #include <loguru.hpp>
 
 static std::unordered_map<ftl::codecs::Channel, ChannelConfig> reg_channels;
+static std::unordered_map<size_t, std::function<bool(const ftl::data::Frame &, ftl::codecs::Channel, std::vector<uint8_t> &)>> encoders;
 
 void ftl::data::registerChannel(ftl::codecs::Channel c, const ChannelConfig &config) {
 	auto i = reg_channels.find(c);
 	if (i != reg_channels.end()) {
+		if (i->second.mode == config.mode && i->second.type_id == config.type_id && i->second.name == config.name) {
+			return;
+		}
 		throw FTL_Error("Channel " << static_cast<unsigned int>(c) << " already registered");
 	}
 
@@ -27,12 +34,12 @@ void ftl::data::clearRegistry() {
 
 bool ftl::data::isPersistent(ftl::codecs::Channel c) {
 	auto i = reg_channels.find(c);
-	return (i != reg_channels.end()) ? i->second.mode == StorageMode::PERSISTENT : false;
+	return (i != reg_channels.end()) ? i->second.mode == StorageMode::PERSISTENT : int(c) >= 64 && int(c) < 2048;
 }
 
 bool ftl::data::isAggregate(ftl::codecs::Channel c) {
 	auto i = reg_channels.find(c);
-	return (i != reg_channels.end()) ? i->second.mode == StorageMode::AGGREGATE : false;
+	return (i != reg_channels.end()) ? i->second.mode == StorageMode::AGGREGATE : (int(c) >= 32 && int(c) < 64) || int(c) >= 4096;
 }
 
 size_t ftl::data::getChannelType(ftl::codecs::Channel c) {
@@ -49,25 +56,83 @@ ftl::codecs::Channel ftl::data::getChannelByName(const std::string &name) {
 	return ftl::codecs::Channel::Colour;
 }
 
+std::function<bool(const ftl::data::Frame &, ftl::codecs::Channel, std::vector<uint8_t> &)> ftl::data::getTypeEncoder(size_t type) {
+	const auto &i = encoders.find(type);
+	if (i != encoders.end()) return i->second;
+	else return nullptr;
+}
+
+void ftl::data::setTypeEncoder(size_t type, const std::function<bool(const ftl::data::Frame &, ftl::codecs::Channel, std::vector<uint8_t> &)> &e) {
+	encoders[type] = e;
+}
+
+//==============================================================================
+
+//static std::atomic_int frame_count = 0;
+
+Frame::Frame(Pool *ppool, Session *parent, FrameID pid, int64_t ts)
+ : timestamp_(ts), id_(pid), pool_(ppool), parent_(parent), status_(FrameStatus::CREATED) {
+	//LOG(INFO) << "Frames: " << ++frame_count;
+ };
+
 Frame::~Frame() {
 	if (status_ == FrameStatus::CREATED) store();
 	if (status_ == FrameStatus::STORED) flush();
-	if (status_ != FrameStatus::RELEASED && pool_) pool_->release(*this);
+	if (status_ != FrameStatus::RELEASED && pool_) {
+		pool_->release(*this);
+		//--frame_count;
+	}
 };
 
+bool ftl::data::Frame::hasAll(const std::unordered_set<ftl::codecs::Channel> &cs) {
+	for (auto &a : cs) {
+		if (!has(a)) return false;
+	}
+	return true;
+}
+
 bool ftl::data::Frame::has(ftl::codecs::Channel c) const {
 	const auto &i = data_.find(c);
 	if (i != data_.end() && i->second.status != ftl::data::ChannelStatus::INVALID) {
 		return true;
 	} else {
-		return (parent_ && parent_->has(c)); 
+		return (parent_ && parent_->has(c));
+	}
+}
+
+bool ftl::data::Frame::availableAll(const std::unordered_set<ftl::codecs::Channel> &cs) const {
+	bool result = true;
+	for (auto c : cs) {
+		result &= available(c);
+	}
+	return result;
+}
+
+std::unordered_set<ftl::codecs::Channel> ftl::data::Frame::available() const {
+	std::unordered_set<ftl::codecs::Channel> result = channels();
+
+	uint64_t m = 1;
+	// TODO: NAIVE, use ffs or ctz.
+	for (int i=0; i<32; ++i) {
+		if (m & available_) result.emplace(static_cast<Channel>(i));
+		m <<= 1;
+	}
+
+	return result;
+}
+
+void ftl::data::Frame::remove(ftl::codecs::Channel c) {
+	const auto &i = data_.find(c);
+	if (i != data_.end()) {
+		i->second.status = ftl::data::ChannelStatus::INVALID;
+		changed_.erase(c);
 	}
 }
 
 Frame::ChannelData &Frame::_getData(ftl::codecs::Channel c) {
 	if (status_ == FrameStatus::RELEASED) throw FTL_Error("Reading a released frame");
 	const auto &i = data_.find(c);
-	if (i != data_.end()) {
+	if (i != data_.end() && i->second.status != ChannelStatus::INVALID) {
 		return i->second;
 	} else if (parent_) {
 		return parent_->_getData(c);
@@ -77,7 +142,7 @@ Frame::ChannelData &Frame::_getData(ftl::codecs::Channel c) {
 const Frame::ChannelData &Frame::_getData(ftl::codecs::Channel c) const {
 	if (status_ == FrameStatus::RELEASED) throw FTL_Error("Reading a released frame");
 	const auto &i = data_.find(c);
-	if (i != data_.end()) {
+	if (i != data_.end() && i->second.status != ChannelStatus::INVALID) {
 		return i->second;
 	} else if (parent_) {
 		return parent_->_getData(c);
@@ -87,26 +152,44 @@ const Frame::ChannelData &Frame::_getData(ftl::codecs::Channel c) const {
 std::any &Frame::createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t) {
 	if (status_ != FrameStatus::CREATED) throw FTL_Error("Cannot apply change after store " << static_cast<int>(status_));
 
-	auto &d = data_[c];
-	if (d.status != ftl::data::ChannelStatus::FLUSHED) {
-		d.status = ftl::data::ChannelStatus::DISPATCHED;
-		d.encoded.clear();
+	ftl::data::Frame::ChannelData *d;
+
+	if (parent_) {
+		UNIQUE_LOCK(mutex(), lk);
+		d = &(data_[c]);
 		touch(c, t);
-		return d.data;
+	} else {
+		d = &(data_[c]);
+		touch(c, t);
+	}
+
+	if (d->status != ftl::data::ChannelStatus::FLUSHED) {
+		d->status = ftl::data::ChannelStatus::DISPATCHED;
+		d->encoded.clear();
+		return d->data;
 	} else {
 		throw FTL_Error("Channel is flushed and read-only: " << static_cast<unsigned int>(c));
 	}
 }
 
-std::any &Frame::createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t, std::vector<uint8_t> &data) {
+std::any &Frame::createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t, const ftl::codecs::Packet &data) {
 	if (status_ != FrameStatus::CREATED) throw FTL_Error("Cannot apply change after store " << static_cast<int>(status_));
 
-	auto &d = data_[c];
-	if (d.status != ftl::data::ChannelStatus::FLUSHED) {
-		d.status = ftl::data::ChannelStatus::DISPATCHED;
-		d.encoded = std::move(data);
+	ftl::data::Frame::ChannelData *d;
+
+	if (parent_) {
+		UNIQUE_LOCK(mutex(), lk);
+		d = &(data_[c]);
+		touch(c, t);
+	} else {
+		d = &(data_[c]);
 		touch(c, t);
-		return d.data;
+	}
+
+	if (d->status != ftl::data::ChannelStatus::FLUSHED) {
+		d->status = (data.codec == ftl::codecs::codec_t::MSGPACK) ? ftl::data::ChannelStatus::ENCODED : ftl::data::ChannelStatus::DISPATCHED;
+		d->encoded.push_back(data);
+		return d->data;
 	} else {
 		throw FTL_Error("Channel is flushed and read-only: " << static_cast<unsigned int>(c));
 	}
@@ -115,12 +198,21 @@ std::any &Frame::createAnyChange(ftl::codecs::Channel c, ftl::data::ChangeType t
 std::any &Frame::createAny(ftl::codecs::Channel c) {
 	if (status_ != FrameStatus::STORED) throw FTL_Error("Cannot create before store or after flush");
 
-	auto &d = data_[c];
-	if (d.status != ftl::data::ChannelStatus::FLUSHED) {
-		d.status = ftl::data::ChannelStatus::VALID;
-		d.encoded.clear();
+	ftl::data::Frame::ChannelData *d;
+
+	if (parent_) {
+		UNIQUE_LOCK(mutex(), lk);
+		d = &(data_[c]);
 		touch(c);
-		return d.data;
+	} else {
+		d = &(data_[c]);
+		touch(c);
+	}
+
+	if (d->status != ftl::data::ChannelStatus::FLUSHED) {
+		d->status = ftl::data::ChannelStatus::VALID;
+		d->encoded.clear();
+		return d->data;
 	} else {
 		throw FTL_Error("Channel is flushed and read-only: " << static_cast<unsigned int>(c));
 	}
@@ -131,7 +223,12 @@ std::any &Frame::getAnyMutable(ftl::codecs::Channel c) {
 	return d.data;
 }
 
-const std::vector<uint8_t> &ftl::data::Frame::getEncoded(ftl::codecs::Channel c) const {
+const std::any &Frame::getAny(ftl::codecs::Channel c) const {
+	auto &d = _getData(c);
+	return d.data;
+}
+
+const std::list<ftl::codecs::Packet> &ftl::data::Frame::getEncoded(ftl::codecs::Channel c) const {
 	const auto &d = _getData(c);
 	if (d.status != ftl::data::ChannelStatus::INVALID) {
 		return d.encoded;
@@ -149,6 +246,7 @@ bool Frame::flush() {
 	for (auto c : changed_) {
 		_getData(c.first).status = ChannelStatus::FLUSHED;
 	}
+	_primaryStore();
 	return true;
 }
 
@@ -170,6 +268,40 @@ void Frame::store() {
 
 	if (!parent_) return;
 
+	{
+		UNIQUE_LOCK(parent_->mutex(), lk);
+		for (auto c : changed_) {
+			if (ftl::data::isPersistent(c.first) && hasOwn(c.first)) {
+				auto &d = data_[c.first];
+				auto &pd = parent_->data_[c.first];
+				pd.data = std::move(d.data);
+				pd.encoded = std::move(d.encoded);
+				//if (d.status == ChannelStatus::ENCODED) LOG(INFO) << "STORE ENCODED: " << (int)c.first;
+				pd.status = ChannelStatus::VALID;
+				//data_.erase(c.first);
+				d.status = ChannelStatus::INVALID;
+			}
+		}
+	}
+
+	for (auto c : changed_) {
+		parent_->change_.trigger(*this, c.first);
+		uint64_t sig = (uint64_t(id()) << 32) + static_cast<unsigned int>(c.first);
+		const auto &i = parent_->change_channel_.find(sig);
+		if (i != parent_->change_channel_.end()) i->second.trigger(*this, c.first);
+	}
+}
+
+void Frame::_primaryStore() {
+	if (mode_ == FrameMode::RESPONSE) return;
+	forceStore();
+}
+
+void Frame::forceStore() {
+	if (!parent_) return;
+
+	//UNIQUE_LOCK(parent_->mutex(), lk);
+
 	for (auto c : changed_) {
 		if (ftl::data::isPersistent(c.first) && hasOwn(c.first)) {
 			auto &d = data_[c.first];
@@ -178,12 +310,14 @@ void Frame::store() {
 			//pd.encoded = std::move(d.encoded);
 			pd.status = ChannelStatus::VALID;
 			//data_.erase(c.first);
+			d.status = ChannelStatus::INVALID;
 		}
 
-		parent_->change_.trigger(*this, c.first);
-		uint64_t sig = (uint64_t(id) << 32) + static_cast<unsigned int>(c.first);
-		const auto &i = parent_->change_channel_.find(sig);
-		if (i != parent_->change_channel_.end()) i->second.trigger(*this, c.first);
+		//parent_->change_.trigger(*this, c.first);
+		//uint64_t sig = (uint64_t(id()) << 32) + static_cast<unsigned int>(c.first);
+		//const auto &i = parent_->change_channel_.find(sig);
+
+		//if (i != parent_->change_channel_.end()) i->second.trigger(*this, c.first);
 	}
 }
 
@@ -196,11 +330,16 @@ void Frame::merge(Frame &f) {
 		d.status = ChannelStatus::VALID;
 		touch(x.first);
 	}
+	f.status_ = FrameStatus::RELEASED;
+	f.changed_.clear();
 }
 
 void Frame::moveTo(Frame &f) {
 	if (status_ == FrameStatus::RELEASED) throw FTL_Error("Moving released frame");
+	f.id_ = id_;
+	f.timestamp_ = timestamp_;
 	f.status_ = status_;
+	f.mode_ = mode_;
 	f.parent_ = parent_;
 	f.pool_ = pool_;
 	f.data_ = std::move(data_);
@@ -211,7 +350,7 @@ void Frame::moveTo(Frame &f) {
 void Frame::swapChanged(Frame &f) {
 	for (auto x : changed_) {
 		f.data_[x.first].data.swap(data_[x.first].data);
-		f.changed_[x.first] = ftl::data::ChangeType::LOCAL;
+		f.changed_[x.first] = (mode_ == FrameMode::PRIMARY) ? ChangeType::PRIMARY : ChangeType::RESPONSE;
 	}
 }
 
@@ -222,28 +361,123 @@ void Frame::swapChannel(ftl::codecs::Channel c, Frame &f) {
 		fd.data.swap(d.data);
 		d.status = ftl::data::ChannelStatus::VALID;
 		changed_[c] = f.changed_[c];
-		f.changed_[c] = ftl::data::ChangeType::LOCAL;
+		f.changed_[c] = (mode_ == FrameMode::PRIMARY) ? ChangeType::PRIMARY : ChangeType::RESPONSE;
+	}
+}
+
+void Frame::swapChannels(ftl::codecs::Channel c1, ftl::codecs::Channel c2) {
+	if (hasOwn(c1) && hasOwn(c2)) {
+		auto &d1 = data_[c1];
+		auto &d2 = data_[c2];
+		d2.data.swap(d1.data);
+
+		auto status = d1.status;
+		d1.status = d2.status;
+		d2.status = status;
+
+		changed_[c1] = (mode_ == FrameMode::PRIMARY) ? ChangeType::PRIMARY : ChangeType::RESPONSE;
+		changed_[c2] = (mode_ == FrameMode::PRIMARY) ? ChangeType::PRIMARY : ChangeType::RESPONSE;
 	}
 }
 
 void Frame::reset() {
 	for (auto &d : data_) {
 		d.second.status = ChannelStatus::INVALID;
+		d.second.encoded.clear();
+
+		// Note: Data channels should be cleared
+		if ((int)d.first >= 32) d.second.data.reset();
 	}
 	changed_.clear();
 	status_ = FrameStatus::CREATED;
+	mode_ = FrameMode::PRIMARY;
+	available_ = 0;
 }
 
 void Frame::hardReset() {
 	status_ = FrameStatus::CREATED;
 	changed_.clear();
 	data_.clear();
+	available_ = 0;
+}
+
+Frame Frame::response() const {
+	if (!pool_) throw FTL_Error("Frame has no pool, cannot generate response");
+	Frame f = pool_->allocate(id_, ftl::timer::get_time());
+	f.mode_ = FrameMode::RESPONSE;
+	f.store();
+	return f;
+}
+
+Frame Frame::make_standalone() {
+	Frame f(nullptr, nullptr, FrameID(0,0), 0);
+	f.mode_ = FrameMode::STANDALONE;
+	return f;
+}
+
+std::unordered_set<ftl::codecs::Channel> Frame::channels() const {
+	std::unordered_set<ftl::codecs::Channel> res{};
+	for (const auto& [k, v] : data_) {
+		std::ignore = v;
+		res.emplace(k);
+	}
+	return res;
+}
+
+std::unordered_set<ftl::codecs::Channel> Frame::allChannels() const {
+	std::unordered_set<ftl::codecs::Channel> res{};
+	for (const auto& [k, v] : data_) {
+		std::ignore = v;
+		res.emplace(k);
+	}
+	if (parent_) {
+		for (const auto& [k, v] : parent_->data_) {
+			std::ignore = v;
+			res.emplace(k);
+		}
+	}
+
+	uint64_t m = 1;
+	// TODO: NAIVE, use ffs or ctz.
+	for (int i=0; i<32; ++i) {
+		if (m & available_) res.emplace(static_cast<Channel>(i));
+		m <<= 1;
+	}
+	return res;
+}
+
+const std::map<ftl::data::Message,std::string> &Frame::messages() const {
+	return get<std::map<ftl::data::Message,std::string>>(Channel::Messages);
+}
+
+void Frame::message(ftl::data::Message code, const std::string &msg) {
+	auto &msgs = create<std::map<ftl::data::Message,std::string>>(Channel::Messages);
+	msgs[code] = msg;
+}
+
+void Frame::message(ftl::data::Message code, const ftl::Formatter &msg) {
+	message(code, msg.str());
+}
+
+std::string Frame::name() const {
+	if (has(Channel::MetaData)) {
+		const auto &meta = get<std::map<std::string,std::string>>(Channel::MetaData);
+		auto i = meta.find("name");
+		if (i != meta.end()) return i->second;
+	}
+
+	// Generate a name
+	return std::string("Frame-") + std::to_string(frameset()) + std::string("-") + std::to_string(source());
+}
+
+const std::map<std::string,std::string> &Frame::metadata() const {
+	return get<std::map<std::string,std::string>>(Channel::MetaData);
 }
 
 // ==== Session ================================================================
 
-ftl::Handle Session::onChange(uint32_t id, ftl::codecs::Channel c, const std::function<bool(Frame&,ftl::codecs::Channel)> &cb) {
-	uint64_t sig = (uint64_t(id) << 32) + static_cast<unsigned int>(c);
+ftl::Handle Session::onChange(uint32_t pid, ftl::codecs::Channel c, const std::function<bool(Frame&,ftl::codecs::Channel)> &cb) {
+	uint64_t sig = (uint64_t(pid) << 32) + static_cast<unsigned int>(c);
 	return change_channel_[sig].on(cb);
 }
 
@@ -256,42 +490,44 @@ ftl::Handle Session::onFlush(const std::function<bool(Frame&,ftl::codecs::Channe
 }
 
 void Session::notifyChanges(Frame &f) {
-	
+
 }
 
 void Session::flush(Frame &f) {
-	// TODO: Lock
 	for (auto c : f.changed()) {
-		if (c.second == ftl::data::ChangeType::LOCAL) {
+		if (c.second == ftl::data::ChangeType::PRIMARY || c.second == ftl::data::ChangeType::RESPONSE) {
 			auto &d = f._getData(c.first);
 			if (d.status == ftl::data::ChannelStatus::VALID) {
 				d.status = ftl::data::ChannelStatus::FLUSHED;
 				flush_.trigger(f, c.first);
+				if (f.pool()) f.pool()->flush_.trigger(f, c.first);
 			}
 		} else if (c.second == ftl::data::ChangeType::FOREIGN) {
 			auto &d = f._getData(c.first);
 			if (d.status == ftl::data::ChannelStatus::DISPATCHED) {
 				d.status = ftl::data::ChannelStatus::FLUSHED;
 				flush_.trigger(f, c.first);
+				if (f.pool()) f.pool()->flush_.trigger(f, c.first);
 			}
 		}
 	}
 }
 
 void Session::flush(Frame &f, ftl::codecs::Channel c) {
-	// TODO: Lock
 	auto cc = f.changed_[c];
-	if (cc == ftl::data::ChangeType::LOCAL) {
+	if (cc == ftl::data::ChangeType::PRIMARY || cc == ftl::data::ChangeType::RESPONSE) {
 		auto &d = f._getData(c);
 		if (d.status == ftl::data::ChannelStatus::VALID) {
 			d.status = ftl::data::ChannelStatus::FLUSHED;
 			flush_.trigger(f, c);
+			if (f.pool()) f.pool()->flush_.trigger(f, c);
 		}
 	} else if (cc == ftl::data::ChangeType::FOREIGN) {
 		auto &d = f._getData(c);
 		if (d.status == ftl::data::ChannelStatus::DISPATCHED) {
 			d.status = ftl::data::ChannelStatus::FLUSHED;
 			flush_.trigger(f, c);
+			if (f.pool()) f.pool()->flush_.trigger(f, c);
 		}
 	}
 }
diff --git a/components/structures/src/pool.cpp b/components/structures/src/pool.cpp
index dd22c3b46..ff2484e13 100644
--- a/components/structures/src/pool.cpp
+++ b/components/structures/src/pool.cpp
@@ -9,6 +9,7 @@ Pool::Pool(size_t min_n, size_t max_n) : min_n_(min_n), max_n_(max_n) {
 }
 
 Pool::~Pool() {
+	UNIQUE_LOCK(mutex_, lk);
 	for (auto &p : pool_) {
 		for (auto *f : p.second.pool) {
 			f->status_ = FrameStatus::RELEASED;
@@ -17,52 +18,65 @@ Pool::~Pool() {
 	}
 }
 
-Frame Pool::allocate(uint32_t id, int64_t timestamp) {
-	auto &pool = _getPool(id);
+Frame Pool::allocate(FrameID id, int64_t timestamp) {
+	Frame *f;
 
-	if (timestamp < pool.last_timestamp) {
-		throw FTL_Error("New frame timestamp is older that previous: " << timestamp);
-	}
+	{
+		UNIQUE_LOCK(mutex_, lk);
+		auto &pool = _getPool(id);
 
-	// Add items as required
-	if (pool.pool.size() < min_n_) {
-		while (pool.pool.size() < ideal_n_) {
-			pool.pool.push_back(new Frame(this, &pool.session, id, 0));
+		if (timestamp < pool.last_timestamp) {
+			timestamp = pool.last_timestamp;
+			//throw FTL_Error("New frame timestamp is older than previous: " << timestamp << " vs " << pool.last_timestamp);
 		}
+
+		// Add items as required
+		if (pool.pool.size() < min_n_) {
+			while (pool.pool.size() < ideal_n_) {
+				pool.pool.push_back(new Frame(this, &pool.session, id, 0));
+			}
+		}
+
+		f = pool.pool.front();
+		pool.pool.pop_front();
+		pool.last_timestamp = timestamp;
 	}
 
-	Frame *f = pool.pool.front();
 	Frame ff = std::move(*f);
-	delete f;
 	ff.restart(timestamp);
-	pool.pool.pop_front();
-	pool.last_timestamp = timestamp;
+	delete f;
+
 	return ff;
 }
 
 void Pool::release(Frame &f) {
 	if (f.status() == FrameStatus::RELEASED) return;
 	f.reset();
-	auto &pool = _getPool(f.id);
+
+	UNIQUE_LOCK(mutex_, lk);
+	auto &pool = _getPool(f.id());
 
 	if (pool.pool.size() < max_n_) {
-		Frame *pf = new Frame(this, &pool.session, f.id, 0);
+		Frame *pf = new Frame(this, &pool.session, f.id(), 0);
 		f.moveTo(*pf);
 		pool.pool.push_back(pf);
 	}
 }
 
-ftl::data::Session &Pool::session(uint32_t id) {
+ftl::data::Session &Pool::session(FrameID id) {
+	UNIQUE_LOCK(mutex_, lk);
 	auto &pool = _getPool(id);
 	return pool.session;
 }
 
-size_t Pool::size(uint32_t id) {
+size_t Pool::size(FrameID id) {
+	UNIQUE_LOCK(mutex_, lk);
 	auto &pool = _getPool(id);
 	return pool.pool.size();
 }
 
 size_t Pool::size() {
+	UNIQUE_LOCK(mutex_, lk);
 	size_t s = 0;
 	for (auto &p : pool_) {
 		s += p.second.pool.size();
@@ -70,6 +84,6 @@ size_t Pool::size() {
 	return s;
 }
 
-ftl::data::Pool::PoolData &Pool::_getPool(uint32_t id) {
-	return pool_[id];
+ftl::data::Pool::PoolData &Pool::_getPool(FrameID id) {
+	return pool_[id.id];
 }
diff --git a/components/structures/test/CMakeLists.txt b/components/structures/test/CMakeLists.txt
index 653cfd3c4..fb67e4e58 100644
--- a/components/structures/test/CMakeLists.txt
+++ b/components/structures/test/CMakeLists.txt
@@ -2,15 +2,28 @@
 add_executable(nframe_unit
 	$<TARGET_OBJECTS:CatchTest>
 	./frame_unit.cpp
-	../src/new_frame.cpp
-	./frame_example_1.cpp
 )
 target_include_directories(nframe_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_link_libraries(nframe_unit
 	ftlcommon ftlcodecs)
 
+	target_precompile_headers(nframe_unit REUSE_FROM ftlcommon)
+
 add_test(NFrameUnitTest nframe_unit)
 
+### Frame Example 1 ############################################################
+add_executable(frame_example_1
+	$<TARGET_OBJECTS:CatchTest>
+	../src/pool.cpp
+	../src/new_frame.cpp
+	./frame_example_1.cpp
+)
+target_include_directories(frame_example_1 PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
+target_link_libraries(frame_example_1
+	ftlcommon ftlcodecs)
+
+add_test(FrameEg1Test frame_example_1)
+
 ### Pool Unit ##################################################################
 add_executable(pool_unit
 	$<TARGET_OBJECTS:CatchTest>
diff --git a/components/structures/test/frame_example_1.cpp b/components/structures/test/frame_example_1.cpp
index 8be10efaa..cb4d20e45 100644
--- a/components/structures/test/frame_example_1.cpp
+++ b/components/structures/test/frame_example_1.cpp
@@ -1,12 +1,18 @@
 #include "catch.hpp"
 
+#include <ftl/codecs/packet.hpp>
 #include <ftl/data/new_frame.hpp>
+#include <ftl/data/framepool.hpp>
+#include <ftl/timer.hpp>
+
+#include <loguru.hpp>
 
 using ftl::data::Session;
 using ftl::data::Frame;
 using ftl::codecs::Channel;
 using ftl::data::ChangeType;
 using ftl::data::StorageMode;
+using ftl::data::FrameID;
 
 namespace ftl {
 namespace streams {
@@ -14,48 +20,68 @@ namespace streams {
 /* Mock Feed class */
 class Feed {
 	public:
-	Feed() : buffer0_(nullptr, &session_,0,0), buffer1_(nullptr, &session_,0,0) {
-		flush_handle_ = session_.onFlush([this](Frame &f, Channel c) {
+	Feed() : pool_(5,10), buffer0_(std::move(pool_.allocate(FrameID(0,0),0))), buffer1_(std::move(pool_.allocate(FrameID(0,0),0))) {
+		flush_handle_ = pool_.session(FrameID(0,0)).onFlush([this](Frame &f, Channel c) {
 			// Loop changes back to buffer.
 			// Normally transmitted somewhere first.
-			//buffer_.swapChannel(c, f);
+			// buffer1_.swapChannel(c, f);
 			ChangeType cc = f.getChangeType(c);
-			buffer0_.createAnyChange(c, (cc == ChangeType::LOCAL) ? ChangeType::FOREIGN : ChangeType::COMPLETED) = f.getAnyMutable(c);
+			if (cc == ChangeType::RESPONSE) {
+				ftl::codecs::Packet pkt;
+				pkt.frame_count = 1;
+				pkt.codec = ftl::codecs::codec_t::MSGPACK;
+				pkt.bitrate = 255;
+				pkt.flags = 0;
+				
+				auto encoder = ftl::data::getTypeEncoder(f.type(c));
+				if (encoder) {
+					if (encoder(f, c, pkt.data)) {
+						buffer1_.informChange(c, ChangeType::FOREIGN, pkt);
+					}
+				} else {
+					LOG(WARNING) << "Missing msgpack encoder";
+				}
+			} else if (cc == ChangeType::PRIMARY) {
+				//ftl::codecs::Packet pkt(f.getEncoded(c).front());
+				//buffer0_.informChange(c, (cc == ChangeType::PRIMARY) ? ChangeType::FOREIGN : ChangeType::COMPLETED, ???);
+				buffer0_.swapChannel(c, f);
+			}
 			return true;
 		});
 	}
 
 	inline Frame &buffer() { return buffer0_; }
-	inline Session &session() { return session_; }
 
 	inline void fakeDispatch() {
-		buffer0_.moveTo(buffer1_);
-		buffer0_ = Frame(nullptr, &session_,0,0);
+		Frame f = std::move(buffer0_);
+		buffer0_ = pool_.allocate(FrameID(0,0),ftl::timer::get_time());
 
 		// Save any persistent changes
-		buffer1_.store();
+		f.store();
 		// Transmit any forwarding changes and prevent further changes
-		buffer1_.flush();  // TODO: use special dispatched function
+		//f.flush();  // TODO: use special dispatched function
 
 		// Call the onFrame handler.
 		// Would be in another thread in real version of this class.
-		frame_handler_.trigger(buffer1_);
+		frame_handler_.trigger(f);
 	}
 
-	inline ftl::Handle onFrame(const std::function<bool(Frame&)> &cb) {
-		return frame_handler_.on(cb);
-	}
+	inline Frame getFrame() {
+		Frame f = std::move(buffer1_);
+		buffer1_ = pool_.allocate(FrameID(0,0),ftl::timer::get_time());
 
-	Frame createFrame(int id) {
-		// TODO: Give it the id and a timestamp
-		Frame f(nullptr, &session_,0,0);
+		// Save any persistent changes
 		f.store();
 		return f;
 	}
 
+	inline ftl::Handle onFrame(const std::function<bool(Frame&)> &cb) {
+		return frame_handler_.on(cb);
+	}
+
 	private:
+	ftl::data::Pool pool_;
 	ftl::Handler<Frame&> frame_handler_;
-	Session session_;
 	Frame buffer0_;
 	Frame buffer1_;
 	ftl::Handle flush_handle_;
@@ -72,6 +98,17 @@ struct VideoFrame {
 	int matdata;
 };
 
+// Disable msgpack
+template <>
+inline bool ftl::data::make_type<VideoFrame>() {
+	return false;
+}
+
+template <>
+inline bool ftl::data::decode_type<VideoFrame>(std::any &a, const std::vector<uint8_t> &data) {
+	return false;
+}
+
 TEST_CASE("ftl::data::Frame full non-owner example", "[example]") {
 	// Register channels somewhere at startup
 	ftl::data::make_channel<VideoFrame>(Channel::Colour, "colour", StorageMode::TRANSIENT);
@@ -85,6 +122,73 @@ TEST_CASE("ftl::data::Frame full non-owner example", "[example]") {
 	int changed = 0;
 	ftl::Handle myhandle;
 
+	auto h = feed.onFrame([&i,&feed,&myhandle,&changed](Frame &f) {
+		i++;
+
+		// First frame received
+		// User of Frame makes changes or reads values from state
+		REQUIRE( f.get<float>(Channel::Pose) == 6.0f );
+		REQUIRE( f.get<VideoFrame>(Channel::Depth).gpudata == 1 );
+
+		// Create a new frame for same source for some return state
+		Frame nf = f.response();
+		nf.create<std::list<std::string>>(Channel::Messages) = "First Message";
+		nf.create<std::list<std::string>>(Channel::Messages) = "Second Message";
+		nf.create<int>(Channel::Control) = 3456;
+		//nf.set<float>(Channel::Pose) = 7.0f;
+
+		// Listen for this `Control` change to be confirmed
+		myhandle = nf.onChange(Channel::Control, [&changed](Frame &f, Channel c) {
+			changed++;
+			return false;  // Call once only
+		});
+
+		// Either by destruction or manually, final action is flush to send
+		nf.flush();
+
+		return true;
+	});
+
+	// Generate some incoming changes from network
+	// Usually this is done inside the Feed class...
+	feed.buffer().createChange<VideoFrame>(Channel::Colour, ChangeType::FOREIGN).gpudata = 1;
+	feed.buffer().createChange<VideoFrame>(Channel::Depth, ChangeType::COMPLETED).gpudata = 1;
+	feed.buffer().createChange<float>(Channel::Pose, ChangeType::FOREIGN) = 6.0f;
+
+	// Fake a frame being completely received on network or from file
+	feed.fakeDispatch();
+
+	// Now pretend to be an owner and create a new frame... it should have the
+	// response data in it, so check for that.
+	{
+		Frame f = feed.getFrame();
+		REQUIRE( changed == 1 );  // Change notified before `onFrame`
+		REQUIRE( f.get<float>(Channel::Pose) == 6.0f );
+		REQUIRE( f.get<int>(Channel::Control) == 3456 );
+		REQUIRE( (*f.get<std::list<std::string>>(Channel::Messages).begin()) == "First Message" );
+	}
+	// We won't bother dispatching this new frame
+	//feed.fakeDispatch();
+
+	REQUIRE( i == 1 );
+
+	// For testing only...
+	ftl::data::clearRegistry();
+}
+
+TEST_CASE("ftl::data::Frame full owner example", "[example]") {
+	// Register channels somewhere at startup
+	ftl::data::make_channel<VideoFrame>(Channel::Colour, "colour", StorageMode::TRANSIENT);
+	ftl::data::make_channel<VideoFrame>(Channel::Depth, "depth", StorageMode::TRANSIENT);
+	ftl::data::make_channel<std::list<std::string>>(Channel::Messages, "messages", StorageMode::AGGREGATE);
+	ftl::data::make_channel<float>(Channel::Pose, "pose", StorageMode::PERSISTENT);
+
+	Feed feed;
+
+	int i=0;
+	int changed = 0;
+	ftl::Handle myhandle;
+
 	auto h = feed.onFrame([&i,&feed,&myhandle,&changed](Frame &f) {
 		// First frame received
 		if (i++ == 0 ) {
@@ -93,39 +197,45 @@ TEST_CASE("ftl::data::Frame full non-owner example", "[example]") {
 			REQUIRE( f.get<VideoFrame>(Channel::Depth).gpudata == 1 );
 
 			// Create a new frame for same source for some return state
-			Frame nf = feed.createFrame(f.id);
-			nf.create<std::list<std::string>>(Channel::Messages) = {"First Message"};
-			nf.create<std::list<std::string>>(Channel::Messages) = {"Second Message"};
+			Frame nf = f.response();
+			nf.create<std::list<std::string>>(Channel::Messages) = "First Message";
+			nf.create<std::list<std::string>>(Channel::Messages) = "Second Message";
 			nf.create<int>(Channel::Control) = 3456;
-			//nf.set<float>(Channel::Pose) = 7.0f;
+			nf.set<float>(Channel::Pose) = 7.0f;
 
 			// Listen for this `Control` change to be confirmed
 			myhandle = nf.onChange(Channel::Control, [&changed](Frame &f, Channel c) {
 				changed++;
 				return false;  // Call once only
 			});
-			
+
 			// Either by destruction or manually, final action is flush to send
 			nf.flush();
 		// Second frame received
 		} else {
-			REQUIRE( changed == 1 );  // Change notified before `onFrame`
-			REQUIRE( f.get<float>(Channel::Pose) == 6.0f );
-			REQUIRE( f.get<int>(Channel::Control) == 3456 );
-			REQUIRE( (*f.get<std::list<std::string>>(Channel::Messages).begin()) == "First Message" );
+
 		}
 		return true;
 	});
 
-	// Generate some incoming changes from network
-	// Usually this is done inside the Feed class...
-	feed.buffer().createChange<VideoFrame>(Channel::Colour, ChangeType::FOREIGN).gpudata = 1;
-	feed.buffer().createChange<VideoFrame>(Channel::Depth, ChangeType::COMPLETED).gpudata = 1;
-	feed.buffer().createChange<float>(Channel::Pose, ChangeType::FOREIGN) = 6.0f;
-
-	// Fake a frame being received on network or from file
+	// Create an entirely new frame, destruction will send it.
+	{
+		Frame f = feed.getFrame();
+		f.create<VideoFrame>(Channel::Colour).gpudata = 1;
+		f.create<VideoFrame>(Channel::Depth).gpudata = 1;
+		f.create<float>(Channel::Pose) = 6.0f;
+	}
+	// Trigger local onFrame callback with the above frame.
 	feed.fakeDispatch();
-	// And dispatch the response frame also
+
+	// Create next new frame, now includes response changes
+	{
+		Frame f = feed.getFrame();
+		REQUIRE( changed == 1 );  // Change notified before `onFrame`
+		REQUIRE( f.get<float>(Channel::Pose) == 7.0f );
+		REQUIRE( f.get<int>(Channel::Control) == 3456 );
+		REQUIRE( (*f.get<std::list<std::string>>(Channel::Messages).begin()) == "First Message" );
+	}
 	feed.fakeDispatch();
 
 	REQUIRE( i == 2 );
diff --git a/components/structures/test/frame_unit.cpp b/components/structures/test/frame_unit.cpp
index 265fa6e9d..33477e510 100644
--- a/components/structures/test/frame_unit.cpp
+++ b/components/structures/test/frame_unit.cpp
@@ -12,15 +12,22 @@ using ftl::data::Frame;
 using ftl::codecs::Channel;
 using ftl::data::ChangeType;
 using ftl::data::StorageMode;
+using ftl::data::FrameID;
 
 namespace ftl {
 namespace data {
 
 class Pool {
 	public:
-	static Frame make(Session *s, int id, uint64_t ts) { return Frame(nullptr, s, id, ts); }
+	static Frame make(Session *s, FrameID id, uint64_t ts) { return Frame(nullptr, s, id, ts); }
+	static Frame make(Pool *p, Session *s, FrameID id, uint64_t ts) { return Frame(p, s, id, ts); }
 
 	void release(Frame &f);
+
+	Frame allocate(FrameID id, int64_t ts);
+
+	ftl::Handler<ftl::data::Frame&,ftl::codecs::Channel> flush_;
+	ftl::Handler<ftl::data::FrameSet&,ftl::codecs::Channel> flush_fs_;
 };
 
 }
@@ -30,7 +37,7 @@ namespace streams {
 // Only Pool can create frames so make a mock Feed.
 class Feed {
 	public:
-	static Frame make(Session *s, int id, uint64_t ts) { return ftl::data::Pool::make(s, id, ts); }
+	static Frame make(Session *s, FrameID id, uint64_t ts) { return ftl::data::Pool::make(s, id, ts); }
 };
 
 }
@@ -42,11 +49,21 @@ void ftl::data::Pool::release(Frame &f) {
 
 }
 
+Frame ftl::data::Pool::allocate(FrameID id, int64_t ts) {
+	return make(nullptr, id, ts);
+}
+
+#define _FTL_DATA_FRAMEPOOL_HPP_
+#include <../src/new_frame.cpp>
+
+
+
 /* #1.1.1 */
 static_assert(sizeof(ftl::codecs::Channel) >= 4, "Channel must be at least 32bit");
 
 /* #1.1.2 */
-static_assert(std::is_integral<decltype(ftl::data::Frame::id)>::value, "Integral ID requried in Frame");
+//static_assert(std::is_integral<decltype(ftl::data::Frame::id)>::value, "Integral ID requried in Frame");
+static_assert(std::is_member_function_pointer<decltype(&ftl::data::Frame::id)>::value, "ID is required");
 static_assert(std::is_member_function_pointer<decltype(&ftl::data::Frame::timestamp)>::value, "Timestamp is required");
 
 /* #1.1.3  */
@@ -55,64 +72,75 @@ static_assert(std::is_member_function_pointer<decltype(&ftl::data::Frame::mutex)
 /* #1.1.4 */
 TEST_CASE("ftl::data::Frame encoded data", "[1.1.4]") {
 	SECTION("provide encoded data") {
-		Frame f = Feed::make(nullptr, 0, 0);
-		std::vector<uint8_t> data{44,22,33};
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
+		ftl::codecs::Packet data;
+		data.flags = 45;
 
 		f.createChange<int>(Channel::Pose, ftl::data::ChangeType::FOREIGN, data) = 55;
 		const auto &x = f.get<int>(Channel::Pose);
 		REQUIRE( x == 55 );
 
 		// Data has been moved.
-		REQUIRE(data.size() == 0);
+		//REQUIRE(data.size() == 0);
 	}
 
 	SECTION("get encoded data") {
-		Frame f = Feed::make(nullptr, 0, 0);
-		std::vector<uint8_t> data{44,22,33};
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
+		ftl::codecs::Packet data;
+		data.flags = 45;
 
 		f.createChange<int>(Channel::Pose, ftl::data::ChangeType::FOREIGN, data);
 
 		auto &data2 = f.getEncoded(Channel::Pose);
-		REQUIRE( data2.size() == 3 );
-		REQUIRE( data2[0] == 44 );
+		REQUIRE( data2.size() == 1 );
+		REQUIRE( data2.front().flags == 45 );
 	}
 }
 
 /* #1.1.5 */
 TEST_CASE("ftl::data::Frame clear encoded on change", "[1.1.5]") {
 	SECTION("change by set") {
-		Frame f = Feed::make(nullptr, 0, 0);
-		std::vector<uint8_t> data{44,22,33};
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
+		ftl::codecs::Packet data;
+		data.flags = 45;
 
 		f.createChange<int>(Channel::Pose, ftl::data::ChangeType::FOREIGN, data);
 		f.store();
 
 		auto &data2 = f.getEncoded(Channel::Pose);
-		REQUIRE( data2.size() == 3 );
+		REQUIRE( data2.size() == 1 );
 		
 		f.set<int>(Channel::Pose) = 66;
 		REQUIRE(f.getEncoded(Channel::Pose).size() == 0);
 	}
 
 	SECTION("change by create") {
-		Frame f = Feed::make(nullptr, 0, 0);
-		std::vector<uint8_t> data{44,22,33};
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
+		ftl::codecs::Packet data;
+		data.flags = 45;
 
 		f.createChange<int>(Channel::Pose, ftl::data::ChangeType::FOREIGN, data);
 		f.store();
 
 		auto &data2 = f.getEncoded(Channel::Pose);
-		REQUIRE( data2.size() == 3 );
+		REQUIRE( data2.size() == 1 );
 		
 		f.create<int>(Channel::Pose) = 66;
 		REQUIRE(f.getEncoded(Channel::Pose).size() == 0);
 	}
 }
 
+struct Test {
+	int a=44;
+	float b=33.0f;
+
+	MSGPACK_DEFINE(a,b);
+};
+
 /* #1.2.1 */
 TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	SECTION("write and read integers") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -122,7 +150,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	}
 
 	SECTION("write and read floats") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<float>(Channel::Pose) = 44.0f;
@@ -132,11 +160,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	}
 
 	SECTION("write and read structures") {
-		struct Test {
-			int a=44;
-			float b=33.0f;
-		};
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<Test>(Channel::Pose) = {};
@@ -147,7 +171,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	}
 
 	SECTION("is int type") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -157,11 +181,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	}
 
 	SECTION("is struct type") {
-		struct Test {
-			int a; int b;
-		};
-
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<Test>(Channel::Pose) = {3,4};
@@ -171,7 +191,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 	}
 
 	SECTION("missing") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 
 		REQUIRE( !f.isType<float>(Channel::Pose) );
 	}
@@ -180,7 +200,7 @@ TEST_CASE("ftl::data::Frame create get", "[Frame]") {
 /* #1.2.2 */
 TEST_CASE("ftl::data::registerChannel", "[Frame]") {
 	SECTION("register typed channel and valid create") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		ftl::data::make_channel<float>(Channel::Colour, "colour", ftl::data::StorageMode::PERSISTENT);
@@ -191,7 +211,7 @@ TEST_CASE("ftl::data::registerChannel", "[Frame]") {
 	}
 
 	SECTION("register typed channel and invalid create") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		ftl::data::make_channel<float>(Channel::Colour, "colour", ftl::data::StorageMode::PERSISTENT);
@@ -209,7 +229,7 @@ TEST_CASE("ftl::data::registerChannel", "[Frame]") {
 	}
 
 	SECTION("register void for any type") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		ftl::data::make_channel<void>(Channel::Colour, "colour", ftl::data::StorageMode::PERSISTENT);
@@ -224,11 +244,7 @@ TEST_CASE("ftl::data::registerChannel", "[Frame]") {
 /* #1.2.3 */
 TEST_CASE("ftl::data::Frame type failure") {
 	SECTION("write and read fail") {
-		struct Test {
-			int a=44;
-			float b=33.0f;
-		};
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 		f.create<Test>(Channel::Pose) = {};
 
@@ -244,7 +260,7 @@ TEST_CASE("ftl::data::Frame type failure") {
 	}
 
 	SECTION("same value on create") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -257,7 +273,7 @@ TEST_CASE("ftl::data::Frame type failure") {
 	}
 
 	SECTION("change of type by recreate") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -280,7 +296,7 @@ TEST_CASE("ftl::data::Frame persistent data", "[1.2.5]") {
 
 	SECTION("persistent through createChange") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Density, ChangeType::FOREIGN) = 44;
 		f.store();	
@@ -309,18 +325,18 @@ TEST_CASE("ftl::data::Frame persistent data", "[1.2.5]") {
 
 	SECTION("available in other frame") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Density, ChangeType::FOREIGN) = 44;	
 		f.store();	
 
-		Frame f2 = Feed::make(&p, 0, 0);
+		Frame f2 = Feed::make(&p, FrameID(0,0), 0);
 		REQUIRE( f2.get<int>(Channel::Density) == 44 );
 	}
 
 	SECTION("get from parent") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		p.create<int>(Channel::Pose) = 55;
@@ -333,9 +349,20 @@ TEST_CASE("ftl::data::Frame persistent data", "[1.2.5]") {
 		REQUIRE( x == y );
 	}
 
+	SECTION("get from parent not ptr") {
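+		// As above, but reads the value by copy rather than via a pointer/reference.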
+		Session p;
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
+		f.store();
+
+		p.create<int>(Channel::Pose) = 55;
+
+		auto x = f.get<int>(Channel::Pose);
+		REQUIRE( x == 55 );
+	}
+
 	SECTION("has from parent") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		p.create<int>(Channel::Pose) = 55;
@@ -344,7 +371,7 @@ TEST_CASE("ftl::data::Frame persistent data", "[1.2.5]") {
 
 	SECTION("no change in parent") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		p.create<int>(Channel::Pose) = 55;
@@ -376,7 +403,7 @@ TEST_CASE("ftl::data::Frame transient data", "[1.2.6]") {
 
 	SECTION("not persistent after store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Density, ChangeType::FOREIGN) = 44;
 		f.store();	
@@ -393,7 +420,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("not persistent after store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<std::list<int>>(Channel::Density, ChangeType::FOREIGN) = {44};
 		f.store();	
@@ -406,7 +433,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("aggregate channels actually aggregate with createChange") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<std::list<int>>(Channel::Density, ChangeType::FOREIGN) = {34};
 		f.createChange<std::list<int>>(Channel::Density, ChangeType::FOREIGN) = {55};
@@ -422,7 +449,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("non aggregate channels do not aggregate with createChange") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<std::list<int>>(Channel::Colour, ChangeType::FOREIGN) = {34};
 		f.createChange<std::list<int>>(Channel::Colour, ChangeType::FOREIGN) = {55};
@@ -436,7 +463,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("aggregate channels allow move aggregate with createChange") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		std::list<int> data1 = {34};
 		std::list<int> data2 = {55};
@@ -454,7 +481,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("aggregate channels actually aggregate with create") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<std::list<int>>(Channel::Density) = {34};
@@ -470,7 +497,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("non aggregate channels do not aggregate with create") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<std::list<int>>(Channel::Colour) = {34};
@@ -484,7 +511,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("aggregate channels actually aggregate with set") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<std::list<int>>(Channel::Density) = {34};
@@ -500,7 +527,7 @@ TEST_CASE("ftl::data::Frame aggregate data", "[1.2.7]") {
 
 	SECTION("non aggregate channels do not aggregate with set") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<std::list<int>>(Channel::Colour) = {34};
@@ -523,7 +550,7 @@ TEST_CASE("ftl::data::Frame aggregate lists", "[1.2.9]") {
 
 	SECTION("only allow stl list container") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<std::list<int>>(Channel::Density) = {44};
@@ -568,8 +595,8 @@ static_assert(std::is_move_assignable<Frame>::value, "Must allow move assignment
 /* #2.1.8 */
 TEST_CASE("ftl::data::Frame merging", "[2.1.8]") {
 	SECTION("merge replaces data in destination") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 		f1.store();
 		f2.store();
 
@@ -584,8 +611,8 @@ TEST_CASE("ftl::data::Frame merging", "[2.1.8]") {
 	}
 
 	SECTION("new items are created") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 		f1.store();
 		f2.store();
 
@@ -600,8 +627,8 @@ TEST_CASE("ftl::data::Frame merging", "[2.1.8]") {
 	}
 
 	SECTION("old items remain") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 		f1.store();
 		f2.store();
 
@@ -616,8 +643,8 @@ TEST_CASE("ftl::data::Frame merging", "[2.1.8]") {
 	}
 
 	SECTION("flushed status is removed") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 		f1.store();
 		f2.store();
 
@@ -636,8 +663,8 @@ TEST_CASE("ftl::data::Frame merging", "[2.1.8]") {
 /* #2.1.9 */
 TEST_CASE("ftl::data::Frame merge is change", "[2.1.9]") {
 	SECTION("merges are marked as changes") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 		f1.store();
 		f2.store();
 
@@ -646,7 +673,7 @@ TEST_CASE("ftl::data::Frame merge is change", "[2.1.9]") {
 		f2.untouch(Channel::Colour2);
 		f2.merge(f1);
 
-		REQUIRE( f2.getChangeType(Channel::Colour) == ChangeType::LOCAL );
+		REQUIRE( f2.getChangeType(Channel::Colour) == ChangeType::PRIMARY );
 		REQUIRE( !f2.changed(Channel::Colour2) );
 	}
 }
@@ -654,14 +681,15 @@ TEST_CASE("ftl::data::Frame merge is change", "[2.1.9]") {
 /* #2.1.10 Unimplemented, merge is move only. This tests for the move instead */
 TEST_CASE("ftl::data::Frame merge moves encoded", "[2.1.10]") {
 	SECTION("encoded data moved") {
-		Frame f1 = Feed::make(nullptr, 0, 0);
-		Frame f2 = Feed::make(nullptr, 0, 0);
+		Frame f1 = Feed::make(nullptr, FrameID(0,0), 0);
+		Frame f2 = Feed::make(nullptr, FrameID(0,0), 0);
 
-		std::vector<uint8_t> data{88,99,100};
+		ftl::codecs::Packet data;
+		data.flags = 45;
 		f1.createChange<int>(Channel::Colour, ChangeType::FOREIGN, data);
 		f2.merge(f1);
 
-		REQUIRE( f2.getEncoded(Channel::Colour).size() == 3 );
+		REQUIRE( f2.getEncoded(Channel::Colour).size() == 1 );
 		REQUIRE( !f1.has(Channel::Colour) );
 	}
 }
@@ -669,7 +697,7 @@ TEST_CASE("ftl::data::Frame merge moves encoded", "[2.1.10]") {
 /* #2.2.1 */
 TEST_CASE("ftl::data::Frame modify after flush", "[2.2.1]") {
 	SECTION("create fails after flush") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 89;
@@ -687,7 +715,7 @@ TEST_CASE("ftl::data::Frame modify after flush", "[2.2.1]") {
 	}
 
 	SECTION("set fails after flush") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 89;
@@ -705,7 +733,7 @@ TEST_CASE("ftl::data::Frame modify after flush", "[2.2.1]") {
 	}
 
 	SECTION("createChange fails after flush") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 89;
@@ -723,7 +751,7 @@ TEST_CASE("ftl::data::Frame modify after flush", "[2.2.1]") {
 	}
 
 	SECTION("channel marked readonly after flush") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 89;
@@ -737,7 +765,7 @@ TEST_CASE("ftl::data::Frame modify after flush", "[2.2.1]") {
 /* #2.2.3 */
 TEST_CASE("ftl::data::Frame multiple flush", "[Frame]") {
 	SECTION("fail on multiple frame flush") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 89;
@@ -759,9 +787,9 @@ TEST_CASE("ftl::data::Frame multiple flush", "[Frame]") {
 TEST_CASE("ftl::data::Frame locality of changes", "[2.2.4]") {
 	ftl::data::make_channel<int>(Channel::Density, "density", ftl::data::StorageMode::PERSISTENT);
 
-	SECTION("not persistent after flush only") {
+	SECTION("persistent after flush only for primary frame") {
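+		// A locally created (primary) change should now reach the parent
+		// session after flush, so the lookup below must not throw.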
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Density) = 44;
@@ -775,12 +803,33 @@ TEST_CASE("ftl::data::Frame locality of changes", "[2.2.4]") {
 			e.ignore();
 			err = true;
 		}
-		REQUIRE( err );
+		REQUIRE( !err );
 	}
 
+	// FIXME: Need a way to change the frame mode or to generate a response frame.
+	/*SECTION("not persistent after flush only for response frame") {
+		Session p;
+		Frame ff = Feed::make(&p, FrameID(0,0), 0);
+		ff.store();
+		Frame f = ff.response();
+
+		f.create<int>(Channel::Density) = 44;
+		f.flush();
+
+		bool err=false;
+
+		try {		
+			p.get<int>(Channel::Density);
+		} catch(const ftl::exception &e) {
+			e.ignore();
+			err = true;
+		}
+		REQUIRE( err );
+	}*/
+
 	SECTION("not persistent without store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Density) = 44;
@@ -802,7 +851,7 @@ TEST_CASE("ftl::data::Frame locality of changes", "[2.2.4]") {
 /* #2.2.5 */
 TEST_CASE("ftl::data::Frame changed status", "[2.2.5]") {
 	SECTION("change on create") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		REQUIRE( !f.changed(Channel::Pose) );
@@ -811,7 +860,7 @@ TEST_CASE("ftl::data::Frame changed status", "[2.2.5]") {
 	}
 
 	SECTION("no change on untouch") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -828,33 +877,33 @@ TEST_CASE("ftl::data::Frame changed status", "[2.2.5]") {
 /* #2.3.3 */
 TEST_CASE("ftl::data::Frame change type", "[2.3.3]") {
 	SECTION("changes are local type") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 
 		REQUIRE( !f.changed(Channel::Pose) );
 		f.create<int>(Channel::Pose) = 55;
-		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::LOCAL );
+		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::PRIMARY );
 	}
 
 	SECTION("local change overrides foreign change") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Pose, ChangeType::FOREIGN) = 55;
 		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::FOREIGN );
 		f.store();
 
 		f.set<int>(Channel::Pose) = 66;
-		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::LOCAL );
+		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::PRIMARY );
 	}
 
 	SECTION("local change overrides completed change") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Pose, ChangeType::COMPLETED) = 55;
 		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::COMPLETED );
 		f.store();
 		f.set<int>(Channel::Pose) = 66;
-		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::LOCAL );
+		REQUIRE( f.getChangeType(Channel::Pose) == ChangeType::PRIMARY );
 	}
 }
 
@@ -876,7 +925,7 @@ TEST_CASE("ftl::data::Frame change type", "[2.3.3]") {
 TEST_CASE("ftl::data::Frame override of persistent", "[3.1.4]") {
 	SECTION("local changes override persistent data") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		p.create<int>(Channel::Colour) = 44;
@@ -910,7 +959,7 @@ TEST_CASE("ftl::data::Frame override of persistent", "[3.1.4]") {
 TEST_CASE("ftl::data::Frame initial store", "[3.2.5]") {
 	SECTION("cannot create before store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		bool err = false;
 		try {
@@ -924,14 +973,14 @@ TEST_CASE("ftl::data::Frame initial store", "[3.2.5]") {
 
 	SECTION("can createChange before store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Colour, ChangeType::FOREIGN) = 89;
 	}
 
 	SECTION("cannot createChange after store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.store();
 
@@ -947,7 +996,7 @@ TEST_CASE("ftl::data::Frame initial store", "[3.2.5]") {
 
 	SECTION("cannot store twice") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.store();
 
@@ -963,7 +1012,7 @@ TEST_CASE("ftl::data::Frame initial store", "[3.2.5]") {
 
 	SECTION("cannot flush before store") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		bool err = false;
 		try {
@@ -988,7 +1037,7 @@ TEST_CASE("ftl::data::Frame initial store", "[3.2.5]") {
 TEST_CASE("ftl::data::Frame change events", "[3.4.1]") {
 	SECTION("event on store of foreign change") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		int event = 0;
 		auto h = f.onChange([&event](Frame &frame, Channel c) {
@@ -1005,7 +1054,7 @@ TEST_CASE("ftl::data::Frame change events", "[3.4.1]") {
 
 	SECTION("event on store of completed change") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		int event = 0;
 		auto h = f.onChange([&event](Frame &frame, Channel c) {
@@ -1019,6 +1068,43 @@ TEST_CASE("ftl::data::Frame change events", "[3.4.1]") {
 		f.store();
 		REQUIRE( event == 1 );
 	}
+
+	SECTION("event on store of foreign change with flush") {
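+		// Storing the foreign change fires the event once; the subsequent
+		// flush must not fire it again.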
+		Session p;
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
+
+		int event = 0;
+		auto h = f.onChange([&event](Frame &frame, Channel c) {
+			event++;
+			return true;
+		});
+
+		f.createChange<int>(Channel::Pose, ChangeType::FOREIGN);
+		REQUIRE( event == 0 );
+
+		f.store();
+		f.flush();
+		REQUIRE( event == 1 );
+	}
+
+	SECTION("no event on flush of response frame") {
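+		// Changes made through a response frame must not trigger the parent
+		// frame's change handler, even after the response goes out of scope.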
+		ftl::data::Pool p;
+		Session s;
+		Frame f = ftl::data::Pool::make(&p, &s, FrameID(0,0), 0);
+
+		int event = 0;
+		auto h = f.onChange([&event](Frame &frame, Channel c) {
+			event++;
+			return true;
+		});
+
+		{
+			auto response = f.response();
+			REQUIRE( event == 0 );
+			response.create<int>(Channel::Control) = 55;
+		}
+		REQUIRE( event == 0 );
+	}
 }
 
 /* #3.4.2 Not applicable as a unit test of Frame. See #3.2.5 */
@@ -1029,7 +1115,7 @@ TEST_CASE("ftl::data::Frame change events", "[3.4.1]") {
 TEST_CASE("ftl::data::Frame parallel change events", "[3.4.4]") {
 	SECTION("event for each of multiple changes") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		int event = 0;
 		auto h = f.onChange([&event](Frame &frame, Channel c) {
@@ -1055,7 +1141,7 @@ TEST_CASE("ftl::data::Frame aggregate changes", "[3.4.6]") {
 
 	SECTION("multiple changes cause single event") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		int event = 0;
 		auto h = f.onChange([&event](Frame &frame, Channel c) {
@@ -1084,7 +1170,7 @@ TEST_CASE("ftl::data::Frame aggregate changes", "[3.4.6]") {
 TEST_CASE("ftl::data::Frame flush events", "[4.1.1]") {
 	SECTION("event on flush") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		int event = 0;
@@ -1102,7 +1188,7 @@ TEST_CASE("ftl::data::Frame flush events", "[4.1.1]") {
 
 	SECTION("parent event on flush") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		int event = 0;
@@ -1123,7 +1209,7 @@ TEST_CASE("ftl::data::Frame flush events", "[4.1.1]") {
 TEST_CASE("ftl::data::Frame flush per channel", "[4.1.2]") {
 	SECTION("event on flush of channel") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		int event = 0;
@@ -1142,7 +1228,7 @@ TEST_CASE("ftl::data::Frame flush per channel", "[4.1.2]") {
 
 	SECTION("flushed channel readonly") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Pose) = 55;
@@ -1172,7 +1258,7 @@ TEST_CASE("ftl::data::Frame flush on destruct", "[4.1.6]") {
 		});
 
 		{
-			Frame f = Feed::make(&p, 0, 0);
+			Frame f = Feed::make(&p, FrameID(0,0), 0);
 			f.store();
 			f.create<int>(Channel::Pose) = 55;
 			REQUIRE( event == 0 );
@@ -1191,7 +1277,7 @@ TEST_CASE("ftl::data::Frame flush on destruct", "[4.1.6]") {
 		});
 
 		{
-			Frame f = Feed::make(&p, 0, 0);
+			Frame f = Feed::make(&p, FrameID(0,0), 0);
 			f.store();
 			f.create<int>(Channel::Pose) = 55;
 			f.flush();
@@ -1208,7 +1294,7 @@ TEST_CASE("ftl::data::Frame flush foreign", "[4.2.1]") {
 
 	SECTION("event on foreign flush") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Colour, ChangeType::FOREIGN) = 55;
 		f.store();
@@ -1231,7 +1317,7 @@ TEST_CASE("ftl::data::Frame no flush of completed", "[4.2.2]") {
 
 	SECTION("no event on completed flush") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 
 		f.createChange<int>(Channel::Colour, ChangeType::COMPLETED) = 55;
 		f.store();
@@ -1264,7 +1350,7 @@ TEST_CASE("ftl::data::Frame no flush of completed", "[4.2.2]") {
 TEST_CASE("ftl::data::Frame parallel flush events", "[4.4.3]") {
 	SECTION("event for each of multiple changes") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		int event = 0;
@@ -1291,7 +1377,7 @@ TEST_CASE("ftl::data::Frame aggregate flush events", "[4.4.5]") {
 
 	SECTION("multiple changes cause single event") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		int event = 0;
@@ -1319,7 +1405,7 @@ TEST_CASE("ftl::data::Frame aggregate flush events", "[4.4.5]") {
 TEST_CASE("ftl::data::Frame status after flush", "[4.4.7]") {
 	SECTION("still changed after flush") {
 		Session p;
-		Frame f = Feed::make(&p, 0, 0);
+		Frame f = Feed::make(&p, FrameID(0,0), 0);
 		f.store();
 
 		f.create<int>(Channel::Colour) = 55;
@@ -1350,6 +1436,11 @@ struct TestC {
 	TestB b;
 };
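+// Assumption: make_type<TestC>() returning false opts TestC out of the default
+// channel type registration, since it is only created via the create<TestA>
+// and create<TestB> overloads below.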
 
+template <>
+bool ftl::data::make_type<TestC>() {
+	return false;
+}
+
 template <>
 TestA &ftl::data::Frame::create<TestA>(ftl::codecs::Channel c) {
 	return create<TestC>(c).a;
@@ -1376,7 +1467,7 @@ TEST_CASE("ftl::data::Frame Complex Overload", "[Frame]") {
 	ftl::data::make_channel<TestC>(Channel::Pose, "pose", ftl::data::StorageMode::PERSISTENT);
 
 	SECTION("Create and get first type with default") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 		f.create<TestA>(Channel::Pose);
 		
@@ -1390,7 +1481,7 @@ TEST_CASE("ftl::data::Frame Complex Overload", "[Frame]") {
 	}
 
 	SECTION("Create and get first type with value") {
-		Frame f = Feed::make(nullptr, 0, 0);
+		Frame f = Feed::make(nullptr, FrameID(0,0), 0);
 		f.store();
 		f.create<TestA>(Channel::Pose) = {77};
 		
diff --git a/components/structures/test/pool_unit.cpp b/components/structures/test/pool_unit.cpp
index fd96db8c8..141acfb0d 100644
--- a/components/structures/test/pool_unit.cpp
+++ b/components/structures/test/pool_unit.cpp
@@ -16,15 +16,18 @@ using ftl::codecs::Channel;
 using ftl::data::ChangeType;
 using ftl::data::StorageMode;
 using ftl::data::FrameStatus;
+using ftl::data::FrameID;
 
 /* #5.1 */
 TEST_CASE("ftl::data::Pool create frames", "[5.1]") {
 	SECTION("can allocate valid frame from pool") {
 		Pool pool(5,5);
 
-		Frame f = pool.allocate(0, 0);
+		Frame f = pool.allocate(ftl::data::FrameID(0,2), 100);
 		REQUIRE( f.status() == FrameStatus::CREATED );
 		REQUIRE( pool.size() == 4 );
+		REQUIRE( f.source() == 2 );
+		REQUIRE( f.timestamp() == 100 );
 	}
 }
 
@@ -34,7 +37,7 @@ TEST_CASE("ftl::data::Pool release frames on destruct", "[5.1]") {
 		Pool pool(5,5);
 
 		{
-			Frame f = pool.allocate(0, 0);
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 0);
 			REQUIRE( f.status() == FrameStatus::CREATED );
 			REQUIRE( pool.size() == 4 );
 		}
@@ -48,7 +51,7 @@ TEST_CASE("ftl::data::Pool release frames on destruct", "[5.1]") {
 		const int *ptr = nullptr;
 
 		{
-			Frame f = pool.allocate(0, 0);
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 0);
 			f.store();
 			f.create<std::vector<int>>(Channel::Colour) = {44,55,66};
 			ptr = f.get<std::vector<int>>(Channel::Colour).data();
@@ -57,7 +60,7 @@ TEST_CASE("ftl::data::Pool release frames on destruct", "[5.1]") {
 		REQUIRE( pool.size() == 1 );
 
 		{
-			Frame f = pool.allocate(0, 0);
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 0);
 			f.store();
 			auto &v = f.create<std::vector<int>>(Channel::Colour);
 
@@ -76,7 +79,7 @@ TEST_CASE("ftl::data::Pool reused frames are stale", "[5.3]") {
 		Pool pool(1,1);
 
 		{
-			Frame f = pool.allocate(0, 0);
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 0);
 			f.store();
 			f.create<std::vector<int>>(Channel::Colour) = {44,55,66};
 		}
@@ -84,7 +87,7 @@ TEST_CASE("ftl::data::Pool reused frames are stale", "[5.3]") {
 		REQUIRE( pool.size() == 1 );
 
 		{
-			Frame f = pool.allocate(0, 0);
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 0);
 			f.store();
 
 			REQUIRE( !f.has(Channel::Colour) );
@@ -107,7 +110,7 @@ TEST_CASE("ftl::data::Pool excessive allocations", "[5.5]") {
 		{
 			std::list<Frame> l;
 			for (int i=0; i<100; ++i) {
-				l.push_back(std::move(pool.allocate(0,0)));
+				l.push_back(std::move(pool.allocate(FrameID(0,0),0)));
 			}
 
 			REQUIRE( pool.size() >= 10 );
@@ -118,3 +121,107 @@ TEST_CASE("ftl::data::Pool excessive allocations", "[5.5]") {
 	}
 }
 
+TEST_CASE("ftl::data::Pool persistent sessions", "[]") {
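+	// The pool keeps a session per FrameID, so values stored in one frame
+	// should reappear in later frames allocated with the same id.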
+	SECTION("persistent across timestamps") {
+		Pool pool(10,20);
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 567;
+		}
+
+		REQUIRE( (pool.session(FrameID(0,0)).get<int>(Channel::Pose) == 567) );
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 567 );
+		}
+	}
+
+	SECTION("persistent across many timestamps") {
+		Pool pool(10,20);
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 567;
+		}
+
+		REQUIRE( (pool.session(FrameID(0,0)).get<int>(Channel::Pose) == 567) );
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 567 );
+		}
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 30);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 567 );
+		}
+	}
+
+	SECTION("persistent across frames and timestamps") {
+		Pool pool(10,20);
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 567;
+		}
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,1), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 568;
+		}
+
+		REQUIRE( (pool.session(FrameID(0,0)).get<int>(Channel::Pose) == 567) );
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 567 );
+		}
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,1), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 568 );
+		}
+	}
+
+	SECTION("persistent across framesets and timestamps") {
+		Pool pool(10,20);
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 567;
+		}
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(1,0), 10);
+			f.store();
+			f.create<int>(Channel::Pose) = 568;
+		}
+
+		REQUIRE( (pool.session(FrameID(0,0)).get<int>(Channel::Pose) == 567) );
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(0,0), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 567 );
+		}
+
+		{
+			Frame f = pool.allocate(ftl::data::FrameID(1,0), 20);
+			f.store();
+			REQUIRE( f.get<int>(Channel::Pose) == 568 );
+		}
+	}	
+}
+
diff --git a/env b/env
new file mode 100644
index 000000000..b0f2a02e8
--- /dev/null
+++ b/env
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/lib/libsgm/CMakeLists.txt b/lib/libsgm/CMakeLists.txt
index 497411f85..6fe014026 100644
--- a/lib/libsgm/CMakeLists.txt
+++ b/lib/libsgm/CMakeLists.txt
@@ -1,9 +1,9 @@
-cmake_minimum_required(VERSION 3.1)
+#cmake_minimum_required(VERSION 3.1)
 
-set(CMAKE_CXX_STANDARD 11)
-set(CMAKE_CXX_EXTENSIONS OFF)
+#set(CMAKE_CXX_STANDARD 11)
+#set(CMAKE_CXX_EXTENSIONS OFF)
 
-set(CUDA_ARCH "-arch=sm_50" CACHE STRING "Value of the NVCC -arch option.")
+#set(CUDA_ARCH "-arch=sm_50" CACHE STRING "Value of the NVCC -arch option.")
 
 option(ENABLE_ZED_DEMO      "Build a Demo using ZED Camera" OFF)
 option(ENABLE_SAMPLES       "Build samples" OFF)
@@ -19,10 +19,10 @@ else()
   set(ZED_SDK_INCLUDE_DIR "/usr/local/zed/include" CACHE STRING "ZED SDK include path.")
 endif()
 
-project(libSGM VERSION 2.4.0)
+#project(libSGM VERSION 2.4.0)
 
 if(BUILD_OPENCV_WRAPPER)
-	find_package(OpenCV REQUIRED core)
+	#find_package(OpenCV REQUIRED core)
 	include_directories(${OpenCV_INCLUDE_DIRS})
 endif()
 
@@ -32,20 +32,3 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/libsgm_config.h.in
 
 add_subdirectory(src)
 
-if(ENABLE_SAMPLES)
-    add_subdirectory(sample/image)
-    add_subdirectory(sample/movie)
-#    add_subdirectory(sample/reprojection)
-    add_subdirectory(sample/benchmark)
-    if(BUILD_OPENCV_WRAPPER)
-        add_subdirectory(sample/image_cv_gpumat)
-    endif()
-endif()
-
-if(ENABLE_TESTS)
-	add_subdirectory(test)
-endif()
-
-if(ENABLE_ZED_DEMO)
-	add_subdirectory(sample/zed)
-endif()
diff --git a/lib/libsgm/include/libsgm_config.h b/lib/libsgm/include/libsgm_config.h
index 67444c41f..eeba490fd 100644
--- a/lib/libsgm/include/libsgm_config.h
+++ b/lib/libsgm/include/libsgm_config.h
@@ -3,10 +3,10 @@
 
 /* #undef LIBSGM_SHARED */
 
-#define LIBSGM_VERSION 2.4.0
-#define LIBSGM_VERSION_MAJOR 2
-#define LIBSGM_VERSION_MINOR 4
-#define LIBSGM_VERSION_PATCH 0
+#define LIBSGM_VERSION 
+#define LIBSGM_VERSION_MAJOR 
+#define LIBSGM_VERSION_MINOR 
+#define LIBSGM_VERSION_PATCH 
 
 /* #undef BUILD_OPENCV_WRAPPER */
 
diff --git a/lib/libsgm/src/CMakeLists.txt b/lib/libsgm/src/CMakeLists.txt
index d1bc459e4..5f09382ea 100644
--- a/lib/libsgm/src/CMakeLists.txt
+++ b/lib/libsgm/src/CMakeLists.txt
@@ -1,44 +1,47 @@
-cmake_minimum_required(VERSION 3.1)
+#cmake_minimum_required(VERSION 3.1)
 
-find_package(CUDA REQUIRED)
+#find_package(CUDA REQUIRED)
 
 include_directories(../include)
 
-if (CMAKE_COMPILER_IS_GNUCXX)
-	set(CMAKE_CUDA_HOST_COMPILER gcc-7)
-	set(CUDA_HOST_COMPILER gcc-7)
-	set(CMAKE_CXX_FLAGS "-O3 -Wall -fPIC")
-	set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11")
-endif()
+#if (CMAKE_COMPILER_IS_GNUCXX)
+#	set(CMAKE_CUDA_HOST_COMPILER gcc-7)
+#	set(CUDA_HOST_COMPILER gcc-7)
+#	set(CMAKE_CXX_FLAGS "-O3 -Wall -fPIC")
+#	set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11")
+#endif()
 
-SET(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} ${CUDA_ARCH}")
+#SET(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} ${CUDA_ARCH}")
 
 file(GLOB STEREOSRCS "*.cu" "*.cpp")
 
-if(LIBSGM_SHARED)
-	CUDA_ADD_LIBRARY(sgm stereo_sgm.cpp ${STEREOSRCS} SHARED)
-	target_link_libraries(sgm ${CUDA_LIBRARIES})
-	if(BUILD_OPENCV_WRAPPER)
-		target_link_libraries(sgm ${OpenCV_LIBS})
-	endif()
-else()
-	CUDA_ADD_LIBRARY(sgm stereo_sgm.cpp ${STEREOSRCS} STATIC)
-endif()
-
-install(
-	TARGETS sgm
-	ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
-	LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
-	RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin
-)
-
-install(
-	DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../include
-	DESTINATION ${CMAKE_INSTALL_PREFIX}
-	FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
-)
-
-install(
-	FILES ${CMAKE_CURRENT_SOURCE_DIR}/../FindLibSGM.cmake
-	DESTINATION ${CMAKE_INSTALL_PREFIX}
-)
+#if(LIBSGM_SHARED)
+#	CUDA_ADD_LIBRARY(sgm stereo_sgm.cpp ${STEREOSRCS} SHARED)
+#	target_link_libraries(sgm ${CUDA_LIBRARIES})
+#	if(BUILD_OPENCV_WRAPPER)
+#		target_link_libraries(sgm ${OpenCV_LIBS})
+#	endif()
+#else()
+	#CUDA_ADD_LIBRARY(sgm stereo_sgm.cpp ${STEREOSRCS} STATIC)
+	add_library(sgm stereo_sgm.cpp ${STEREOSRCS})
+#endif()
+
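+# sgm is now built with a plain add_library call, relying on CUDA language
+# support presumably enabled by the parent project; the compute capability is
+# hard-coded to 6.1 below.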
+set_property(TARGET sgm PROPERTY CUDA_ARCHITECTURES 61)
+
+#install(
+#	TARGETS sgm
+#	ARCHIVE DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
+#	LIBRARY DESTINATION ${CMAKE_INSTALL_PREFIX}/lib
+#	RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin
+#)
+
+#install(
+#	DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../include
+#	DESTINATION ${CMAKE_INSTALL_PREFIX}
+#	FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp"
+#)
+
+#install(
+#	FILES ${CMAKE_CURRENT_SOURCE_DIR}/../FindLibSGM.cmake
+#	DESTINATION ${CMAKE_INSTALL_PREFIX}
+#)
diff --git a/lib/libstereo/CMakeLists.txt b/lib/libstereo/CMakeLists.txt
index 7095e1679..f60cc79a1 100644
--- a/lib/libstereo/CMakeLists.txt
+++ b/lib/libstereo/CMakeLists.txt
@@ -119,6 +119,7 @@ endif()
 target_include_directories(libstereo PRIVATE src/ include/)
 target_include_directories(libstereo PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(libstereo Threads::Threads ${OpenCV_LIBS} ${CUDA_LIBRARIES})
+set_property(TARGET libstereo PROPERTY CUDA_ARCHITECTURES OFF)
 
 if (BUILD_MIDDLEBURY)
     add_subdirectory(middlebury/)
diff --git a/lib/libstereo/test/CMakeLists.txt b/lib/libstereo/test/CMakeLists.txt
index ee222cd76..d5e6f318d 100644
--- a/lib/libstereo/test/CMakeLists.txt
+++ b/lib/libstereo/test/CMakeLists.txt
@@ -7,6 +7,7 @@ $<TARGET_OBJECTS:CatchTest>
 target_include_directories(dsi_cpu_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_include_directories(dsi_cpu_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(dsi_cpu_unit Threads::Threads ${OpenCV_LIBS})
+#set_property(TARGET dsi_cpu_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(DSICPUUnitTest dsi_cpu_unit)
 
@@ -18,6 +19,7 @@ target_include_directories(dsi_gpu_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../i
 target_include_directories(dsi_gpu_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_compile_definitions(dsi_gpu_unit PUBLIC USE_GPU)
 target_link_libraries(dsi_gpu_unit Threads::Threads ${OpenCV_LIBS})
+set_property(TARGET dsi_gpu_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(DSIGPUUnitTest dsi_gpu_unit)
 
@@ -28,6 +30,7 @@ $<TARGET_OBJECTS:CatchTest>
 target_include_directories(array2d_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_include_directories(array2d_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(array2d_unit Threads::Threads ${OpenCV_LIBS})
+set_property(TARGET array2d_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(Array2DUnitTest array2d_unit)
 
@@ -41,6 +44,7 @@ $<TARGET_OBJECTS:CatchTest>
 target_include_directories(matching_cost_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" "${CMAKE_CURRENT_SOURCE_DIR}/../src")
 target_include_directories(matching_cost_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(matching_cost_unit Threads::Threads ${OpenCV_LIBS})
+set_property(TARGET matching_cost_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(MatchingCostUnitTest matching_cost_unit)
 
@@ -51,6 +55,7 @@ $<TARGET_OBJECTS:CatchTest>
 target_include_directories(aggregation_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_include_directories(aggregation_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(aggregation_unit Threads::Threads ${OpenCV_LIBS})
+set_property(TARGET aggregation_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(AggregationUnitTest aggregation_unit)
 
@@ -61,5 +66,6 @@ $<TARGET_OBJECTS:CatchTest>
 target_include_directories(wta_unit PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include")
 target_include_directories(wta_unit PUBLIC ${OpenCV_INCLUDE_DIRS})
 target_link_libraries(wta_unit Threads::Threads ${OpenCV_LIBS})
+set_property(TARGET wta_unit PROPERTY CUDA_ARCHITECTURES OFF)
 
 add_test(WTAUnitTest wta_unit)
diff --git a/web-service/public/js/bundle.js b/web-service/public/js/bundle.js
index 074e04643..a89bb20e0 100644
--- a/web-service/public/js/bundle.js
+++ b/web-service/public/js/bundle.js
@@ -71306,14 +71306,14 @@ renderThumbnails = async () => {
             const encodedURI = encodeURIComponent(thumbnails[i])
             current_data.uri = thumbnails[i]
             try{
-                const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
-                if(!someData.ok){
-                    throw new Error('Image not found')
-                }
-                const myBlob = await someData.blob();
-                const objectURL = URL.createObjectURL(myBlob);
+                //const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
+                //if(!someData.ok){
+                //    throw new Error('Image not found')
+                //}
+                //const myBlob = await someData.blob();
+                //const objectURL = URL.createObjectURL(myBlob);
                 // containerDiv.innerHTML += createCard()
-                containerDiv.innerHTML += createCard(objectURL, i+4)
+                containerDiv.innerHTML += createCard(encodedURI, i+4)
             }catch(err){
                 console.log("Couldn't create thumbnail");
                 console.log(err) 
@@ -71328,7 +71328,7 @@ renderThumbnails = async () => {
  */
 createCard = (url, viewers) => {
     return `<div class='ftlab-card-component' >
-                <img src='${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
+                <img src='stream/rgb?uri=${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
                 <p>Viewers: ${viewers}</p>
                 <button onclick="createVideoPlayer()">button</button>
             </div>`
@@ -72693,8 +72693,8 @@ function Peer(ws) {
 		this._notify("disconnect", this);
 	}
 
-	let error = () => {
-		console.error("Socket error");
+	let error = (e) => {
+		console.error("Socket error: ", e);
 		this.sock.close();
 		this.status = kDisconnected;
 	}
diff --git a/web-service/public/js/index.js b/web-service/public/js/index.js
index 7100c66a8..213be7a97 100644
--- a/web-service/public/js/index.js
+++ b/web-service/public/js/index.js
@@ -82,14 +82,14 @@ renderThumbnails = async () => {
             const encodedURI = encodeURIComponent(thumbnails[i])
             current_data.uri = thumbnails[i]
             try{
-                const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
-                if(!someData.ok){
-                    throw new Error('Image not found')
-                }
-                const myBlob = await someData.blob();
-                const objectURL = URL.createObjectURL(myBlob);
+                //const someData = await fetch(`./stream/rgb?uri=${encodedURI}`)
+                //if(!someData.ok){
+                //    throw new Error('Image not found')
+                //}
+                //const myBlob = await someData.blob();
+                //const objectURL = URL.createObjectURL(myBlob);
                 // containerDiv.innerHTML += createCard()
-                containerDiv.innerHTML += createCard(objectURL, i+4)
+                containerDiv.innerHTML += createCard(encodedURI, i+4)
             }catch(err){
                 console.log("Couldn't create thumbnail");
                 console.log(err) 
@@ -104,7 +104,7 @@ renderThumbnails = async () => {
  */
 createCard = (url, viewers) => {
     return `<div class='ftlab-card-component' >
-                <img src='${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
+                <img src='stream/rgb?uri=${url}' class="thumbnail-img" alt="Hups" width="250px"></img>
                 <p>Viewers: ${viewers}</p>
                 <button onclick="createVideoPlayer()">button</button>
             </div>`
diff --git a/web-service/server/src/index.js b/web-service/server/src/index.js
index 0a0e17938..22eb16270 100644
--- a/web-service/server/src/index.js
+++ b/web-service/server/src/index.js
@@ -7,7 +7,11 @@ const config = require('./utils/config')
 const User = require('./models/users')
 const Configs = require('./models/generic')
 const bodyParser = require('body-parser')
-const Url = require('url-parse')
+const Url = require('url-parse');
+const { LogLuvEncoding } = require('three');
+const msgpack = require('msgpack5')()
+  , encode  = msgpack.encode
+  , decode  = msgpack.decode;
 
 // ---- INDEXES ----------------------------------------------------------------
 app.use(express.static(__dirname + '/../../public'));
@@ -76,6 +80,8 @@ function RGBDStream(uri, peer) {
 	this.rxcount = 10;
 	this.rxmax = 10;
 
+	this.data = {};
+
 	let ix = uri.indexOf("?");
 	this.base_uri = (ix >= 0) ? uri.substring(0, ix) : uri;
 
@@ -87,6 +93,7 @@ function RGBDStream(uri, peer) {
 		//if (this.rxcount >= this.rxmax && this.clients.length > 0) {
 		//	this.subscribe();
 		//}
+		//console.log("Got frame: ", spacket);
 	});
 
 	/*peer.bind(uri, (frame, ttime, chunk, rgb, depth) => {
@@ -97,6 +104,9 @@ function RGBDStream(uri, peer) {
 			this.subscribe();
 		}
 	});*/
+
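+	// Ask the source to start sending data; the two arrays are a stream packet
+	// and a data packet (channel 74 here matches the data channel served by
+	// /stream/rgb below), though the exact field layout is an assumption.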
+	console.log("Sending request");
+	this.peer.send(this.base_uri, 0, [1,255,255,74,1],[7,0,1,255,0,new Uint8Array(0)]);
 }
 
 RGBDStream.prototype.addClient = function(peer) {
@@ -124,9 +134,8 @@ RGBDStream.prototype.subscribe = function() {
 
 RGBDStream.prototype.pushFrames = function(latency, spacket, packet) {
-	//Checks that the type is jpg
+	// Store msgpack-decoded payloads for data channels (id >= 64)
-	if (packet[0] === 0){
-		if (spacket[3] > 0) this.depth = packet[5];
-		else this.rgb = packet[5];
+	if (spacket[3] >= 64) {
+		this.data[spacket[3]] = decode(packet[5]);
 	}
 
 	//console.log("Frame = ", packet[0], packet[1]);
@@ -161,24 +170,29 @@ app.get('/streams', (req, res) => {
 * bound to that 
  */
 app.get('/stream/rgb', (req, res) => {
-	let uri = req.query.uri;
+	let uri = decodeURI(req.query.uri);
+	let ix = uri.indexOf("?");
+	let base_uri = (ix >= 0) ? uri.substring(0, ix) : uri;
+
 	if (uri_data.hasOwnProperty(uri)) {
 		//uri_data[uri].peer.send("get_stream", uri, 3, 9, [Peer.uuid], uri);
 		res.writeHead(200, {'Content-Type': 'image/jpeg'});
-		res.end(uri_data[uri].rgb);
+		res.end(uri_data[uri].data[74]);
 	}
 	res.end();
 });
 
 
-app.get('/stream/depth', (req, res) => {
+app.get('/stream/data', (req, res) => {
 	let uri = req.query.uri;
+	let channel = parseInt(req.query.channel);
 	const parsedURI = stringSplitter(uri)
 	if (uri_data.hasOwnProperty(parsedURI)) {
-		res.writeHead(200, {'Content-Type': 'image/png'});
-    	res.end(uri_data[parsedURI].depth);
+		//res.writeHead(200, {'Content-Type': 'image/png'});
+    	res.status(200).json(uri_data[parsedURI].data[channel]);
+	} else {
+		res.end();
 	}
-	res.end();
 });
 
 app.post('/stream/config', async (req, res) => {
@@ -220,23 +234,19 @@ app.get('/stream/config', async(req, res) => {
 	
 	//example of uri ftlab.utu.fi/stream/config?uri=ftl://utu.fi#reconstruction_snap10/merge
 	const settings = req.query.settings;
-	const uri = req.query.uri;
-	const parsedURI = stringSplitter(uri)
+	const uri = decodeURI(req.query.uri);
+	//const parsedURI = stringSplitter(uri)
 
-	// //Checks if DB has data
-	// let dbData = await Configs.find({Settings: settings});
-	// if(dbData[0].data){
-	// 	return res.status(200).json(dbData[0]);
-	// }else{
-		let peer = uri_data[parsedURI].peer
-		if(peer){
-			peer.rpc("get_cfg", (response) => {
+	if (uri_data.hasOwnProperty(uri)) {
+		let peer = uri_data[uri].peer
+		if (peer){
+			peer.rpc("get_configurable", (response) => {
 				if(response){
-					return res.status(200).json(response);
+					return res.status(200).json(JSON.parse(response));
 				}
-			}, settings)
+			}, uri);
 		}
-	// }
+	}
 })
 
 
@@ -370,7 +380,9 @@ app.ws('/', (ws, req) => {
 		}
 	});
 
-	p.bind("find_stream", (uri) => {
+	p.bind("find_stream", (uri, proxy) => {
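+		// Only answer lookups that explicitly ask for a proxied stream.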
+		if (!proxy) return null;
+		
 		const parsedURI = stringSplitter(uri)
 		if (uri_to_peer.hasOwnProperty(parsedURI)) {
 			console.log("Stream found: ", uri, parsedURI);
-- 
GitLab