diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ecee499c0b127dc1783fe009e250be10315ee927..b9712ca6487b4ddda8db7a98f509716c935ffd5a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -6,7 +6,7 @@
 
 variables:
   GIT_SUBMODULE_STRATEGY: recursive
-  CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DPORTAUDIO_DIR="D:/Build/portaudio" -DNVPIPE_DIR="D:/Build/NvPipe" -DEigen3_DIR="C:/Program Files (x86)/Eigen3/share/eigen3/cmake" -DOpenCV_DIR="D:/Build/opencv-4.1.1" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1" -DWITH_CERES=FALSE'
+  CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DPORTAUDIO_DIR="D:/Build/portaudio" -DNVPIPE_DIR="D:/Build/NvPipe" -DEigen3_DIR="C:/Program Files (x86)/Eigen3/share/eigen3/cmake" -DOpenCV_DIR="D:/Build/opencv-4.1.1" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1" -DWITH_OPENVR=TRUE -DOPENVR_DIR="D:/Build/OpenVRSDK" -DWITH_CERES=FALSE'
 
 stages:
  - all
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7cf418098a856904c30a4f88a3f90dcb1e998555..ce1b438e4e2a5659f2166b178585128989816063 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -70,20 +70,51 @@ endif()
 #	set(HAVE_LIBARCHIVE true)
 #endif()
 
-if (WITH_OPENVR)
-	## OpenVR API path
-	find_library(OPENVR_LIBRARIES
-		NAMES
-			openvr_api
-	)
-	set(OPENVR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../headers)
-
-	if (OPENVR_LIBRARIES)
-		message(STATUS "Found OpenVR: ${OPENVR_LIBRARIES}")
-		set(HAVE_OPENVR true)
+
+# ============== OPEN VR =======================================================
+
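+# Locate the OpenVR runtime library and expose it as an imported "openvr"
+# target. When the SDK is not found an empty INTERFACE target is created
+# instead, so link lines stay valid and HAVE_OPENVR remains unset.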
+if (WITH_OPENVR)
+	find_library( OPENVR_LIBRARY NAMES openvr_api libopenvr_api openvr_api64 PATHS ${OPENVR_DIR} PATH_SUFFIXES lib)
+	if (OPENVR_LIBRARY)
+		set(HAVE_OPENVR TRUE)
+		add_library(openvr UNKNOWN IMPORTED)
+		set_property(TARGET openvr PROPERTY IMPORTED_LOCATION ${OPENVR_LIBRARY})
+		message(STATUS "Found OpenVR: ${OPENVR_LIBRARY}")
+
+		if(WIN32)
+			# Find include
+			find_path(OPENVR_INCLUDE_DIRS
+				NAMES openvr/openvr.h
+				PATHS "C:/Program Files/OpenVRSDK" "C:/Program Files (x86)/OpenVRSDK" ${OPENVR_DIR}
+				PATH_SUFFIXES include
+			)
+			set_property(TARGET openvr PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${OPENVR_INCLUDE_DIRS})
+		endif()
+	else()
+		set(OPENVR_LIBRARY "")
+		add_library(openvr INTERFACE)
 	endif()
+else()
+	set(OPENVR_LIBRARY "")
+	add_library(openvr INTERFACE)
 endif()
 
+# ==============================================================================
+
 if (WITH_FIXSTARS)
 	find_package(LibSGM REQUIRED)
 	if (LibSGM_FOUND)
diff --git a/applications/gui/CMakeLists.txt b/applications/gui/CMakeLists.txt
index 02402d2e96ba805ac789d324166dec4140bc4512..a30525ddc67d057d1a64ddb17c1e85901e5b6259 100644
--- a/applications/gui/CMakeLists.txt
+++ b/applications/gui/CMakeLists.txt
@@ -40,6 +40,6 @@ target_include_directories(ftl-gui PUBLIC
 #endif()
 
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
-target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd ftlstreams ftlrender Threads::Threads ${OpenCV_LIBS} ${OPENVR_LIBRARIES} glog::glog ftlnet nanogui ${NANOGUI_EXTRA_LIBS})
+target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd ftlstreams ftlrender Threads::Threads ${OpenCV_LIBS} openvr ftlnet nanogui ${NANOGUI_EXTRA_LIBS})
 
 
diff --git a/components/audio/include/ftl/audio/buffer.hpp b/components/audio/include/ftl/audio/buffer.hpp
index 8fb6d580686b4fddf481fc6aa53e3f899c1220c4..fad4f447acde457619476602de88ded940bb2f4a 100644
--- a/components/audio/include/ftl/audio/buffer.hpp
+++ b/components/audio/include/ftl/audio/buffer.hpp
@@ -6,6 +6,38 @@
 namespace ftl {
 namespace audio {
 
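+/**
+ * Abstract base class for the audio ring buffers. It fixes the sample type
+ * but leaves channel count, frame size and storage to the implementation, so
+ * mono and stereo buffers can be used interchangeably through a Buffer<T>*.
+ * Delays are requested in seconds and tracked internally in sample counts.
+ */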
+template <typename T>
+class Buffer {
+	public:
+	typedef T type;
+
+	Buffer(int channels, int framesize, int rate) : rate_(rate), cur_delay_(0.0f), req_delay_(0.0f), channels_(channels), frame_size_(framesize) {}
+	virtual ~Buffer() {}
+
+	virtual void write(const std::vector<T> &in)=0;
+	virtual void read(std::vector<T> &out, int)=0;
+
+	inline int channels() const { return channels_; }
+	inline int frameSize() const { return frame_size_; }
+	inline int sampleRate() const { return rate_; }
+
+	void setDelay(float d) {
+		req_delay_ = d * static_cast<float>(rate_);
+	}
+
+	float delay() const { return cur_delay_ / static_cast<float>(rate_); }
+
+	virtual int size() const=0;
+	virtual int frames() const=0;
+
+	protected:
+	int rate_;
+	float cur_delay_;
+	float req_delay_;
+	int channels_;
+	int frame_size_;
+};
+
 //static constexpr int kBufferCount = 100;
 
 /**
@@ -16,27 +48,15 @@ namespace audio {
  * dilation / shifting, and amplitude control.
  */
 template <typename T, int CHAN, int FRAME, int SIZE>
-class FixedBuffer {
+class FixedBuffer : public ftl::audio::Buffer<T> {
 	public:
-	typedef T type;
-
-	FixedBuffer() : write_position_(0), read_position_(-1), offset_(0), rate_(44100),
-			cur_delay_(0.0f), req_delay_(0.0f) {}
-	explicit FixedBuffer(int rate) : write_position_(0), read_position_(-1),
-			offset_(0), rate_(rate), cur_delay_(0.0f), req_delay_(0.0f) {}
+	FixedBuffer() : Buffer<T>(CHAN, FRAME, 44100), write_position_(0), read_position_(-1), offset_(0) {}
+	explicit FixedBuffer(int rate) : Buffer<T>(CHAN, FRAME, rate), write_position_(0), read_position_(-1),
+			offset_(0) {}
 
-	int sampleRate() const { return rate_; }
 
-	inline int channels() const { return CHAN; }
-	inline int frameSize() const { return FRAME; }
 	inline int maxFrames() const { return SIZE; }
 
-	void setDelay(float d) {
-		req_delay_ = d  * static_cast<float>(rate_);
-	}
-
-	float delay() const { return cur_delay_ / static_cast<float>(rate_); }
-
 	inline void writeFrame(const T *d) {
 		const T *in = d;
 		T *out = &data_[(write_position_++) % SIZE][0];
@@ -54,25 +74,23 @@ class FixedBuffer {
 		}
 	}
 
-	int size() const { return (read_position_>=0) ? write_position_ - 2 - read_position_ : 0; }
-	int frames() const { return (read_position_>=0) ? write_position_ - 2 - read_position_ : 0; }
+	int size() const override { return (read_position_>=0) ? write_position_ - 2 - read_position_ : 0; }
+	int frames() const override { return (read_position_>=0) ? write_position_ - 2 - read_position_ : 0; }
 
 	/**
 	 * Append sound samples to the end of the buffer. The samples may be over
 	 * or under sampled so as to gradually introduce or remove a requested
 	 * delay and hence change the latency of the audio.
 	 */
-	void write(const std::vector<T> &in);
+	void write(const std::vector<T> &in) override;
+
+	void read(std::vector<T> &out, int frames) override;
 
 	private:
 	int write_position_;
 	int read_position_;
 	int offset_;
 	T data_[SIZE][CHAN*FRAME];
-	int rate_;
-
-	float cur_delay_;
-	float req_delay_;
 };
 
 // ==== Implementations ========================================================
@@ -97,7 +115,7 @@ void FixedBuffer<T,CHAN,FRAME,SIZE>::write(const std::vector<T> &in) {
 		
 		for (int c=0; c<CHAN; ++c) *ptr++ = fracIndex<T,CHAN>(in, i, c);
 
-		const float d = 0.6f*clamp((req_delay_ - cur_delay_) / static_cast<float>(rate_), 0.5f);
+		const float d = 0.6f*clamp((this->req_delay_ - this->cur_delay_) / static_cast<float>(this->rate_), 0.5f);
 		i += 1.0f - d;  // FIXME: Is this correct? Seems to function but perhaps not ideal
 
 		/*if (d > 0.0f) {	// Increase delay = oversample with increment < 1.0
@@ -107,7 +125,7 @@ void FixedBuffer<T,CHAN,FRAME,SIZE>::write(const std::vector<T> &in) {
 			//i += 1.0f / (1.0f + d);
 			i += 1.0f - d;
 		}*/
-		cur_delay_ += d;
+		this->cur_delay_ += d;
 
 		offset_+= CHAN;
 		if (offset_ == CHAN*FRAME) {
@@ -118,6 +136,16 @@ void FixedBuffer<T,CHAN,FRAME,SIZE>::write(const std::vector<T> &in) {
 	if (write_position_ > 20 && read_position_ < 0) read_position_ = 0;
 }
 
+template <typename T, int CHAN, int FRAME, int SIZE>
+void FixedBuffer<T,CHAN,FRAME,SIZE>::read(std::vector<T> &out, int count) {
+	out.resize(FRAME*count*CHAN);
+	T *ptr = out.data();
+	for (int i=0; i<count; ++i) {
+		readFrame(ptr);
+		ptr += FRAME*CHAN;
+	}
+}
+
 // ==== Common forms ===========================================================
 
 template <int SIZE>
diff --git a/components/audio/include/ftl/audio/frame.hpp b/components/audio/include/ftl/audio/frame.hpp
index 845123a8ffbab470998bfc657164c6c451cfdb1e..c30fb66e5660dac88e14cf67ee06bf69d9e5b58e 100644
--- a/components/audio/include/ftl/audio/frame.hpp
+++ b/components/audio/include/ftl/audio/frame.hpp
@@ -12,6 +12,7 @@ namespace audio {
 struct AudioSettings {
 	int sample_rate;
 	int frame_size;
+	int channels;
 };
 
 struct AudioData {
diff --git a/components/audio/include/ftl/audio/source.hpp b/components/audio/include/ftl/audio/source.hpp
index c7ba25608f35e478aa7828846e937e4bbd7938cc..8b810e3e8bae58ba4cab89dfb3d432bc93607579 100644
--- a/components/audio/include/ftl/audio/source.hpp
+++ b/components/audio/include/ftl/audio/source.hpp
@@ -41,7 +41,7 @@ class Source : public ftl::Configurable, public ftl::audio::Generator {
 	ftl::timer::TimerHandle timer_main_;
 	ftl::audio::FrameSet::Callback cb_;
 
-	ftl::audio::StereoBuffer16<100> buffer_;
+	ftl::audio::Buffer<short> *buffer_;
 	int to_read_;
 
 	ftl::audio::FrameSet frameset_;
diff --git a/components/audio/include/ftl/audio/speaker.hpp b/components/audio/include/ftl/audio/speaker.hpp
index b70c6e65f0bc249261d16b4ec0d1f8cd5ec92dd3..ea6d6d92d9b11b580fc3de2dcd895ecaa2379ebf 100644
--- a/components/audio/include/ftl/audio/speaker.hpp
+++ b/components/audio/include/ftl/audio/speaker.hpp
@@ -23,13 +23,15 @@ class Speaker : public ftl::Configurable {
 	void setDelay(int64_t ms);
 
 	private:
-	ftl::audio::StereoBuffer16<2000> buffer_;
+	ftl::audio::Buffer<short> *buffer_;
 	bool active_;
 	float extra_delay_;
 
 	#ifdef HAVE_PORTAUDIO
 	PaStream *stream_;
 	#endif
+
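+	// (Re)create the buffer and open the PortAudio output stream for the
+	// given frame size, sample rate and channel count.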
+	void _open(int fsize, int sample, int channels);
 };
 
 }
diff --git a/components/audio/src/source.cpp b/components/audio/src/source.cpp
index 0e6c4f4e3c0b15c86be8585e7f1601b7aa46c3a7..48d176eaab6e558e5d66a3bc388b330b8fdbd839 100644
--- a/components/audio/src/source.cpp
+++ b/components/audio/src/source.cpp
@@ -17,31 +17,20 @@ using ftl::codecs::Channel;
 //static double ltime = 0.0;
 
 /* Portaudio callback to receive audio data. */
+template <typename BUFFER>
 static int pa_source_callback(const void *input, void *output,
         unsigned long frameCount, const PaStreamCallbackTimeInfo *timeInfo,
         PaStreamCallbackFlags statusFlags, void *userData) {
 
-    auto *buffer = (ftl::audio::StereoBuffer16<100>*)userData;
+    auto *buffer = (BUFFER*)userData;
     short *in = (short*)input;
-
-	//short *out = (short*)output;
-	//buffer->readFrame(out);
-
-	//if (timeInfo->currentTime - ltime < (1.0 / 128.0)) return 0;
-	//ltime = timeInfo->inputBufferAdcTime;
-
-    //int i=0;
-    //while (i < frameCount) {
-	    buffer->writeFrame(in);
-        //i+=2*ftl::audio::kFrameSize;
-    //
-
+	buffer->writeFrame(in);
     return 0;
 }
 
 #endif
 
-Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(48000) {
+Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(nullptr) {
 	if (!value("enabled",true)) {
 		active_ = false;
 		return;
@@ -50,12 +39,51 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(4800
 	#ifdef HAVE_PORTAUDIO
     ftl::audio::pa_init();
 
-	int device = value("audio_device",-1);
-	if (device >= Pa_GetDeviceCount()) device = -1;
+	int device = Pa_GetDefaultInputDevice();
+	int channels = 1;
+
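+	// "audio_device" may name a device (matched by substring) or give an
+	// index; otherwise the PortAudio default input device is used.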
+	if (get<std::string>("audio_device")) {
+		std::string devname = *get<std::string>("audio_device");
+
+		int numDevices = Pa_GetDeviceCount();
+
+		for (int i=0; i<numDevices; ++i) {
+			const PaDeviceInfo *deviceInfo = Pa_GetDeviceInfo(i);
+			if (std::string(deviceInfo->name).find(devname) != std::string::npos) {
+				device = i;
+				break;
+			}
+		}
+	} else {
+		device = value("audio_device", device);
+		if (device >= Pa_GetDeviceCount()) device = Pa_GetDefaultInputDevice();
+	}
+
+	const PaDeviceInfo *deviceInfo = Pa_GetDeviceInfo(device);
+	if (deviceInfo) {
+		LOG(INFO) << "Using audio device: " << deviceInfo->name;
+		if (deviceInfo->maxInputChannels == 0) {
+			device = -1;
+			LOG(ERROR) << "Selected audio device has no input channels";
+		} else {
+			channels = (deviceInfo->maxInputChannels >= 2) ? 2 : 1;
+		}
+	} else {
+		LOG(ERROR) << "No selected audio device";
+		return;
+	}
+
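+	// Use a stereo or mono ring buffer to match the device channel count.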
+	if (channels >= 2) {
+		buffer_ = new ftl::audio::StereoBuffer16<100>(48000);
+	} else {
+		buffer_ = new ftl::audio::MonoBuffer16<100>(48000);
+	}
 
     PaStreamParameters inputParameters;
     //bzero( &inputParameters, sizeof( inputParameters ) );
-    inputParameters.channelCount = 2;
+    inputParameters.channelCount = channels;
     inputParameters.device = device;
     inputParameters.sampleFormat = paInt16;
     inputParameters.suggestedLatency = (device >= 0) ? Pa_GetDeviceInfo(device)->defaultLowInputLatency : 0;
@@ -71,19 +99,19 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(4800
 			48000,  // Sample rate
 			ftl::audio::kFrameSize,    // Size of single frame
 			paNoFlag,
-			pa_source_callback,
-			&this->buffer_
+			(buffer_->channels() == 1) ? pa_source_callback<ftl::audio::MonoBuffer16<100>> : pa_source_callback<ftl::audio::StereoBuffer16<100>>,
+			this->buffer_
 		);
 	} else {
 		err = Pa_OpenDefaultStream(
 			&stream_,
-			2,
+			channels,
 			0,
 			paInt16,
 			48000,  // Sample rate
 			ftl::audio::kFrameSize,    // Size of single frame
-			pa_source_callback,
-			&this->buffer_
+			(buffer_->channels() == 1) ? pa_source_callback<ftl::audio::MonoBuffer16<100>> : pa_source_callback<ftl::audio::StereoBuffer16<100>>,
+			this->buffer_
 		);
 	}
 
@@ -105,8 +133,14 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(4800
 
 	to_read_ = 0;
 
+	ftl::audio::AudioSettings settings;
+	settings.channels = channels;
+	settings.sample_rate = 48000;
+	settings.frame_size = 256;
+	state_.setLeft(settings);
+
     timer_hp_ = ftl::timer::add(ftl::timer::kTimerHighPrecision, [this](int64_t ts) {
-        to_read_ = buffer_.size();
+        if (buffer_) to_read_ = buffer_->size();
         return true;
     });
 
@@ -119,20 +153,23 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(4800
 		frameset_.count = 1;
 		frameset_.stale = false;
 
-        if (to_read_ < 1) return true;
+        if (to_read_ < 1 || !buffer_) return true;
 
 		if (frameset_.frames.size() < 1) frameset_.frames.emplace_back();
 
 		auto &frame = frameset_.frames[0];
 		frame.reset();
-        std::vector<short> &data = frame.create<Audio>(Channel::Audio).data();
+		frame.setOrigin(&state_);
+        std::vector<short> &data = frame.create<Audio>((buffer_->channels() == 2) ? Channel::AudioStereo : Channel::AudioMono).data();
 
-		data.resize(2*ftl::audio::kFrameSize*to_read_);
-		short *ptr = data.data();
-		for (int i=0; i<to_read_; ++i) {
-			buffer_.readFrame(ptr);
-			ptr += 2*ftl::audio::kFrameSize;
-		}
+		buffer_->read(data, to_read_);
 
 		// Then do something with the data!
 		//LOG(INFO) << "Audio Frames Sent: " << to_read_ << " - " << ltime;
diff --git a/components/audio/src/speaker.cpp b/components/audio/src/speaker.cpp
index adb54aa82ab20e23eef82d3a96061793b70435dc..64c333851a1cd95e37902b8dd672423bc69dc79b 100644
--- a/components/audio/src/speaker.cpp
+++ b/components/audio/src/speaker.cpp
@@ -15,11 +15,12 @@ using ftl::codecs::Channel;
 #ifdef HAVE_PORTAUDIO
 
 /* Portaudio callback to receive audio data. */
+template <typename BUFFER>
 static int pa_speaker_callback(const void *input, void *output,
 		unsigned long frameCount, const PaStreamCallbackTimeInfo *timeInfo,
 		PaStreamCallbackFlags statusFlags, void *userData) {
 
-	auto *buffer = (ftl::audio::StereoBuffer16<2000>*)userData;
+	auto *buffer = (BUFFER*)userData;  // ftl::audio::MonoBuffer16<2000>
 	short *out = (short*)output;
 
 	buffer->readFrame(out);
@@ -29,39 +30,9 @@ static int pa_speaker_callback(const void *input, void *output,
 
 #endif
 
-Speaker::Speaker(nlohmann::json &config) : ftl::Configurable(config), buffer_(48000) {
+Speaker::Speaker(nlohmann::json &config) : ftl::Configurable(config), buffer_(nullptr) {
 	#ifdef HAVE_PORTAUDIO
 	ftl::audio::pa_init();
-
-	auto err = Pa_OpenDefaultStream(
-		&stream_,
-		0,
-		2,
-		paInt16,
-		48000,  // Sample rate
-		256,    // Size of single frame
-		pa_speaker_callback,
-		&this->buffer_
-	);
-
-	if (err != paNoError) {
-		LOG(ERROR) << "Portaudio open stream error: " << Pa_GetErrorText(err);
-		active_ = false;
-		return;
-	} else {
-		active_ = true;
-	}
-
-	err = Pa_StartStream(stream_);
-
-	if (err != paNoError) {
-		LOG(ERROR) << "Portaudio start stream error: " << Pa_GetErrorText(err);
-		//active_ = false;
-		return;
-	}
-
-	LOG(INFO) << "Speaker ready.";
-
 	#else  // No portaudio
 
 	active_ = false;
@@ -100,16 +71,63 @@ Speaker::~Speaker() {
 	#endif
 }
 
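+// Lazily create the playback buffer and open the PortAudio output stream
+// once the sample rate and channel count of the incoming audio are known.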
+void Speaker::_open(int fsize, int sample, int channels) {
+	if (buffer_) {
+		delete buffer_;
+		buffer_ = nullptr;
+	}
+
+	LOG(INFO) << "Create speaker: " << sample << "," << channels;
+	if (sample == 0 || channels == 0) return;
+
+	if (channels >= 2) {
+		buffer_ = new ftl::audio::StereoBuffer16<2000>(sample);
+	} else {
+		buffer_ = new ftl::audio::MonoBuffer16<2000>(sample);
+	}
+
+	auto err = Pa_OpenDefaultStream(
+		&stream_,
+		0,
+		channels,
+		paInt16,
+		sample,  // Sample rate
+		256,    // Size of single frame
+		(channels == 1) ? pa_speaker_callback<ftl::audio::MonoBuffer16<2000>> : pa_speaker_callback<ftl::audio::StereoBuffer16<2000>>,
+		this->buffer_
+	);
+
+	if (err != paNoError) {
+		LOG(ERROR) << "Portaudio open stream error: " << Pa_GetErrorText(err);
+		active_ = false;
+		return;
+	} else {
+		active_ = true;
+	}
+
+	err = Pa_StartStream(stream_);
+
+	if (err != paNoError) {
+		LOG(ERROR) << "Portaudio start stream error: " << Pa_GetErrorText(err);
+		//active_ = false;
+		return;
+	}
+
+	LOG(INFO) << "Speaker ready.";
+}
+
 void Speaker::queue(int64_t ts, ftl::audio::Frame &frame) {
-	auto &audio = frame.get<ftl::audio::Audio>(Channel::Audio);
+	auto &audio = frame.get<ftl::audio::Audio>((frame.hasChannel(Channel::AudioStereo)) ? Channel::AudioStereo : Channel::AudioMono);
+
+	if (!buffer_) {
+		_open(256, frame.getSettings().sample_rate, frame.getSettings().channels);
+	}
+	if (!buffer_) return;
 
-	//LOG(INFO) << "Buffer Fullness (" << ts << "): " << buffer_.size();
-	buffer_.write(audio.data());
+	//LOG(INFO) << "Buffer Fullness (" << ts << "): " << buffer_->size() << " - " << audio.size();
+	buffer_->write(audio.data());
 	//LOG(INFO) << "Audio delay: " << buffer_.delay() << "s";
 }
 
 void Speaker::setDelay(int64_t ms) {
 	float d = static_cast<float>(ms) / 1000.0f + extra_delay_;
 	if (d < 0.0f) d = 0.0f;  // Clamp to 0 delay (not ideal to be exactly 0)
-	buffer_.setDelay(d);
+	if (buffer_) buffer_->setDelay(d);
 }
diff --git a/components/codecs/include/ftl/codecs/channels.hpp b/components/codecs/include/ftl/codecs/channels.hpp
index fea88edf3eea2f0d2a77aae2d8edb655cc657d2f..5af559d560a1af4c6b996d3435b3dd2c0ba4e2bf 100644
--- a/components/codecs/include/ftl/codecs/channels.hpp
+++ b/components/codecs/include/ftl/codecs/channels.hpp
@@ -37,8 +37,8 @@ enum struct Channel : int {
 	RightHighRes	= 20,	// 8UC3 or 8UC4
 
 	Audio			= 32,
-	AudioLeft		= 32,
-	AudioRight		= 33,
+	AudioMono		= 32,
+	AudioStereo		= 33,
 
 	Configuration	= 64,	// JSON Data
 	Settings1		= 65,
diff --git a/components/codecs/include/ftl/codecs/codecs.hpp b/components/codecs/include/ftl/codecs/codecs.hpp
index 58002ac42cba821f1de0979e8f15e85e46126d2e..6ab79574fc22615d1622b56d14531a950b2d51f1 100644
--- a/components/codecs/include/ftl/codecs/codecs.hpp
+++ b/components/codecs/include/ftl/codecs/codecs.hpp
@@ -61,7 +61,8 @@ enum struct definition_t : uint8_t {
 	HTC_VIVE = 8,
 	OLD_SKOOL = 9,
 
-	// TODO: Add audio definitions
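+	// Audio definitions (sample rate in Hz)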
+	hz48000 = 32,
+	hz44100 = 33,
 
 	Invalid
 };
diff --git a/components/rgbd-sources/src/sources/stereovideo/local.cpp b/components/rgbd-sources/src/sources/stereovideo/local.cpp
index dd1b10138ff5c003b01ad12d760da58223007c16..89eeca114786f77c32bf863d92388a4fef7798eb 100644
--- a/components/rgbd-sources/src/sources/stereovideo/local.cpp
+++ b/components/rgbd-sources/src/sources/stereovideo/local.cpp
@@ -17,6 +17,19 @@
 
 #include <ftl/timer.hpp>
 
+#ifndef WIN32
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+#else
+#include <mfapi.h>
+#include <mfidl.h>
+#pragma comment(lib, "mf.lib")
+#pragma comment(lib, "mfplat.lib")
+#pragma comment(lib, "mfuuid.lib")
+#endif
+
 using ftl::rgbd::detail::LocalSource;
 using ftl::rgbd::detail::Calibrate;
 using cv::Mat;
@@ -32,16 +45,50 @@ using std::this_thread::sleep_for;
 LocalSource::LocalSource(nlohmann::json &config)
 		: Configurable(config), timestamp_(0.0) {
 
+	std::vector<ftl::rgbd::detail::DeviceDetails> devices = _selectDevices();
+
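+	// Match "device_left"/"device_right" by name substring when given as
+	// strings, otherwise treat them as device indices, defaulting to the
+	// first two detected cameras.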
+	int device_left = 0;
+	int device_right = -1;
+
+	LOG(INFO) << "Found " << devices.size() << " cameras";
+
+	if (Configurable::get<std::string>("device_left")) {
+		for (auto &d : devices) {
+			if (d.name.find(*Configurable::get<std::string>("device_left")) != std::string::npos) {
+				device_left = d.id;
+				LOG(INFO) << "Device left = " << device_left;
+				break;
+			}
+		}
+	} else {
+		device_left = value("device_left", (devices.size() > 0) ? devices[0].id : 0);
+	}
+
+	if (Configurable::get<std::string>("device_right")) {
+		for (auto &d : devices) {
+			if (d.name.find(*Configurable::get<std::string>("device_right")) != std::string::npos) {
+				if (d.id == device_left) continue;
+				device_right = d.id;
+				break;
+			}
+		}
+	} else {
+		device_right = value("device_right", (devices.size() > 1) ? devices[1].id : 1);
+	}
+
 	nostereo_ = value("nostereo", false);
-	int device_left = value("device_left", 0);
-	int device_right = value("device_right", 1);
+
+	if (device_left < 0) {
+		LOG(ERROR) << "No available cameras";
+		return;
+	}
 
 	// Use cameras
 	camera_a_ = new VideoCapture;
 	LOG(INFO) << "Cameras check... ";
 	camera_a_->open(device_left);
 
-	if (!nostereo_) {
+	if (!nostereo_ && device_right >= 0) {
 		camera_b_ = new VideoCapture(device_right);
 	} else {
 		camera_b_ = nullptr;
@@ -100,6 +147,153 @@ LocalSource::LocalSource(nlohmann::json &config, const string &vid)
 	LOG(FATAL) << "Stereo video file sources no longer supported";
 }
 
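+// Enumerate the available capture devices: Media Foundation on Windows,
+// V4L2 device nodes (/dev/video0-9) elsewhere, recording each device name,
+// index and, on Linux, its largest supported YUYV frame size.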
+std::vector<ftl::rgbd::detail::DeviceDetails> LocalSource::_selectDevices() {
+	std::vector<ftl::rgbd::detail::DeviceDetails> devices;
+
+#ifdef WIN32
+	UINT32 count = 0;
+
+	IMFAttributes *pConfig = NULL;
+	IMFActivate **ppDevices = NULL;
+
+	// Create an attribute store to hold the search criteria.
+	HRESULT hr = MFCreateAttributes(&pConfig, 1);
+
+	// Request video capture devices.
+	if (SUCCEEDED(hr))
+	{
+		hr = pConfig->SetGUID(
+			MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE,
+			MF_DEVSOURCE_ATTRIBUTE_SOURCE_TYPE_VIDCAP_GUID
+		);
+	}
+
+	// Enumerate the devices,
+	if (SUCCEEDED(hr))
+	{
+		hr = MFEnumDeviceSources(pConfig, &ppDevices, &count);
+	}
+
+	// Create a media source for the first device in the list.
+	if (SUCCEEDED(hr))
+	{
+		if (count > 0)
+		{
+			for (int i = 0; i < count; ++i) {
+				HRESULT hr = S_OK;
+				WCHAR *szFriendlyName = NULL;
+
+				// Try to get the display name.
+				UINT32 cchName;
+				hr = ppDevices[i]->GetAllocatedString(
+					MF_DEVSOURCE_ATTRIBUTE_FRIENDLY_NAME,
+					&szFriendlyName, &cchName);
+
+				if (SUCCEEDED(hr))
+				{
+					// Convert the wide-character friendly name for logging.
+					char temp[100];
+					size_t size;
+					wcstombs_s(&size, temp, 100, szFriendlyName, _TRUNCATE);
+
+					LOG(INFO) << " -- " << temp;
+					devices.push_back({
+						std::string((const char*)temp),
+						i,
+						0,
+						0
+					});
+				}
+				CoTaskMemFree(szFriendlyName);
+			}
+		}
+	}
+
+	for (DWORD i = 0; i < count; i++)
+	{
+		ppDevices[i]->Release();
+	}
+	CoTaskMemFree(ppDevices);
+#else
+
+	int fd;
+    v4l2_capability video_cap;
+	v4l2_frmsizeenum video_fsize;
+
+	LOG(INFO) << "Video Devices:";
+
+	for (int i=0; i<10; ++i) {
+		std::string path = "/dev/video";
+		path += std::to_string(i);
+
+		if ((fd = open(path.c_str(), O_RDONLY)) == -1) {
+			break;
+		}
+
+		if(ioctl(fd, VIDIOC_QUERYCAP, &video_cap) == -1) {
+			LOG(WARNING) << "Can't get video capabilities";
+			close(fd);
+			continue;
+		}// else {
+
+		// Get some formats
+		v4l2_fmtdesc pixfmt;
+		pixfmt.index = 0;
+		pixfmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+		while (ioctl(fd, VIDIOC_ENUM_FMT, &pixfmt) == 0) {
+			LOG(INFO) << " -- -- format = " << pixfmt.description << " code = " << ((char*)&pixfmt.pixelformat)[0] << ((char*)&pixfmt.pixelformat)[1] << ((char*)&pixfmt.pixelformat)[2] << ((char*)&pixfmt.pixelformat)[3];
+			pixfmt.index++;
+		}
+
+		memset(&video_fsize, 0, sizeof(video_fsize));
+		video_fsize.index = 0;
+		video_fsize.pixel_format = v4l2_fourcc('Y','U','Y','V');
+
+		size_t maxwidth = 0;
+		size_t maxheight = 0;
+
+		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &video_fsize) == 0) {
+			maxwidth = std::max(maxwidth, static_cast<size_t>((video_fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) ? video_fsize.discrete.width : video_fsize.stepwise.max_width));
+			maxheight = std::max(maxheight, static_cast<size_t>((video_fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE) ? video_fsize.discrete.height : video_fsize.stepwise.max_height));
+			video_fsize.index++;
+		}
+
+			//printf("Name:\t\t '%s'\n", video_cap.name);
+			//printf("Minimum size:\t%d x %d\n", video_cap.minwidth, video_cap.minheight);
+			//printf("Maximum size:\t%d x %d\n", video_cap.maxwidth, video_cap.maxheight);
+
+		if (maxwidth > 0 && maxheight > 0) {
+			devices.push_back({
+				std::string((const char*)video_cap.card),
+				i,
+				maxwidth,
+				maxheight
+			});
+		}
+
+		LOG(INFO) << " -- " << video_cap.card << " (" << maxwidth << "x" << maxheight << ")";
+		//}
+
+		/*if(ioctl(fd, VIDIOCGWIN, &video_win) == -1)
+			perror("cam_info: Can't get window information");
+		else
+			printf("Current size:\t%d x %d\n", video_win.width, video_win.height);
+
+		if(ioctl(fd, VIDIOCGPICT, &video_pic) == -1)
+			perror("cam_info: Can't get picture information");
+		else
+			printf("Current depth:\t%d\n", video_pic.depth);*/
+
+		close(fd);
+	}
+
+#endif
+
+	return devices;
+}
+
 
 bool LocalSource::grab() {
 	if (!camera_a_) return false;
diff --git a/components/rgbd-sources/src/sources/stereovideo/local.hpp b/components/rgbd-sources/src/sources/stereovideo/local.hpp
index 90654916489ed27d111ab616a220c2bd5323a38a..243df4c4787ea83707b70d12520baeee5b0f42bb 100644
--- a/components/rgbd-sources/src/sources/stereovideo/local.hpp
+++ b/components/rgbd-sources/src/sources/stereovideo/local.hpp
@@ -16,6 +16,13 @@ namespace detail {
 
 class Calibrate;
 
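+/**
+ * Description of a detected capture device: display name, device index and
+ * the largest reported frame size (zero when unknown).
+ */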
+struct DeviceDetails {
+	std::string name;
+	int id;
+	size_t maxwidth;
+	size_t maxheight;
+};
+
 class LocalSource : public Configurable {
 	public:
 	explicit LocalSource(nlohmann::json &config);
@@ -64,6 +71,8 @@ class LocalSource : public Configurable {
 
 	cv::Mat frame_l_;
 	cv::Mat frame_r_;
+
+	std::vector<DeviceDetails> _selectDevices();
 };
 
 }
diff --git a/components/streams/src/receiver.cpp b/components/streams/src/receiver.cpp
index 8d3aef21af0f5ad40f94d3b146660f6ef8d169bb..b2029d56f06b0a92fb014aea5c0088f98eb1a9b8 100644
--- a/components/streams/src/receiver.cpp
+++ b/components/streams/src/receiver.cpp
@@ -21,6 +21,7 @@ using ftl::stream::parsePose;
 using ftl::stream::parseConfig;
 using ftl::stream::injectCalibration;
 using ftl::stream::injectPose;
+using ftl::codecs::definition_t;
 
 Receiver::Receiver(nlohmann::json &config) : ftl::Configurable(config), stream_(nullptr) {
 	timestamp_ = 0;
@@ -102,7 +103,7 @@ Receiver::InternalAudioStates &Receiver::_getAudioFrame(const StreamPacket &spkt
 		audio_frames_[spkt.streamID][audio_frames_[spkt.streamID].size()-1]->state.set("name",std::string("Source ")+std::to_string(fn+1));
 	}
 	auto &f = *audio_frames_[spkt.streamID][fn];
-	if (!f.frame.origin()) f.frame.setOrigin(&f.state);
+	//if (!f.frame.origin()) f.frame.setOrigin(&f.state);
 	return f;
 }
 
@@ -131,6 +132,7 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 	// Audio Data
 	InternalAudioStates &frame = _getAudioFrame(spkt);
 
+	frame.frame.reset();
 	frame.timestamp = spkt.timestamp;
 	auto &audio = frame.frame.create<ftl::audio::Audio>(spkt.channel);
 	size_t size = pkt.data.size()/sizeof(short);
@@ -138,6 +140,20 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 	auto *ptr = (short*)pkt.data.data();
 	for (size_t i=0; i<size; i++) audio.data()[i] = ptr[i];
 
+	// Generate settings from packet data
+	ftl::audio::AudioSettings settings;
+	settings.channels = (spkt.channel == Channel::AudioStereo) ? 2 : 1;
+	settings.frame_size = 256;
+	
+	switch (pkt.definition) {
+	case definition_t::hz48000		: settings.sample_rate = 48000; break;
+	case definition_t::hz44100		: settings.sample_rate = 44100; break;
+	default: settings.sample_rate = 48000; break;
+	}
+
+	frame.state.setLeft(settings);
+	frame.frame.setOrigin(&frame.state);
+
 	if (audio_cb_) {
 		// Create an audio frameset wrapper.
 		ftl::audio::FrameSet fs;
diff --git a/components/streams/src/sender.cpp b/components/streams/src/sender.cpp
index d39d0a0ddd80dcbb19d678510bb6600ff4697615..659e8f5151a6714d11661ca238914679d4a8ff18 100644
--- a/components/streams/src/sender.cpp
+++ b/components/streams/src/sender.cpp
@@ -62,18 +62,29 @@ void Sender::post(const ftl::audio::FrameSet &fs) {
 	for (size_t i=0; i<fs.frames.size(); ++i) {
-		if (!fs.frames[i].hasChannel(Channel::Audio)) continue;
+		if (!fs.frames[i].hasChannel(Channel::AudioMono) && !fs.frames[i].hasChannel(Channel::AudioStereo)) continue;
 
-		auto &data = fs.frames[i].get<ftl::audio::Audio>(Channel::Audio);
+		auto &data = (fs.frames[i].hasChannel(Channel::AudioStereo)) ?
+			fs.frames[i].get<ftl::audio::Audio>(Channel::AudioStereo) :
+			fs.frames[i].get<ftl::audio::Audio>(Channel::AudioMono);
+
+		auto &settings = fs.frames[i].getSettings();
 
 		StreamPacket spkt;
 		spkt.version = 4;
 		spkt.timestamp = fs.timestamp;
 		spkt.streamID = fs.id;
 		spkt.frame_number = i;
-		spkt.channel = Channel::Audio;
+		spkt.channel = (fs.frames[i].hasChannel(Channel::AudioStereo)) ? Channel::AudioStereo : Channel::AudioMono;
 
 		ftl::codecs::Packet pkt;
 		pkt.codec = ftl::codecs::codec_t::RAW;
 		pkt.definition = ftl::codecs::definition_t::Any;
+
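+		// Record the sample rate in the definition field so the receiver can
+		// reconstruct the audio settings.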
+		switch (settings.sample_rate) {
+		case 48000		: pkt.definition = ftl::codecs::definition_t::hz48000; break;
+		case 44100		: pkt.definition = ftl::codecs::definition_t::hz44100; break;
+		default: break;
+		}
+
 		pkt.frame_count = 1;
 		pkt.flags = 0;
 		pkt.bitrate = 0;
diff --git a/components/structures/include/ftl/data/frame.hpp b/components/structures/include/ftl/data/frame.hpp
index 182e8e9cee975258170a9d197a63ee193b057da4..612b459b7872a4737b76013e28d84acb5d8cccee 100644
--- a/components/structures/include/ftl/data/frame.hpp
+++ b/components/structures/include/ftl/data/frame.hpp
@@ -492,6 +492,11 @@ const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getLeft() c
 	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration);
 }
 
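+// Settings are stored in the Calibration channel slot, so this mirrors getLeft().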
+template <int BASE, int N, typename STATE, typename DATA>
+const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getSettings() const {
+	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration);
+}
+
 template <int BASE, int N, typename STATE, typename DATA>
 const typename STATE::Settings &ftl::data::Frame<BASE,N,STATE,DATA>::getRight() const {
 	return get<typename STATE::Settings>(ftl::codecs::Channel::Calibration2);