Commit 274e9ab8 authored by Nicolas Pope

Merge branch 'feature/121/vr' into 'master'

Feature/121/vr

See merge request nicolas.pope/ftl!110
parents 89cd1653 43fc61a7
1 merge request: !110 Feature/121/vr
Pipeline #14420 passed
......@@ -40,6 +40,19 @@ if (LibArchive_FOUND)
set(HAVE_LIBARCHIVE true)
endif()
## OpenVR API path
find_library(OPENVR_LIBRARIES
NAMES
openvr_api
)
set(OPENVR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../headers)
if (OPENVR_LIBRARIES)
message(STATUS "Found OpenVR: ${OPENVR_LIBRARIES}")
set(HAVE_OPENVR true)
endif()
if (WITH_FIXSTARS)
find_package( LibSGM )
if (LibSGM_FOUND)
......
......@@ -27,6 +27,6 @@ target_include_directories(ftl-gui PUBLIC
#endif()
#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd Threads::Threads ${OpenCV_LIBS} glog::glog ftlnet nanogui GL)
target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd Threads::Threads ${OpenCV_LIBS} ${OPENVR_LIBRARIES} glog::glog ftlnet nanogui GL)
......@@ -151,6 +151,8 @@ ftl::gui::Camera::Camera(ftl::gui::Screen *screen, ftl::rgbd::Source *src) : scr
depth_.create(depth.size(), depth.type());
cv::swap(rgb_,rgb);
cv::swap(depth_, depth);
cv::flip(rgb_,rgb_,0);
cv::flip(depth_,depth_,0);
});
}
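For context on the hunk above, cv::flip with flipCode 0 mirrors the image around the x-axis (a vertical flip); a positive code flips around the y-axis and a negative code flips both. A minimal illustration, with upsideDown as a purely illustrative name:

// Illustrative only: flipCode 0 flips vertically, 1 horizontally, -1 both.
cv::Mat upsideDown;
cv::flip(rgb_, upsideDown, 0);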
......@@ -253,6 +255,17 @@ void ftl::gui::Camera::setChannel(Channel c) {
}
}
static Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
{
Eigen::Matrix4d matrixObj;
matrixObj <<
matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f;
return matrixObj;
}
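OpenVR's vr::HmdMatrix34_t holds the device pose as a row-major 3x4 matrix with the translation in its last column. As an illustrative alternative to the conversion above (not part of this commit; HmdMatrixToEigen is a hypothetical helper, assuming Eigen 3), the same data can be mapped directly into a conventional 4x4 homogeneous matrix:

// Sketch of a hypothetical helper: map the row-major 3x4 OpenVR pose into
// a standard [R|t] homogeneous matrix, translation in the last column.
static Eigen::Matrix4d HmdMatrixToEigen(const vr::HmdMatrix34_t &matPose)
{
    Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
    T.topLeftCorner<3,4>() =
        Eigen::Map<const Eigen::Matrix<float,3,4,Eigen::RowMajor>>(&matPose.m[0][0]).cast<double>();
    return T;
}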
static void visualizeDepthMap( const cv::Mat &depth, cv::Mat &out,
const float max_depth)
{
......@@ -307,16 +320,44 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
if (src_ && src_->isReady()) {
UNIQUE_LOCK(mutex_, lk);
// Lerp the Eye
eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
if (screen_->hasVR()) {
#ifdef HAVE_OPENVR
src_->setChannel(Channel::Right);
vr::VRCompositor()->WaitGetPoses(rTrackedDevicePose_, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
if ( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
{
auto pose = ConvertSteamVRMatrixToMatrix4( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking );
pose.inverse();
Eigen::Translation3d trans(eye_);
Eigen::Affine3d t(trans);
Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
// Lerp the Eye
eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
Eigen::Translation3d trans(eye_);
Eigen::Affine3d t(trans);
Eigen::Matrix4d viewPose = t.matrix() * pose;
if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
} else {
LOG(ERROR) << "No VR Pose";
}
#endif
} else {
// Lerp the Eye
eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
Eigen::Translation3d trans(eye_);
Eigen::Affine3d t(trans);
Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
}
if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
src_->grab();
//src_->getFrames(rgb, depth);
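One point worth noting in the VR branch above: Eigen's inverse() returns a new matrix rather than inverting in place, so a bare pose.inverse(); statement leaves pose unchanged. A minimal sketch of capturing the inverted HMD pose if it is needed (hmdPose and hmdPoseInv are illustrative names; types as in the hunk):

// Sketch: capture the result of inverse(); Eigen does not invert in place.
Eigen::Matrix4d hmdPose = ConvertSteamVRMatrixToMatrix4(
    rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking);
Eigen::Matrix4d hmdPoseInv = hmdPose.inverse();  // inverse of device-to-world pose
Eigen::Matrix4d viewPose = t.matrix() * hmdPoseInv;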
......@@ -368,6 +409,13 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
if (rgb_.rows == 0) { break; }
//imageSize = Vector2f(rgb.cols,rgb.rows);
texture_.update(rgb_);
#ifdef HAVE_OPENVR
if (screen_->hasVR() && depth_.channels() >= 3) {
LOG(INFO) << "DRAW RIGHT";
textureRight_.update(depth_);
}
#endif
}
}
......
......@@ -6,6 +6,10 @@
#include <string>
#ifdef HAVE_OPENVR
#include <openvr/openvr.h>
#endif
class StatisticsImage;
namespace ftl {
......@@ -41,6 +45,8 @@ class Camera {
const ftl::rgbd::Channels &availableChannels();
const GLTexture &captureFrame();
const GLTexture &getLeft() const { return texture_; }
const GLTexture &getRight() const { return textureRight_; }
bool thumbnail(cv::Mat &thumb);
......@@ -53,6 +59,7 @@ class Camera {
ftl::rgbd::Source *src_;
GLTexture thumb_;
GLTexture texture_;
GLTexture textureRight_;
ftl::gui::PoseWindow *posewin_;
nlohmann::json meta_;
Eigen::Vector4d neye_;
......@@ -69,6 +76,10 @@ class Camera {
cv::Mat rgb_;
cv::Mat depth_;
MUTEX mutex_;
#ifdef HAVE_OPENVR
vr::TrackedDevicePose_t rTrackedDevicePose_[ vr::k_unMaxTrackedDeviceCount ];
#endif
};
}
......
......@@ -37,7 +37,7 @@ namespace {
uv = vertex;
vec2 scaledVertex = (vertex * scaleFactor) + position;
gl_Position = vec4(2.0*scaledVertex.x - 1.0,
1.0 - 2.0*scaledVertex.y,
2.0*scaledVertex.y - 1.0,
0.0, 1.0);
})";
......@@ -244,10 +244,31 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
setVisible(true);
performLayout();
#ifdef HAVE_OPENVR
if (vr::VR_IsHmdPresent()) {
// Loading the SteamVR Runtime
vr::EVRInitError eError = vr::VRInitError_None;
HMD_ = vr::VR_Init( &eError, vr::VRApplication_Scene );
if ( eError != vr::VRInitError_None )
{
HMD_ = nullptr;
LOG(ERROR) << "Unable to init VR runtime: " << vr::VR_GetVRInitErrorAsEnglishDescription( eError );
}
} else {
HMD_ = nullptr;
}
#endif
}
ftl::gui::Screen::~Screen() {
mShader.free();
#ifdef HAVE_OPENVR
vr::VR_Shutdown();
#endif
}
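Since VR_Init() can fail and leave HMD_ null, one defensive variant is to guard the shutdown call on the handle; an illustrative sketch of the destructor under that assumption:

ftl::gui::Screen::~Screen() {
    mShader.free();
#ifdef HAVE_OPENVR
    // Sketch: only tear down the OpenVR runtime if it was initialised.
    if (HMD_) vr::VR_Shutdown();
#endif
}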
void ftl::gui::Screen::setActiveCamera(ftl::gui::Camera *cam) {
......@@ -337,6 +358,18 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) {
imageSize = {camera_->width(), camera_->height()};
mImageID = camera_->captureFrame().texture();
leftEye_ = mImageID;
rightEye_ = camera_->getRight().texture();
#ifdef HAVE_OPENVR
if (hasVR() && imageSize[0] > 0 && camera_->getLeft().isValid() && camera_->getRight().isValid()) {
vr::Texture_t leftEyeTexture = {(void*)(uintptr_t)leftEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture );
glBindTexture(GL_TEXTURE_2D, rightEye_);
vr::Texture_t rightEyeTexture = {(void*)(uintptr_t)rightEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture );
}
#endif
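IVRCompositor::Submit() returns a vr::EVRCompositorError, which can be inspected when diagnosing missing output in the headset; a brief sketch of checking it (errL and errR are illustrative names; this error handling is not part of the commit):

// Sketch: log compositor errors when submitting the eye textures.
vr::EVRCompositorError errL = vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture);
vr::EVRCompositorError errR = vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture);
if (errL != vr::VRCompositorError_None || errR != vr::VRCompositorError_None) {
    LOG(ERROR) << "VR compositor submit failed: " << (int)errL << " / " << (int)errR;
}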
if (mImageID < std::numeric_limits<unsigned int>::max() && imageSize[0] > 0) {
auto mScale = (screenSize.cwiseQuotient(imageSize).minCoeff());
......
......@@ -11,6 +11,10 @@
#include "src_window.hpp"
#include "gltexture.hpp"
#ifdef HAVE_OPENVR
#include <openvr/openvr.h>
#endif
class StatisticsImageNSamples;
namespace ftl {
......@@ -39,6 +43,12 @@ class Screen : public nanogui::Screen {
void setActiveCamera(ftl::gui::Camera*);
ftl::gui::Camera *activeCamera() { return camera_; }
#ifdef HAVE_OPENVR
bool hasVR() const { return HMD_ != nullptr; }
#else
bool hasVR() const { return false; }
#endif
nanogui::Theme *windowtheme;
nanogui::Theme *specialtheme;
nanogui::Theme *mediatheme;
......@@ -68,6 +78,13 @@ class Screen : public nanogui::Screen {
ftl::Configurable *root_;
std::string status_;
ftl::gui::Camera *camera_;
GLuint leftEye_;
GLuint rightEye_;
#ifdef HAVE_OPENVR
vr::IVRSystem *HMD_;
#endif
};
}
......
......@@ -23,6 +23,7 @@
#cmakedefine HAVE_REALSENSE
#cmakedefine HAVE_NANOGUI
#cmakedefine HAVE_LIBARCHIVE
#cmakedefine HAVE_OPENVR
#cmakedefine HAVE_NVPIPE
extern const char *FTL_BRANCH;
......
......@@ -22,9 +22,11 @@ class Splatter : public ftl::render::Renderer {
~Splatter();
bool render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream=0) override;
//void setOutputDevice(int);
protected:
void renderChannel(ftl::render::SplatParams &params, ftl::rgbd::Frame &out, const ftl::rgbd::Channel &channel, cudaStream_t stream);
private:
int device_;
/*ftl::cuda::TextureObject<int> depth1_;
......
......@@ -19,75 +19,18 @@ Splatter::~Splatter() {
}
bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream) {
SHARED_LOCK(scene_->mtx, lk);
if (!src->isReady()) return false;
const auto &camera = src->parameters();
//cudaSafeCall(cudaSetDevice(scene_->getCUDADevice()));
// Create all the required channels
out.create<GpuMat>(Channel::Depth, Format<float>(camera.width, camera.height));
out.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height));
// FIXME: Use source resolutions, not virtual resolution
temp_.create<GpuMat>(Channel::Colour, Format<float4>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Colour2, Format<uchar4>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Contribution, Format<float>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Depth, Format<int>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Depth2, Format<int>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Normals, Format<float4>(camera.width, camera.height));
void Splatter::renderChannel(
ftl::render::SplatParams &params, ftl::rgbd::Frame &out,
const Channel &channel, cudaStream_t stream)
{
cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
// Create buffers if they don't exist
/*if ((unsigned int)depth1_.width() != camera.width || (unsigned int)depth1_.height() != camera.height) {
depth1_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
}
if ((unsigned int)depth3_.width() != camera.width || (unsigned int)depth3_.height() != camera.height) {
depth3_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
}
if ((unsigned int)colour1_.width() != camera.width || (unsigned int)colour1_.height() != camera.height) {
colour1_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
}
if ((unsigned int)colour_tmp_.width() != camera.width || (unsigned int)colour_tmp_.height() != camera.height) {
colour_tmp_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
}
if ((unsigned int)normal1_.width() != camera.width || (unsigned int)normal1_.height() != camera.height) {
normal1_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
}
if ((unsigned int)depth2_.width() != camera.width || (unsigned int)depth2_.height() != camera.height) {
depth2_ = ftl::cuda::TextureObject<float>(camera.width, camera.height);
}
if ((unsigned int)colour2_.width() != camera.width || (unsigned int)colour2_.height() != camera.height) {
colour2_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
}*/
// Parameters object to pass to CUDA describing the camera
SplatParams params;
params.m_flags = 0;
if (src->value("splatting", true) == false) params.m_flags |= ftl::render::kNoSplatting;
if (src->value("upsampling", true) == false) params.m_flags |= ftl::render::kNoUpsampling;
if (src->value("texturing", true) == false) params.m_flags |= ftl::render::kNoTexturing;
params.m_viewMatrix = MatrixConversion::toCUDA(src->getPose().cast<float>().inverse());
params.m_viewMatrixInverse = MatrixConversion::toCUDA(src->getPose().cast<float>());
params.camera = camera;
// Clear all channels to 0 or max depth
temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
temp_.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
temp_.get<GpuMat>(Channel::Contribution).setTo(cv::Scalar(0.0f), cvstream);
out.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
out.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(76,76,76), cvstream);
//LOG(INFO) << "Render ready: " << camera.width << "," << camera.height;
temp_.createTexture<int>(Channel::Depth);
// Render each camera into virtual view
for (size_t i=0; i<scene_->frames.size(); ++i) {
for (size_t i=0; i < scene_->frames.size(); ++i) {
auto &f = scene_->frames[i];
auto *s = scene_->sources[i];
......@@ -145,23 +88,97 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
// Normalise attribute contributions
ftl::cuda::dibr_normalise(
temp_.createTexture<float4>(Channel::Colour),
out.createTexture<uchar4>(Channel::Colour),
out.createTexture<uchar4>(channel),
temp_.createTexture<float>(Channel::Contribution),
stream
);
}
bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream) {
SHARED_LOCK(scene_->mtx, lk);
if (!src->isReady()) return false;
const auto &camera = src->parameters();
//cudaSafeCall(cudaSetDevice(scene_->getCUDADevice()));
// Create all the required channels
out.create<GpuMat>(Channel::Depth, Format<float>(camera.width, camera.height));
out.create<GpuMat>(Channel::Colour, Format<uchar4>(camera.width, camera.height));
// FIXME: Use source resolutions, not virtual resolution
temp_.create<GpuMat>(Channel::Colour, Format<float4>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Colour2, Format<uchar4>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Contribution, Format<float>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Depth, Format<int>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Depth2, Format<int>(camera.width, camera.height));
temp_.create<GpuMat>(Channel::Normals, Format<float4>(camera.width, camera.height));
cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
// Create buffers if they don't exist
/*if ((unsigned int)depth1_.width() != camera.width || (unsigned int)depth1_.height() != camera.height) {
depth1_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
}
if ((unsigned int)depth3_.width() != camera.width || (unsigned int)depth3_.height() != camera.height) {
depth3_ = ftl::cuda::TextureObject<int>(camera.width, camera.height);
}
if ((unsigned int)colour1_.width() != camera.width || (unsigned int)colour1_.height() != camera.height) {
colour1_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
}
if ((unsigned int)colour_tmp_.width() != camera.width || (unsigned int)colour_tmp_.height() != camera.height) {
colour_tmp_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
}
if ((unsigned int)normal1_.width() != camera.width || (unsigned int)normal1_.height() != camera.height) {
normal1_ = ftl::cuda::TextureObject<float4>(camera.width, camera.height);
}
if ((unsigned int)depth2_.width() != camera.width || (unsigned int)depth2_.height() != camera.height) {
depth2_ = ftl::cuda::TextureObject<float>(camera.width, camera.height);
}
if ((unsigned int)colour2_.width() != camera.width || (unsigned int)colour2_.height() != camera.height) {
colour2_ = ftl::cuda::TextureObject<uchar4>(camera.width, camera.height);
}*/
// Parameters object to pass to CUDA describing the camera
SplatParams params;
params.m_flags = 0;
if (src->value("splatting", true) == false) params.m_flags |= ftl::render::kNoSplatting;
if (src->value("upsampling", true) == false) params.m_flags |= ftl::render::kNoUpsampling;
if (src->value("texturing", true) == false) params.m_flags |= ftl::render::kNoTexturing;
params.m_viewMatrix = MatrixConversion::toCUDA(src->getPose().cast<float>().inverse());
params.m_viewMatrixInverse = MatrixConversion::toCUDA(src->getPose().cast<float>());
params.camera = camera;
// Clear all channels to 0 or max depth
out.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
out.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(76,76,76), cvstream);
//LOG(INFO) << "Render ready: " << camera.width << "," << camera.height;
temp_.createTexture<int>(Channel::Depth);
renderChannel(params, out, Channel::Colour, stream);
Channel chan = src->getChannel();
if (chan == Channel::Depth) {
if (chan == Channel::Depth)
{
temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
} else if (chan == Channel::Energy) {
}
else if (chan == Channel::Energy)
{
cv::cuda::swap(temp_.get<GpuMat>(Channel::Energy), out.create<GpuMat>(Channel::Energy));
} else if (chan == Channel::Right) {
}
else if (chan == Channel::Right)
{
Eigen::Affine3f transform(Eigen::Translation3f(camera.baseline,0.0f,0.0f));
Eigen::Matrix4f matrix = src->getPose().cast<float>() * transform.matrix();
params.m_viewMatrix = MatrixConversion::toCUDA(matrix.inverse());
params.m_viewMatrixInverse = MatrixConversion::toCUDA(matrix);
// TODO: Repeat rendering process...
out.create<GpuMat>(Channel::Right, Format<uchar4>(camera.width, camera.height));
out.get<GpuMat>(Channel::Right).setTo(cv::Scalar(76,76,76), cvstream);
renderChannel(params, out, Channel::Right, stream);
}
return true;
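For reference, composing the translation on the right of the source pose offsets the virtual eye along the camera's local x-axis by the stereo baseline, which is what produces the second viewpoint for Channel::Right. A small Eigen sketch of that composition, with illustrative names and an example baseline value:

// Sketch: right-eye pose = left-eye pose composed with a local +x shift.
Eigen::Affine3f shift(Eigen::Translation3f(0.065f, 0.0f, 0.0f));  // example ~6.5 cm baseline
Eigen::Matrix4f leftPose = Eigen::Matrix4f::Identity();            // stand-in for src->getPose()
Eigen::Matrix4f rightPose = leftPose * shift.matrix();
// The renderer then uses rightPose.inverse() as the view matrix for the right eye.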
......
......@@ -6,22 +6,26 @@
namespace ftl {
namespace cuda {
void dibr_merge(ftl::cuda::TextureObject<float4> &points, ftl::cuda::TextureObject<int> &depth, ftl::render::SplatParams params, cudaStream_t stream);
void dibr_merge(
ftl::cuda::TextureObject<float4> &points,
ftl::cuda::TextureObject<int> &depth,
ftl::render::SplatParams params,
cudaStream_t stream);
void dibr_attribute(
ftl::cuda::TextureObject<uchar4> &colour_in, // Original colour image
ftl::cuda::TextureObject<float4> &points, // Original 3D points
ftl::cuda::TextureObject<int> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &colour_out, // Accumulated output
//TextureObject<float4> normal_out,
ftl::cuda::TextureObject<float> &contrib_out,
ftl::render::SplatParams &params, cudaStream_t stream);
void dibr_attribute(
ftl::cuda::TextureObject<uchar4> &colour_in, // Original colour image
ftl::cuda::TextureObject<float4> &points, // Original 3D points
ftl::cuda::TextureObject<int> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &colour_out, // Accumulated output
//TextureObject<float4> normal_out,
ftl::cuda::TextureObject<float> &contrib_out,
ftl::render::SplatParams &params, cudaStream_t stream);
void dibr_normalise(
ftl::cuda::TextureObject<float4> &colour_in,
ftl::cuda::TextureObject<uchar4> &colour_out,
ftl::cuda::TextureObject<float> &contribs,
cudaStream_t stream);
void dibr_normalise(
ftl::cuda::TextureObject<float4> &colour_in,
ftl::cuda::TextureObject<uchar4> &colour_out,
ftl::cuda::TextureObject<float> &contribs,
cudaStream_t stream);
}
}
......
......@@ -496,7 +496,7 @@ void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::Packet &pkt
frame_no_,
static_cast<uint8_t>((chan & 0x1) | ((hasChan2) ? 0x2 : 0x0))
};
LOG(INFO) << "codec:" << (int) pkt.codec;
// Lock to prevent clients being added / removed
//SHARED_LOCK(src->mutex,lk);
auto c = src->clients.begin();
......
......@@ -36,13 +36,19 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
LOG(ERROR) << "Unknown exception in render callback";
}
if (frame.hasChannel(Channel::Colour) && frame.hasChannel(Channel::Depth)) {
frame.download(Channel::Colour + Channel::Depth);
cv::swap(frame.get<cv::Mat>(Channel::Colour), rgb_);
cv::swap(frame.get<cv::Mat>(Channel::Depth), depth_);
LOG(INFO) << "Written: " << rgb_.cols;
if (frame.hasChannel(Channel::Colour)) {
frame.download(Channel::Colour);
cv::swap(frame.get<cv::Mat>(Channel::Colour), rgb_);
} else {
LOG(ERROR) << "Missing colour or depth frame in rendering";
LOG(ERROR) << "Channel 1 frame in rendering";
}
if ((host_->getChannel() != Channel::None) &&
frame.hasChannel(host_->getChannel())) {
frame.download(host_->getChannel());
cv::swap(frame.get<cv::Mat>(host_->getChannel()), depth_);
} else {
LOG(ERROR) << "Channel 2 frame in rendering";
}
auto cb = host_->callback();
......