diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7dc1566bc1604d4dc336a159f0698b7fa69d85c0..b0aecb25a357a5c978db35a80cb20487adb5d08d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,6 +40,19 @@ if (LibArchive_FOUND)
 	set(HAVE_LIBARCHIVE true)
 endif()
 
+## OpenVR API path
+find_library(OPENVR_LIBRARIES
+	NAMES
+	openvr_api
+)
+set(OPENVR_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../headers)
+
+if (OPENVR_LIBRARIES)
+	message(STATUS "Found OpenVR: ${OPENVR_LIBRARIES}")
+	set(HAVE_OPENVR true)
+endif()
+
+
 if (WITH_FIXSTARS)
 	find_package( LibSGM )
 	if (LibSGM_FOUND)
diff --git a/applications/gui/CMakeLists.txt b/applications/gui/CMakeLists.txt
index ce12244c67ee0e5ddff94dd3d178ed6aa4eef351..fbed0680dde997c0f0a76519ac272fb3e5c1c64f 100644
--- a/applications/gui/CMakeLists.txt
+++ b/applications/gui/CMakeLists.txt
@@ -27,6 +27,6 @@ target_include_directories(ftl-gui PUBLIC
 #endif()
 #target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
 
-target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd Threads::Threads ${OpenCV_LIBS} glog::glog ftlnet nanogui GL)
+target_link_libraries(ftl-gui ftlcommon ftlctrl ftlrgbd Threads::Threads ${OpenCV_LIBS} ${OPENVR_LIBRARIES} glog::glog ftlnet nanogui GL)
 
 
diff --git a/applications/gui/src/camera.cpp b/applications/gui/src/camera.cpp
index fe7b115c779989468644014675b020e1a0ed7d52..32d78a4a0ff8bbccfed60b13293b1277ab599792 100644
--- a/applications/gui/src/camera.cpp
+++ b/applications/gui/src/camera.cpp
@@ -151,6 +151,8 @@ ftl::gui::Camera::Camera(ftl::gui::Screen *screen, ftl::rgbd::Source *src) : scr
 		depth_.create(depth.size(), depth.type());
 		cv::swap(rgb_,rgb);
 		cv::swap(depth_, depth);
+		cv::flip(rgb_,rgb_,0);
+		cv::flip(depth_,depth_,0);
 	});
 }
 
@@ -253,6 +255,17 @@ void ftl::gui::Camera::setChannel(Channel c) {
 	}
 }
 
+static Eigen::Matrix4d ConvertSteamVRMatrixToMatrix4( const vr::HmdMatrix34_t &matPose )
+{
+	Eigen::Matrix4d matrixObj;
+	matrixObj <<
+		matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
+		matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
+		matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
+		matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f;
+	return matrixObj;
+}
+
 static void visualizeDepthMap(	const cv::Mat &depth, cv::Mat &out,
 								const float max_depth)
 {
@@ -307,16 +320,44 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
 
 	if (src_ && src_->isReady()) {
 		UNIQUE_LOCK(mutex_, lk);
-		// Lerp the Eye
-		eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
-		eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
-		eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
+		if (screen_->hasVR()) {
+			#ifdef HAVE_OPENVR
+			src_->setChannel(Channel::Right);
+
+			vr::VRCompositor()->WaitGetPoses(rTrackedDevicePose_, vr::k_unMaxTrackedDeviceCount, NULL, 0 );
+
+			if ( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].bPoseIsValid )
+			{
+				auto pose = ConvertSteamVRMatrixToMatrix4( rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking );
+				pose.inverse();
 
-		Eigen::Translation3d trans(eye_);
-		Eigen::Affine3d t(trans);
-		Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
+				// Lerp the Eye
+				eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
+				eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
+				eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
+
+				Eigen::Translation3d trans(eye_);
+				Eigen::Affine3d t(trans);
+				Eigen::Matrix4d viewPose = t.matrix() * pose;
+
+				if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
+			} else {
+				LOG(ERROR) << "No VR Pose";
+			}
+			#endif
+		} else {
+			// Lerp the Eye
+			eye_[0] += (neye_[0] - eye_[0]) * lerpSpeed_ * delta_;
+			eye_[1] += (neye_[1] - eye_[1]) * lerpSpeed_ * delta_;
+			eye_[2] += (neye_[2] - eye_[2]) * lerpSpeed_ * delta_;
+
+			Eigen::Translation3d trans(eye_);
+			Eigen::Affine3d t(trans);
+			Eigen::Matrix4d viewPose = t.matrix() * rotmat_;
+
+			if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
+		}
 
-		if (src_->hasCapabilities(ftl::rgbd::kCapMovable)) src_->setPose(viewPose);
 
 		src_->grab();
 		//src_->getFrames(rgb, depth);
@@ -368,6 +409,13 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
 			if (rgb_.rows == 0) { break; }
 			//imageSize = Vector2f(rgb.cols,rgb.rows);
 			texture_.update(rgb_);
+
+			#ifdef HAVE_OPENVR
+			if (screen_->hasVR() && depth_.channels() >= 3) {
+				LOG(INFO) << "DRAW RIGHT";
+				textureRight_.update(depth_);
+			}
+			#endif
 		}
 	}
 
diff --git a/applications/gui/src/camera.hpp b/applications/gui/src/camera.hpp
index 0d6e99b6d08d5cd5f71dfc20a94d0f35e26518b0..55042fe0d7b3baf5f24a26e390b1348084bcf320 100644
--- a/applications/gui/src/camera.hpp
+++ b/applications/gui/src/camera.hpp
@@ -6,6 +6,10 @@
 
 #include <string>
 
+#ifdef HAVE_OPENVR
+#include <openvr/openvr.h>
+#endif
+
 class StatisticsImage;
 
 namespace ftl {
@@ -41,6 +45,8 @@ class Camera {
 	const ftl::rgbd::Channels &availableChannels();
 
 	const GLTexture &captureFrame();
+	const GLTexture &getLeft() const { return texture_; }
+	const GLTexture &getRight() const { return textureRight_; }
 
 	bool thumbnail(cv::Mat &thumb);
 
@@ -53,6 +59,7 @@ class Camera {
 	ftl::rgbd::Source *src_;
 	GLTexture thumb_;
 	GLTexture texture_;
+	GLTexture textureRight_;
 	ftl::gui::PoseWindow *posewin_;
 	nlohmann::json meta_;
 	Eigen::Vector4d neye_;
@@ -69,6 +76,10 @@ class Camera {
 	cv::Mat rgb_;
 	cv::Mat depth_;
 	MUTEX mutex_;
+
+	#ifdef HAVE_OPENVR
+	vr::TrackedDevicePose_t rTrackedDevicePose_[ vr::k_unMaxTrackedDeviceCount ];
+	#endif
 };
 
 }
diff --git a/applications/gui/src/screen.cpp b/applications/gui/src/screen.cpp
index 22382626d15ffe037020fd8545772cbd506905e8..03d1847112fa86468d9661d5a9966b71a3d46b09 100644
--- a/applications/gui/src/screen.cpp
+++ b/applications/gui/src/screen.cpp
@@ -37,7 +37,7 @@ namespace {
 			uv = vertex;
 			vec2 scaledVertex = (vertex * scaleFactor) + position;
 			gl_Position = vec4(2.0*scaledVertex.x - 1.0,
-								1.0 - 2.0*scaledVertex.y,
+								2.0*scaledVertex.y - 1.0,
 								0.0, 1.0);
 		})";
 
@@ -244,10 +244,31 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl
 
 	setVisible(true);
 	performLayout();
+
+
+	#ifdef HAVE_OPENVR
+	if (vr::VR_IsHmdPresent()) {
+		// Loading the SteamVR Runtime
+		vr::EVRInitError eError = vr::VRInitError_None;
+		HMD_ = vr::VR_Init( &eError, vr::VRApplication_Scene );
+
+		if ( eError != vr::VRInitError_None )
+		{
+			HMD_ = nullptr;
+			LOG(ERROR) << "Unable to init VR runtime: " << vr::VR_GetVRInitErrorAsEnglishDescription( eError );
+		}
+	} else {
+		HMD_ = nullptr;
+	}
+	#endif
 }
 
 ftl::gui::Screen::~Screen() {
 	mShader.free();
+
+	#ifdef HAVE_OPENVR
+	vr::VR_Shutdown();
+	#endif
 }
 
 void ftl::gui::Screen::setActiveCamera(ftl::gui::Camera *cam) {
@@ -337,6 +358,18 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) {
 
 		imageSize = {camera_->width(), camera_->height()};
 		mImageID = camera_->captureFrame().texture();
+		leftEye_ = mImageID;
+		rightEye_ = camera_->getRight().texture();
+
+		#ifdef HAVE_OPENVR
+		if (hasVR() && imageSize[0] > 0 && camera_->getLeft().isValid() && camera_->getRight().isValid()) {
+			vr::Texture_t leftEyeTexture = {(void*)(uintptr_t)leftEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
+			vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture );
+			glBindTexture(GL_TEXTURE_2D, rightEye_);
+			vr::Texture_t rightEyeTexture = {(void*)(uintptr_t)rightEye_, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
+			vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture );
+		}
+		#endif
 
 		if (mImageID < std::numeric_limits<unsigned int>::max() && imageSize[0] > 0) {
 			auto mScale = (screenSize.cwiseQuotient(imageSize).minCoeff());
diff --git a/applications/gui/src/screen.hpp b/applications/gui/src/screen.hpp
index 8bfce830a034e024ec42037dbc43539a62d50101..d51cec2bf2c34afc7c589b7e6114f503379afe30 100644
--- a/applications/gui/src/screen.hpp
+++ b/applications/gui/src/screen.hpp
@@ -11,6 +11,10 @@
 #include "src_window.hpp"
 #include "gltexture.hpp"
 
+#ifdef HAVE_OPENVR
+#include <openvr/openvr.h>
+#endif
+
 class StatisticsImageNSamples;
 
 namespace ftl {
@@ -39,6 +43,12 @@ class Screen : public nanogui::Screen {
 	void setActiveCamera(ftl::gui::Camera*);
 	ftl::gui::Camera *activeCamera() { return camera_; }
 
+	#ifdef HAVE_OPENVR
+	bool hasVR() const { return HMD_ != nullptr; }
+	#else
+	bool hasVR() const { return false; }
+	#endif
+
 	nanogui::Theme *windowtheme;
 	nanogui::Theme *specialtheme;
 	nanogui::Theme *mediatheme;
@@ -68,6 +78,13 @@ class Screen : public nanogui::Screen {
 	ftl::Configurable *root_;
 	std::string status_;
 	ftl::gui::Camera *camera_;
+
+	GLuint leftEye_;
+	GLuint rightEye_;
+
+	#ifdef HAVE_OPENVR
+	vr::IVRSystem *HMD_;
+	#endif
 };
 
 }
diff --git a/components/codecs/src/opencv_encoder.cpp b/components/codecs/src/opencv_encoder.cpp
index 5dafabf29a64c14969504960da8af2bc2e27fd2a..028395b9e32865adb7a907d148308c9085588fa3 100644
--- a/components/codecs/src/opencv_encoder.cpp
+++ b/components/codecs/src/opencv_encoder.cpp
@@ -37,9 +37,8 @@ bool OpenCVEncoder::encode(const cv::Mat &in, definition_t definition, bitrate_t
 		tmp.convertTo(tmp, CV_16UC1, 1000);
 	}
 
-	// TODO: Choose these base upon resolution
-	chunk_count_ = 16;
-	chunk_dim_ = 4;
+	chunk_dim_ = (definition == definition_t::LD360) ? 1 : 4;
+	chunk_count_ = chunk_dim_ * chunk_dim_;
 	jobs_ = chunk_count_;
 
 	for (int i=0; i<chunk_count_; ++i) {
diff --git a/components/common/cpp/include/ftl/config.h.in b/components/common/cpp/include/ftl/config.h.in
index 121abb0605ae912de254f91a4a5be665e7867fff..540b332035a2ad5f671a51ed80f0de31d60f1ce7 100644
--- a/components/common/cpp/include/ftl/config.h.in
+++ b/components/common/cpp/include/ftl/config.h.in
@@ -23,6 +23,7 @@
 #cmakedefine HAVE_REALSENSE
 #cmakedefine HAVE_NANOGUI
 #cmakedefine HAVE_LIBARCHIVE
+#cmakedefine HAVE_OPENVR
 #cmakedefine HAVE_NVPIPE
 
 extern const char *FTL_BRANCH;
diff --git a/components/net/cpp/include/ftl/net/peer.hpp b/components/net/cpp/include/ftl/net/peer.hpp
index 2c3e1fd636edd16772b3bf44ecb0e15fefa3b16b..976155ac94e323b112306f4fa675512ff3d23aca 100644
--- a/components/net/cpp/include/ftl/net/peer.hpp
+++ b/components/net/cpp/include/ftl/net/peer.hpp
@@ -6,6 +6,7 @@
 #endif
 
 #include <ftl/net/common.hpp>
+#include <ftl/exception.hpp>
 
 //#define GLOG_NO_ABBREVIATED_SEVERITIES
 #include <loguru.hpp>
@@ -343,7 +344,8 @@ R Peer::call(const std::string &name, ARGS... args) {
 
 	if (!hasreturned) {
 		cancelCall(id);
-		throw 1;
+		LOG(ERROR) << "RPC Timeout: " << name;
+		throw ftl::exception("RPC failed with timeout");
 	}
 
 	return result;
diff --git a/components/net/cpp/include/ftl/net/universe.hpp b/components/net/cpp/include/ftl/net/universe.hpp
index b4419b1f7fc713259b0d9f1a14f00ff12332dd6b..29680c601c19ba37ff20771659ef379ce3511631 100644
--- a/components/net/cpp/include/ftl/net/universe.hpp
+++ b/components/net/cpp/include/ftl/net/universe.hpp
@@ -378,7 +378,7 @@ R Universe::call(const ftl::UUID &pid, const std::string &name, ARGS... args) {
 	if (p == nullptr || !p->isConnected()) {
 		if (p == nullptr) DLOG(WARNING) << "Attempting to call an unknown peer : " << pid.to_string();
 		else DLOG(WARNING) << "Attempting to call an disconnected peer : " << pid.to_string();
-		throw -1;
+		throw ftl::exception("Calling disconnected peer");
 	}
 	return p->call<R>(name, args...);
 }
diff --git a/components/net/cpp/src/dispatcher.cpp b/components/net/cpp/src/dispatcher.cpp
index 3231b8ddc4604c7929a04c5c33a36385e1bd94ea..7a5df5091235dc081c0c9e51c199dcca5e7f0f31 100644
--- a/components/net/cpp/src/dispatcher.cpp
+++ b/components/net/cpp/src/dispatcher.cpp
@@ -2,6 +2,7 @@
 #include <loguru.hpp>
 #include <ftl/net/dispatcher.hpp>
 #include <ftl/net/peer.hpp>
+#include <ftl/exception.hpp>
 #include <iostream>
 
 using ftl::net::Peer;
@@ -88,13 +89,6 @@ void ftl::net::Dispatcher::dispatch_call(Peer &s, const msgpack::object &msg) {
 			std::stringstream buf;
 			msgpack::pack(buf, res_obj);
 			s.send("__return__", buf.str());*/
-		} catch (int e) {
-			//throw;
-			LOG(ERROR) << "Exception when attempting to call RPC (" << e << ")";
-			/*response_t res_obj = std::make_tuple(1,id,msgpack::object(e),msgpack::object());
-			std::stringstream buf;
-			msgpack::pack(buf, res_obj);
-			s.send("__return__", buf.str());*/
 		}
 	} else {
 		LOG(WARNING) << "No binding found for " << name;
@@ -150,7 +144,7 @@ void ftl::net::Dispatcher::enforce_arg_count(std::string const &func, std::size_
 		std::size_t expected) {
 	if (found != expected) {
 		LOG(FATAL) << "RPC argument missmatch for '" << func << "' - " << found << " != " << expected;
-		throw -1;
+		throw ftl::exception("RPC argument missmatch");
 	}
 }
 
@@ -158,7 +152,7 @@ void ftl::net::Dispatcher::enforce_unique_name(std::string const &func) {
 	auto pos = funcs_.find(func);
 	if (pos != end(funcs_)) {
 		LOG(FATAL) << "RPC non unique binding for '" << func << "'";
-		throw -1;
+		throw ftl::exception("RPC binding not unique");
 	}
 }
 
diff --git a/components/renderers/cpp/include/ftl/render/splat_render.hpp b/components/renderers/cpp/include/ftl/render/splat_render.hpp
index 55522c483c25f58d843543ee8d5ba42aae9c32c8..2cbb82a8183a3a6269a3888134a57144c93050ca 100644
--- a/components/renderers/cpp/include/ftl/render/splat_render.hpp
+++ b/components/renderers/cpp/include/ftl/render/splat_render.hpp
@@ -22,9 +22,11 @@ class Splatter : public ftl::render::Renderer {
 	~Splatter();
 
 	bool render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream=0) override;
-	//void setOutputDevice(int);
+	protected:
+	void renderChannel(ftl::render::SplatParams &params, ftl::rgbd::Frame &out, const ftl::rgbd::Channel &channel, cudaStream_t stream);
+
 	private:
 	int device_;
 	/*ftl::cuda::TextureObject<int> depth1_;
diff --git a/components/renderers/cpp/src/splat_render.cpp b/components/renderers/cpp/src/splat_render.cpp
index daf9f5f64c019d24ae9afb9f2e4540c59b722922..1b39ccebf69e589372ab2944cb907bb03d1dbdd8 100644
--- a/components/renderers/cpp/src/splat_render.cpp
+++ b/components/renderers/cpp/src/splat_render.cpp
@@ -19,6 +19,113 @@ Splatter::~Splatter() {
 
 }
 
+void Splatter::renderChannel(
+					ftl::render::SplatParams &params, ftl::rgbd::Frame &out,
+					const Channel &channel, cudaStream_t stream)
+{
+	cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
+	temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
+	temp_.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
+	temp_.get<GpuMat>(Channel::Contribution).setTo(cv::Scalar(0.0f), cvstream);
+
+	bool is_float = ftl::rgbd::isFloatChannel(channel);
+
+	// Render each camera into virtual view
+	for (size_t i=0; i < scene_->frames.size(); ++i) {
+		auto &f = scene_->frames[i];
+		auto *s = scene_->sources[i];
+
+		if (f.empty(Channel::Depth + Channel::Colour)) {
+			LOG(ERROR) << "Missing required channel";
+			continue;
+		}
+
+		// Needs to create points channel first?
+		if (!f.hasChannel(Channel::Points)) {
+			//LOG(INFO) << "Creating points... " << s->parameters().width;
+
+			auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
+			auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
+			ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);
+
+			//LOG(INFO) << "POINTS Added";
+		}
+
+		ftl::cuda::dibr_merge(
+			f.createTexture<float4>(Channel::Points),
+			temp_.getTexture<int>(Channel::Depth),
+			params, stream
+		);
+
+		//LOG(INFO) << "DIBR DONE";
+	}
+
+	// TODO: Add the depth splatting step..
+
+	temp_.createTexture<float4>(Channel::Colour);
+	temp_.createTexture<float>(Channel::Contribution);
+
+	// Accumulate attribute contributions for each pixel
+	for (auto &f : scene_->frames) {
+		// Convert colour from BGR to BGRA if needed
+		if (f.get<GpuMat>(Channel::Colour).type() == CV_8UC3) {
+			// Convert to 4 channel colour
+			auto &col = f.get<GpuMat>(Channel::Colour);
+			GpuMat tmp(col.size(), CV_8UC4);
+			cv::cuda::swap(col, tmp);
+			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
+		}
+
+		if (is_float) {
+			ftl::cuda::dibr_attribute(
+				f.createTexture<float>(channel),
+				f.createTexture<float4>(Channel::Points),
+				temp_.getTexture<int>(Channel::Depth),
+				temp_.getTexture<float4>(Channel::Colour),
+				temp_.getTexture<float>(Channel::Contribution),
+				params, stream
+			);
+		} else if (channel == Channel::Colour || channel == Channel::Right) {
+			ftl::cuda::dibr_attribute(
+				f.createTexture<uchar4>(Channel::Colour),
+				f.createTexture<float4>(Channel::Points),
+				temp_.getTexture<int>(Channel::Depth),
+				temp_.getTexture<float4>(Channel::Colour),
+				temp_.getTexture<float>(Channel::Contribution),
+				params, stream
+			);
+		} else {
+			ftl::cuda::dibr_attribute(
+				f.createTexture<uchar4>(channel),
+				f.createTexture<float4>(Channel::Points),
+				temp_.getTexture<int>(Channel::Depth),
+				temp_.getTexture<float4>(Channel::Colour),
+				temp_.getTexture<float>(Channel::Contribution),
+				params, stream
+			);
+		}
+	}
+
+	if (is_float) {
+		// Normalise attribute contributions
+		ftl::cuda::dibr_normalise(
+			temp_.createTexture<float4>(Channel::Colour),
+			out.createTexture<float>(channel),
+			temp_.createTexture<float>(Channel::Contribution),
+			stream
+		);
+	} else {
+		// Normalise attribute contributions
+		ftl::cuda::dibr_normalise(
+			temp_.createTexture<float4>(Channel::Colour),
+			out.createTexture<uchar4>(channel),
+			temp_.createTexture<float>(Channel::Contribution),
+			stream
+		);
+	}
+}
+
 bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cudaStream_t stream) {
 	SHARED_LOCK(scene_->mtx, lk);
 	if (!src->isReady()) return false;
@@ -75,10 +182,7 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
 	params.camera = camera;
 
 	// Clear all channels to 0 or max depth
-	temp_.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
-	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
-	temp_.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
-	temp_.get<GpuMat>(Channel::Contribution).setTo(cv::Scalar(0.0f), cvstream);
+
 	out.get<GpuMat>(Channel::Depth).setTo(cv::Scalar(1000.0f), cvstream);
 	out.get<GpuMat>(Channel::Colour).setTo(cv::Scalar(76,76,76), cvstream);
 
@@ -86,82 +190,27 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
 
 	temp_.createTexture<int>(Channel::Depth);
 
-	// Render each camera into virtual view
-	for (size_t i=0; i<scene_->frames.size(); ++i) {
-		auto &f = scene_->frames[i];
-		auto *s = scene_->sources[i];
-
-		if (f.empty(Channel::Depth + Channel::Colour)) {
-			LOG(ERROR) << "Missing required channel";
-			continue;
-		}
-
-		// Needs to create points channel first?
-		if (!f.hasChannel(Channel::Points)) {
-			//LOG(INFO) << "Creating points... " << s->parameters().width;
-
-			auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
-			auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
-			ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);
-
-			//LOG(INFO) << "POINTS Added";
-		}
-
-		ftl::cuda::dibr_merge(
-			f.createTexture<float4>(Channel::Points),
-			temp_.getTexture<int>(Channel::Depth),
-			params, stream
-		);
-
-		//LOG(INFO) << "DIBR DONE";
-	}
-
-	// TODO: Add the depth splatting step..
-
-	temp_.createTexture<float4>(Channel::Colour);
-	temp_.createTexture<float>(Channel::Contribution);
-
-	// Accumulate attribute contributions for each pixel
-	for (auto &f : scene_->frames) {
-		// Convert colour from BGR to BGRA if needed
-		if (f.get<GpuMat>(Channel::Colour).type() == CV_8UC3) {
-			// Convert to 4 channel colour
-			auto &col = f.get<GpuMat>(Channel::Colour);
-			GpuMat tmp(col.size(), CV_8UC4);
-			cv::cuda::swap(col, tmp);
-			cv::cuda::cvtColor(tmp,col, cv::COLOR_BGR2BGRA);
-		}
+	renderChannel(params, out, Channel::Colour, stream);
 
-		ftl::cuda::dibr_attribute(
-			f.createTexture<uchar4>(Channel::Colour),
-			f.createTexture<float4>(Channel::Points),
-			temp_.getTexture<int>(Channel::Depth),
-			temp_.getTexture<float4>(Channel::Colour),
-			temp_.getTexture<float>(Channel::Contribution),
-			params, stream
-		);
-	}
-
-	// Normalise attribute contributions
-	ftl::cuda::dibr_normalise(
-		temp_.createTexture<float4>(Channel::Colour),
-		out.createTexture<uchar4>(Channel::Colour),
-		temp_.createTexture<float>(Channel::Contribution),
-		stream
-	);
-
 	Channel chan = src->getChannel();
-	if (chan == Channel::Depth) {
+	if (chan == Channel::Depth)
+	{
 		temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
-	} else if (chan == Channel::Energy) {
-		cv::cuda::swap(temp_.get<GpuMat>(Channel::Energy), out.create<GpuMat>(Channel::Energy));
-	} else if (chan == Channel::Right) {
+	}
+	else if (chan == Channel::Contribution)
+	{
+		cv::cuda::swap(temp_.get<GpuMat>(Channel::Contribution), out.create<GpuMat>(Channel::Contribution));
+	}
+	else if (chan == Channel::Right)
+	{
 		Eigen::Affine3f transform(Eigen::Translation3f(camera.baseline,0.0f,0.0f));
 		Eigen::Matrix4f matrix = src->getPose().cast<float>() * transform.matrix();
 		params.m_viewMatrix = MatrixConversion::toCUDA(matrix.inverse());
 		params.m_viewMatrixInverse = MatrixConversion::toCUDA(matrix);
-
-		// TODO: Repeat rendering process...
+
+		out.create<GpuMat>(Channel::Right, Format<uchar4>(camera.width, camera.height));
+		out.get<GpuMat>(Channel::Right).setTo(cv::Scalar(76,76,76), cvstream);
+		renderChannel(params, out, Channel::Right, stream);
 	}
 
 	return true;
diff --git a/components/renderers/cpp/src/splatter.cu b/components/renderers/cpp/src/splatter.cu
index c1b46fc1d5768dc15fe54bcf6e37f4655a076b56..3b1ae4b47ef0fe6b29b15d1fa20fdc9a0fd0b9bb 100644
--- a/components/renderers/cpp/src/splatter.cu
+++ b/components/renderers/cpp/src/splatter.cu
@@ -137,6 +137,70 @@ __global__ void dibr_attribute_contrib_kernel(
 	}
 }
 
+__global__ void dibr_attribute_contrib_kernel(
+		TextureObject<float> colour_in,		// Original colour image
+		TextureObject<float4> points,		// Original 3D points
+		TextureObject<int> depth_in,		// Virtual depth map
+		TextureObject<float4> colour_out,	// Accumulated output
+		//TextureObject<float4> normal_out,
+		TextureObject<float> contrib_out,
+		SplatParams params) {
+
+	//const ftl::voxhash::DepthCameraCUDA &camera = c_cameras[cam];
+
+	const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
+	//const int warp = tid / WARP_SIZE;
+	const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
+	const int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	const float3 worldPos = make_float3(points.tex2D(x, y));
+	//const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
+	if (worldPos.x == MINF) return;
+	//const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
+
+	const float3 camPos = params.m_viewMatrix * worldPos;
+	if (camPos.z < params.camera.minDepth) return;
+	if (camPos.z > params.camera.maxDepth) return;
+	const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
+
+	const int upsample = 8; //min(UPSAMPLE_MAX, int((5.0f*r) * params.camera.fx / camPos.z));
+
+	// Not on screen so stop now...
+	if (screenPos.x >= depth_in.width() || screenPos.y >= depth_in.height()) return;
+
+	// Is this point near the actual surface and therefore a contributor?
+	const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
+	//if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
+
+	// TODO:(Nick) Should just one thread load these to shared mem?
+	const float colour = (colour_in.tex2D(x, y));
+	//const float4 normal = tex2D<float4>(camera.normal, x, y);
+
+	// Each thread in warp takes an upsample point and updates corresponding depth buffer.
+	const int lane = tid % WARP_SIZE;
+	for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
+		const float u = (i % upsample) - (upsample / 2);
+		const float v = (i / upsample) - (upsample / 2);
+
+		// Use the depth buffer to determine this pixels 3D position in camera space
+		const float d = ((float)depth_in.tex2D(screenPos.x+u, screenPos.y+v)/1000.0f);
+		const float3 nearest = params.camera.screenToCam((int)(screenPos.x+u),(int)(screenPos.y+v),d);
+
+		// What is contribution of our current point at this pixel?
+		const float weight = ftl::cuda::spatialWeighting(length(nearest - camPos), SMOOTHING_MULTIPLIER_C*(nearest.z/params.camera.fx));
+		if (screenPos.x+u < colour_out.width() && screenPos.y+v < colour_out.height() && weight > 0.0f) {  // TODO: Use confidence threshold here
+			const float wcolour = colour * weight;
+			//const float4 wnormal = normal * weight;
+
+			//printf("Z %f\n", d);
+
+			// Add this points contribution to the pixel buffer
+			atomicAdd((float*)&colour_out(screenPos.x+u, screenPos.y+v), wcolour);
+			atomicAdd(&contrib_out(screenPos.x+u, screenPos.y+v), weight);
+		}
+	}
+}
+
 void ftl::cuda::dibr_attribute(
 		TextureObject<uchar4> &colour_in,	// Original colour image
 		TextureObject<float4> &points,		// Original 3D points
 		TextureObject<int> &depth_in,		// Virtual depth map
 		TextureObject<float4> &colour_out,	// Accumulated output
 		//TextureObject<float4> normal_out,
 		TextureObject<float> &contrib_out,
 		SplatParams &params, cudaStream_t stream) {
@@ -159,6 +223,28 @@ void ftl::cuda::dibr_attribute(
 	cudaSafeCall( cudaGetLastError() );
 }
 
+void ftl::cuda::dibr_attribute(
+		TextureObject<float> &colour_in,	// Original colour image
+		TextureObject<float4> &points,		// Original 3D points
+		TextureObject<int> &depth_in,		// Virtual depth map
+		TextureObject<float4> &colour_out,	// Accumulated output
+		//TextureObject<float4> normal_out,
+		TextureObject<float> &contrib_out,
+		SplatParams &params, cudaStream_t stream) {
+	const dim3 gridSize((depth_in.width() + 2 - 1)/2, (depth_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(2*WARP_SIZE, T_PER_BLOCK);
+
+	dibr_attribute_contrib_kernel<<<gridSize, blockSize, 0, stream>>>(
+		colour_in,
+		points,
+		depth_in,
+		colour_out,
+		contrib_out,
+		params
+	);
+	cudaSafeCall( cudaGetLastError() );
+}
+
 //==============================================================================
 
 __global__ void dibr_normalise_kernel(
@@ -181,6 +267,26 @@ __global__ void dibr_normalise_kernel(
 	}
 }
 
+__global__ void dibr_normalise_kernel(
+		TextureObject<float4> colour_in,
+		TextureObject<float> colour_out,
+		//TextureObject<float4> normals,
+		TextureObject<float> contribs) {
+	const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+	const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+	if (x < colour_in.width() && y < colour_in.height()) {
+		const float4 colour = colour_in.tex2D((int)x,(int)y);
+		//const float4 normal = normals.tex2D((int)x,(int)y);
+		const float contrib = contribs.tex2D((int)x,(int)y);
+
+		if (contrib > 0.0f) {
+			colour_out(x,y) = colour.x / contrib;
+			//normals(x,y) = normal / contrib;
+		}
+	}
+}
+
 void ftl::cuda::dibr_normalise(TextureObject<float4> &colour_in, TextureObject<uchar4> &colour_out, TextureObject<float> &contribs, cudaStream_t stream) {
 	const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
 	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
@@ -188,3 +294,11 @@ void ftl::cuda::dibr_normalise(TextureObject<float4> &colour_in, TextureObject<u
 	dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(colour_in, colour_out, contribs);
 	cudaSafeCall( cudaGetLastError() );
 }
+
+void ftl::cuda::dibr_normalise(TextureObject<float4> &colour_in, TextureObject<float> &colour_out, TextureObject<float> &contribs, cudaStream_t stream) {
+	const dim3 gridSize((colour_in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (colour_in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+	const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+	dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(colour_in, colour_out, contribs);
+	cudaSafeCall( cudaGetLastError() );
+}
diff --git a/components/renderers/cpp/src/splatter_cuda.hpp b/components/renderers/cpp/src/splatter_cuda.hpp
index 8f6557b7f3e9d9ce99bceed615946c19b1afbec2..8c57d58486c04aff5960f215c6a6308719e6af7b 100644
--- a/components/renderers/cpp/src/splatter_cuda.hpp
+++ b/components/renderers/cpp/src/splatter_cuda.hpp
@@ -6,22 +6,41 @@
 
 namespace ftl {
 namespace cuda {
-	void dibr_merge(ftl::cuda::TextureObject<float4> &points, ftl::cuda::TextureObject<int> &depth, ftl::render::SplatParams params, cudaStream_t stream);
+	void dibr_merge(
+		ftl::cuda::TextureObject<float4> &points,
+		ftl::cuda::TextureObject<int> &depth,
+		ftl::render::SplatParams params,
+		cudaStream_t stream);
 
-	void dibr_attribute(
-		ftl::cuda::TextureObject<uchar4> &colour_in,	// Original colour image
-		ftl::cuda::TextureObject<float4> &points,		// Original 3D points
-		ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
-		ftl::cuda::TextureObject<float4> &colour_out,	// Accumulated output
-		//TextureObject<float4> normal_out,
-		ftl::cuda::TextureObject<float> &contrib_out,
-		ftl::render::SplatParams &params, cudaStream_t stream);
+	void dibr_attribute(
+		ftl::cuda::TextureObject<uchar4> &in,	// Original colour image
+		ftl::cuda::TextureObject<float4> &points,		// Original 3D points
+		ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
+		ftl::cuda::TextureObject<float4> &out,	// Accumulated output
+		//TextureObject<float4> normal_out,
+		ftl::cuda::TextureObject<float> &contrib_out,
+		ftl::render::SplatParams &params, cudaStream_t stream);
 
-	void dibr_normalise(
-		ftl::cuda::TextureObject<float4> &colour_in,
-		ftl::cuda::TextureObject<uchar4> &colour_out,
-		ftl::cuda::TextureObject<float> &contribs,
-		cudaStream_t stream);
+	void dibr_attribute(
+		ftl::cuda::TextureObject<float> &in,	// Original colour image
+		ftl::cuda::TextureObject<float4> &points,		// Original 3D points
+		ftl::cuda::TextureObject<int> &depth_in,		// Virtual depth map
+		ftl::cuda::TextureObject<float4> &out,	// Accumulated output
+		//TextureObject<float4> normal_out,
+		ftl::cuda::TextureObject<float> &contrib_out,
+		ftl::render::SplatParams &params, cudaStream_t stream);
+
+	void dibr_normalise(
+		ftl::cuda::TextureObject<float4> &in,
+		ftl::cuda::TextureObject<uchar4> &out,
+		ftl::cuda::TextureObject<float> &contribs,
+		cudaStream_t stream);
+
+	void dibr_normalise(
+		ftl::cuda::TextureObject<float4> &in,
+		ftl::cuda::TextureObject<float> &out,
+		ftl::cuda::TextureObject<float> &contribs,
+		cudaStream_t stream);
 }
 }
diff --git a/components/rgbd-sources/src/realsense_source.cpp b/components/rgbd-sources/src/realsense_source.cpp
index df4c0fe2535426ac52808ea985911968efb74e15..b458aa3e77c8557ee828a8f71431bb5e4e665066 100644
--- a/components/rgbd-sources/src/realsense_source.cpp
+++ b/components/rgbd-sources/src/realsense_source.cpp
@@ -57,6 +57,9 @@ bool RealsenseSource::compute(int n, int b) {
 	cv::Mat tmp(cv::Size((int)w, (int)h), CV_16UC1, (void*)depth.get_data(), depth.get_stride_in_bytes());
 	tmp.convertTo(depth_, CV_32FC1, scale_);
 	rgb_ = cv::Mat(cv::Size(w, h), CV_8UC4, (void*)rscolour_.get_data(), cv::Mat::AUTO_STEP);
+
+	auto cb = host_->callback();
+	if (cb) cb(timestamp_, rgb_, depth_);
 	return true;
 }
 
diff --git a/components/rgbd-sources/src/streamer.cpp b/components/rgbd-sources/src/streamer.cpp
index 7a9118c9f47975a31d6389982b2adb818ed8a046..676cf58cf3045111cf966d78d587c0e918a351e7 100644
--- a/components/rgbd-sources/src/streamer.cpp
+++ b/components/rgbd-sources/src/streamer.cpp
@@ -496,7 +496,7 @@ void Streamer::_transmitPacket(StreamSource *src, const ftl::codecs::Packet &pkt
 		frame_no_,
 		static_cast<uint8_t>((chan & 0x1) | ((hasChan2) ? 0x2 : 0x0))
 	};
-
+	LOG(INFO) << "codec:" << (int) pkt.codec;
 	// Lock to prevent clients being added / removed
 	//SHARED_LOCK(src->mutex,lk);
 	auto c = src->clients.begin();
diff --git a/components/rgbd-sources/src/virtual.cpp b/components/rgbd-sources/src/virtual.cpp
index 62b404155e1f302111e99183347d87c9dbda3a0b..0e6db973884a3c8361fcaae764cc1688d2434d9d 100644
--- a/components/rgbd-sources/src/virtual.cpp
+++ b/components/rgbd-sources/src/virtual.cpp
@@ -36,13 +36,19 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
 				LOG(ERROR) << "Unknown exception in render callback";
 			}
 
-			if (frame.hasChannel(Channel::Colour) && frame.hasChannel(Channel::Depth)) {
-				frame.download(Channel::Colour + Channel::Depth);
-				cv::swap(frame.get<cv::Mat>(Channel::Colour), rgb_);
-				cv::swap(frame.get<cv::Mat>(Channel::Depth), depth_);
-				LOG(INFO) << "Written: " << rgb_.cols;
+			if (frame.hasChannel(Channel::Colour)) {
+				frame.download(Channel::Colour);
+				cv::swap(frame.get<cv::Mat>(Channel::Colour), rgb_);
 			} else {
-				LOG(ERROR) << "Missing colour or depth frame in rendering";
+				LOG(ERROR) << "Missing channel 1 frame in rendering";
+			}
+
+			if ((host_->getChannel() != Channel::None) &&
+					frame.hasChannel(host_->getChannel())) {
+				frame.download(host_->getChannel());
+				cv::swap(frame.get<cv::Mat>(host_->getChannel()), depth_);
+			} else {
+				LOG(ERROR) << "Missing channel 2 frame in rendering";
 			}
 
 			auto cb = host_->callback();
diff --git a/web-service/src/index.js b/web-service/src/index.js
index c6b37247ac0c0e63ad0eefbd8b81ad5594d6b432..7dc39a2a177eb8e9999f0a889740245ec54efcd6 100644
--- a/web-service/src/index.js
+++ b/web-service/src/index.js
@@ -32,8 +32,8 @@ function RGBDClient(peer, N, rate, dest) {
 /**
  * Actually send a frame over network to the client.
  */
-RGBDClient.prototype.push = function(uri, frame, ttime, chunk, rgb, depth) {
-	this.peer.send(uri, frame, ttime, chunk, rgb, depth);
+RGBDClient.prototype.push = function(uri, latency, spacket, packet) {
+	this.peer.send(uri, latency, spacket, packet);
 	this.txcount++;
 }
 
@@ -58,14 +58,23 @@ function RGBDStream(uri, peer) {
 	this.rxmax = 10;
 
 	// Add RPC handler to receive frames from the source
-	peer.bind(uri, (frame, ttime, chunk, rgb, depth) => {
+	peer.bind(uri, (latency, spacket, packet) => {
 		// Forward frames to all clients
-		this.pushFrames(frame, ttime, chunk, rgb, depth);
+		this.pushFrames(latency, spacket, packet);
 		this.rxcount++;
 		if (this.rxcount >= this.rxmax && this.clients.length > 0) {
 			this.subscribe();
 		}
 	});
+
+	/*peer.bind(uri, (frame, ttime, chunk, rgb, depth) => {
+		// Forward frames to all clients
+		this.pushFrames(frame, ttime, chunk, rgb, depth);
+		this.rxcount++;
+		if (this.rxcount >= this.rxmax && this.clients.length > 0) {
+			this.subscribe();
+		}
+	});*/
 }
 
 RGBDStream.prototype.addClient = function(peer, N, rate, dest) {
@@ -85,15 +94,19 @@ RGBDStream.prototype.subscribe = function() {
 	this.rxcount = 0;
 	this.rxmax = 10;
 	//console.log("Subscribe to ", this.uri);
-	this.peer.send("get_stream", this.uri, 10, 0, [Peer.uuid], this.uri);
+	// TODO: Don't hard code 9 here, instead use 9 for thumbnails and 0 for
+	// the video...
+	this.peer.send("get_stream", this.uri, 10, 9, [Peer.uuid], this.uri);
 }
 
-RGBDStream.prototype.pushFrames = function(frame, ttime, chunk, rgb, depth) {
-	this.rgb = rgb;
-	this.depth = depth;
+RGBDStream.prototype.pushFrames = function(latency, spacket, packet) {
+	if (spacket[1] & 0x1) this.depth = packet[4];
+	else this.rgb = packet[4];
+
+	console.log("Frame = ", packet[0], packet[1]);
 
 	for (let i=0; i < this.clients.length; i++) {
-		this.clients[i].push(this.uri, frame, ttime, chunk, rgb, depth);
+		this.clients[i].push(this.uri, latency, spacket, packet);
 	}
 
 	let i=0;
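// ---------------------------------------------------------------------------
// Reviewer note (illustration only, not part of the patch): the hunks above
// replace the bare `throw 1` / `throw -1` RPC failures with ftl::exception
// from <ftl/exception.hpp>. Below is a minimal sketch of how calling code
// might handle the new failure path. It assumes only what the diff shows
// (an ftl::exception constructed from a message string); the RPC name
// "source_count" and the helper function are hypothetical, and whether
// ftl::exception derives from std::exception is not established here.
#include <ftl/exception.hpp>
#include <ftl/net/universe.hpp>
#include <loguru.hpp>

int querySourceCount(ftl::net::Universe &net, const ftl::UUID &peer) {
	try {
		// Universe::call now throws ftl::exception for an unknown or
		// disconnected peer, and Peer::call throws "RPC failed with timeout".
		return net.call<int>(peer, "source_count");
	} catch (const ftl::exception &) {
		LOG(ERROR) << "RPC to peer failed";
		return -1;
	}
}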