diff --git a/applications/reconstruct/src/ilw/ilw.cpp b/applications/reconstruct/src/ilw/ilw.cpp
index 65e1a19dfe35e89019f5b3aa9dfbe85310b03f33..1400592e48273f2dee21d45d87a7225ab6a840ae 100644
--- a/applications/reconstruct/src/ilw/ilw.cpp
+++ b/applications/reconstruct/src/ilw/ilw.cpp
@@ -22,6 +22,7 @@ ILW::ILW(nlohmann::json &config) : ftl::Configurable(config) {
     params_.colour_smooth = value("colour_smooth", 50.0f);
     params_.spatial_smooth = value("spatial_smooth", 0.04f);
     params_.cost_ratio = value("cost_ratio", 0.75f);
+    discon_mask_ = value("discontinuity_mask",2);
 
     on("ilw_align", [this](const ftl::config::Event &e) {
         enabled_ = value("ilw_align", true);
@@ -39,6 +40,10 @@ ILW::ILW(nlohmann::json &config) : ftl::Configurable(config) {
         motion_window_ = value("motion_window", 3);
     });
 
+    on("discontinuity_mask", [this](const ftl::config::Event &e) {
+        discon_mask_ = value("discontinuity_mask", 2);
+    });
+
     on("use_Lab", [this](const ftl::config::Event &e) {
         use_lab_ = value("use_Lab", false);
     });
@@ -116,7 +121,7 @@ bool ILW::_phase0(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
         auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
         auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
-        ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);
+        ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, discon_mask_, stream);
 
         // TODO: Create energy vector texture and clear it
         // Create energy and clear it
 
diff --git a/applications/reconstruct/src/ilw/ilw.hpp b/applications/reconstruct/src/ilw/ilw.hpp
index 66de927a285716b2a1d9efd1b2fdb8577a1f6149..96a5341544471ed4fbe6f6269f6531177a26b9ef 100644
--- a/applications/reconstruct/src/ilw/ilw.hpp
+++ b/applications/reconstruct/src/ilw/ilw.hpp
@@ -69,6 +69,7 @@ class ILW : public ftl::Configurable {
     float motion_rate_;
     int motion_window_;
     bool use_lab_;
+    int discon_mask_;
 };
 
 }
diff --git a/applications/reconstruct/src/main.cpp b/applications/reconstruct/src/main.cpp
index 92a65ea44c3598fd257d53ac6c10c22dafdba0e6..c2f357e187e99c04c190df448346b1954d9e95a9 100644
--- a/applications/reconstruct/src/main.cpp
+++ b/applications/reconstruct/src/main.cpp
@@ -136,6 +136,9 @@ static void run(ftl::Configurable *root) {
     ftl::rgbd::Group group;
     ftl::ILW *align = ftl::create<ftl::ILW>(root, "merge");
 
+    int o = root->value("origin_pose", 0) % sources.size();
+    virt->setPose(sources[o]->getPose());
+
     // Generate virtual camera render when requested by streamer
     virt->onRender([splat,virt,&scene_B,align](ftl::rgbd::Frame &out) {
         virt->setTimestamp(scene_B.timestamp);
diff --git a/components/renderers/cpp/include/ftl/cuda/normals.hpp b/components/renderers/cpp/include/ftl/cuda/normals.hpp
index f692e02a44b773ad07b7f5f982788f5ff83c7070..5481b4662a937bb6915b53b2150c3782b937af2e 100644
--- a/components/renderers/cpp/include/ftl/cuda/normals.hpp
+++ b/components/renderers/cpp/include/ftl/cuda/normals.hpp
@@ -17,6 +17,11 @@ void normal_visualise(ftl::cuda::TextureObject<float4> &norm,
         const ftl::rgbd::Camera &camera, const float4x4 &pose,
         cudaStream_t stream);
 
+void normal_filter(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<float4> &points,
+        const ftl::rgbd::Camera &camera, const float4x4 &pose,
+        float thresh, cudaStream_t stream);
+
 }
 }
 
diff --git a/components/renderers/cpp/include/ftl/cuda/points.hpp b/components/renderers/cpp/include/ftl/cuda/points.hpp
index cb996c4ec68a794b2b9ede7a03cf2b2c5d92047d..a705e39e2c9f4e94f3c216f6f9168586ede4811c 100644
--- a/components/renderers/cpp/include/ftl/cuda/points.hpp
+++ b/components/renderers/cpp/include/ftl/cuda/points.hpp
@@ -16,7 +16,7 @@ struct ClipSpace {
 
 void point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth,
         const ftl::rgbd::Camera &params,
-        const float4x4 &pose, cudaStream_t stream);
+        const float4x4 &pose, uint discon, cudaStream_t stream);
 
 void clipping(ftl::cuda::TextureObject<float4> &points,
         const ClipSpace &clip, cudaStream_t stream);
diff --git a/components/renderers/cpp/include/ftl/render/splat_params.hpp b/components/renderers/cpp/include/ftl/render/splat_params.hpp
index 4f9c8882b161d7774388e8d9fff7337cb1d6e685..5bd7d2edbc0b30863e5ecfe04f2e7bc0ce662f20 100644
--- a/components/renderers/cpp/include/ftl/render/splat_params.hpp
+++ b/components/renderers/cpp/include/ftl/render/splat_params.hpp
@@ -8,7 +8,7 @@ namespace ftl {
 namespace render {
 
-static const uint kShowBlockBorders = 0x00000001;  // Deprecated: from voxels system
+static const uint kShowDisconMask = 0x00000001;
 static const uint kNoSplatting = 0x00000002;
 static const uint kNoUpsampling = 0x00000004;
 static const uint kNoTexturing = 0x00000008;
 
diff --git a/components/renderers/cpp/include/ftl/render/splat_render.hpp b/components/renderers/cpp/include/ftl/render/splat_render.hpp
index ca7bfa2fba66c2bb1673a6fd153625065960775b..bca5a232b937479234866d42847896f653bd70d9 100644
--- a/components/renderers/cpp/include/ftl/render/splat_render.hpp
+++ b/components/renderers/cpp/include/ftl/render/splat_render.hpp
@@ -43,6 +43,7 @@ class Splatter : public ftl::render::Renderer {
     ftl::rgbd::FrameSet *scene_;
     ftl::cuda::ClipSpace clip_;
     bool clipping_;
+    float norm_filter_;
 };
 
 }
diff --git a/components/renderers/cpp/src/normals.cu b/components/renderers/cpp/src/normals.cu
index 040f761b0f42b92ceacfeeca63be843e55e39ac3..ad6c34cf4cadcda6f46fa677f48502099ef1a013 100644
--- a/components/renderers/cpp/src/normals.cu
+++ b/components/renderers/cpp/src/normals.cu
@@ -20,14 +20,14 @@ __global__ void computeNormals_kernel(ftl::cuda::TextureObject<float4> output,
     const float3 MC = make_float3(input.tex2D((int)x+0, (int)y-1)); //[(y-1)*width+(x+0)];
     const float3 CM = make_float3(input.tex2D((int)x-1, (int)y+0)); //[(y+0)*width+(x-1)];
 
-    //if(CC.x != MINF && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF) {
+    if(CC.x != MINF) { // && PC.x != MINF && CP.x != MINF && MC.x != MINF && CM.x != MINF) {
         const float3 n = cross(PC-MC, CP-CM);
         const float l = length(n);
 
         if(l > 0.0f) {
             output(x,y) = make_float4(n/-l, 1.0f);
         }
 
-    //}
+    }
     }
 }
@@ -51,6 +51,7 @@ __global__ void smooth_normals_kernel(ftl::cuda::TextureObject<float4> norms,
             const float3 p = make_float3(points.tex2D((int)x+u,(int)y+v));
             if (p.x == MINF) continue;
             const float s = ftl::cuda::spatialWeighting(p0, p, smoothing);
+            //const float s = 1.0f;
 
             if (s > 0.0f) {
                 const float4 n = norms.tex2D((int)x+u,(int)y+v);
@@ -76,7 +77,7 @@ void ftl::cuda::normals(ftl::cuda::TextureObject<float4> &output,
     computeNormals_kernel<<<gridSize, blockSize, 0, stream>>>(temp, input);
     cudaSafeCall( cudaGetLastError() );
 
-    smooth_normals_kernel<1><<<gridSize, blockSize, 0, stream>>>(temp, output, input, 0.04f);
+    smooth_normals_kernel<3><<<gridSize, blockSize, 0, stream>>>(temp, output, input, 0.04f);
     cudaSafeCall( cudaGetLastError() );
 
     #ifdef _DEBUG
@@ -96,14 +97,19 @@ __global__ void vis_normals_kernel(ftl::cuda::TextureObject<float4> norm,
     if(x >= norm.width() || y >= norm.height()) return;
     output(x,y) = 0.0f;
-    float3 ray = make_float3(0.0f, 0.0f, 1.0f); //pose * camera.screenToCam(x,y,1.0f);
+    float3 ray = pose.getFloat3x3() * camera.screenToCam(x,y,1.0f);
     ray = ray / length(ray);
 
     float3 n = make_float3(norm.tex2D((int)x,(int)y));
     float l = length(n);
     if (l == 0) return;
     n /= l;
 
-    output(x,y) = (1.0f + dot(ray, n))*3.5f;  // FIXME: Do not hard code these value scalings
+    const float d = dot(ray, n);
+    output(x,y) = (1.0f + d)*3.5f;  // FIXME: Do not hard code these value scalings
+
+    //if (d > 0.2f) {
+    //    output(x,y) = d * 7.0f;
+    //}
 }
 
 void ftl::cuda::normal_visualise(ftl::cuda::TextureObject<float4> &norm,
@@ -122,3 +128,47 @@
     //cutilCheckMsg(__FUNCTION__);
     #endif
 }
+
+//==============================================================================
+
+__global__ void filter_normals_kernel(ftl::cuda::TextureObject<float4> norm,
+        ftl::cuda::TextureObject<float4> output,
+        ftl::rgbd::Camera camera, float4x4 pose, float thresh) {
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if(x >= norm.width() || y >= norm.height()) return;
+
+    float3 ray = pose.getFloat3x3() * camera.screenToCam(x,y,1.0f);
+    ray = ray / length(ray);
+    float3 n = make_float3(norm.tex2D((int)x,(int)y));
+    float l = length(n);
+    if (l == 0) {
+        output(x,y) = make_float4(MINF);
+        return;
+    }
+    n /= l;
+
+    const float d = dot(ray, n);
+    if (d <= thresh) {
+        output(x,y) = make_float4(MINF);
+    }
+}
+
+void ftl::cuda::normal_filter(ftl::cuda::TextureObject<float4> &norm,
+        ftl::cuda::TextureObject<float4> &output,
+        const ftl::rgbd::Camera &camera, const float4x4 &pose,
+        float thresh,
+        cudaStream_t stream) {
+
+    const dim3 gridSize((norm.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (norm.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
+    const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
+
+    filter_normals_kernel<<<gridSize, blockSize, 0, stream>>>(norm, output, camera, pose, thresh);
+
+    cudaSafeCall( cudaGetLastError() );
+    #ifdef _DEBUG
+    cudaSafeCall(cudaDeviceSynchronize());
+    //cutilCheckMsg(__FUNCTION__);
+    #endif
+}
diff --git a/components/renderers/cpp/src/points.cu b/components/renderers/cpp/src/points.cu
index 1bfca42fcdfa0332a5f3701635c7dede497dae5c..f73eaa2085e901991f9cc40580a3273864ceda4f 100644
--- a/components/renderers/cpp/src/points.cu
+++ b/components/renderers/cpp/src/points.cu
@@ -2,25 +2,61 @@
 
 #define T_PER_BLOCK 8
 
+template <int RADIUS>
 __global__ void point_cloud_kernel(ftl::cuda::TextureObject<float4> output, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params, float4x4 pose)
 {
     const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
     const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
 
     if (x < params.width && y < params.height) {
+        output(x,y) = make_float4(MINF, MINF, MINF, MINF);
+
+        const float d = depth.tex2D((int)x, (int)y);
+        float p = d;
+
+        if (d >= params.minDepth && d <= params.maxDepth) {
+            /* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
+            // Is there a discontinuity nearby?
+            for (int u=-RADIUS; u<=RADIUS; ++u) {
+                for (int v=-RADIUS; v<=RADIUS; ++v) {
+                    // If yes, the flag using w = -1
+                    if (fabs(depth.tex2D((int)x+u, (int)y+v) - d) > 0.1f) p = -1.0f;
+                }
+            }
+
+            output(x,y) = make_float4(pose * params.screenToCam(x, y, d), p);
+        }
+    }
+}
+
+template <>
+__global__ void point_cloud_kernel<0>(ftl::cuda::TextureObject<float4> output, ftl::cuda::TextureObject<float> depth, ftl::rgbd::Camera params, float4x4 pose)
+{
+    const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
+    const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if (x < params.width && y < params.height) {
+        output(x,y) = make_float4(MINF, MINF, MINF, MINF);
+
         float d = depth.tex2D((int)x, (int)y);
 
-        output(x,y) = (d >= params.minDepth && d <= params.maxDepth) ?
-            make_float4(pose * params.screenToCam(x, y, d), 0.0f) :
-            make_float4(MINF, MINF, MINF, MINF);
+        if (d >= params.minDepth && d <= params.maxDepth) {
+            output(x,y) = make_float4(pose * params.screenToCam(x, y, d), d);
+        }
     }
 }
 
-void ftl::cuda::point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, const float4x4 &pose, cudaStream_t stream) {
+void ftl::cuda::point_cloud(ftl::cuda::TextureObject<float4> &output, ftl::cuda::TextureObject<float> &depth, const ftl::rgbd::Camera &params, const float4x4 &pose, uint discon, cudaStream_t stream) {
     const dim3 gridSize((params.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (params.height + T_PER_BLOCK - 1)/T_PER_BLOCK);
     const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
 
-    point_cloud_kernel<<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose);
+    switch (discon) {
+    case 4 : point_cloud_kernel<4><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+    case 3 : point_cloud_kernel<3><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+    case 2 : point_cloud_kernel<2><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+    case 1 : point_cloud_kernel<1><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose); break;
+    default: point_cloud_kernel<0><<<gridSize, blockSize, 0, stream>>>(output, depth, params, pose);
+    }
     cudaSafeCall( cudaGetLastError() );
 
     #ifdef _DEBUG
diff --git a/components/renderers/cpp/src/splat_render.cpp b/components/renderers/cpp/src/splat_render.cpp
index ceaff532557ee274ebf5ec6390f7110d07a1b961..6635d1ab4295d708fef458214e613d0c554f03b4 100644
--- a/components/renderers/cpp/src/splat_render.cpp
+++ b/components/renderers/cpp/src/splat_render.cpp
@@ -49,6 +49,11 @@ Splatter::Splatter(nlohmann::json &config, ftl::rgbd::FrameSet *fs) : ftl::rende
     on("clipping_enabled", [this](const ftl::config::Event &e) {
         clipping_ = value("clipping_enabled", true);
     });
+
+    norm_filter_ = value("normal_filter", -1.0f);
+    on("normal_filter", [this](const ftl::config::Event &e) {
+        norm_filter_ = value("normal_filter", -1.0f);
+    });
 }
 
 Splatter::~Splatter() {
@@ -80,22 +85,6 @@ void Splatter::renderChannel(
             continue;
         }
 
-        // Needs to create points channel first?
-        if (!f.hasChannel(Channel::Points)) {
-            //LOG(INFO) << "Creating points... " << s->parameters().width;
-
-            auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
-            auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
-            ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);
-
-            //LOG(INFO) << "POINTS Added";
-        }
-
-        // Clip first?
-        if (clipping_) {
-            ftl::cuda::clipping(f.createTexture<float4>(Channel::Points), clip_, stream);
-        }
-
         ftl::cuda::dibr_merge(
             f.createTexture<float4>(Channel::Points),
             temp_.getTexture<int>(Channel::Depth),
@@ -235,9 +224,10 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
     // Parameters object to pass to CUDA describing the camera
     SplatParams params;
     params.m_flags = 0;
-    if (src->value("splatting", true) == false) params.m_flags |= ftl::render::kNoSplatting;
-    if (src->value("upsampling", true) == false) params.m_flags |= ftl::render::kNoUpsampling;
-    if (src->value("texturing", true) == false) params.m_flags |= ftl::render::kNoTexturing;
+    if (src->value("show_discontinuity_mask", false)) params.m_flags |= ftl::render::kShowDisconMask;
+    //if (src->value("splatting", true) == false) params.m_flags |= ftl::render::kNoSplatting;
+    //if (src->value("upsampling", true) == false) params.m_flags |= ftl::render::kNoUpsampling;
+    //if (src->value("texturing", true) == false) params.m_flags |= ftl::render::kNoTexturing;
     params.m_viewMatrix = MatrixConversion::toCUDA(src->getPose().cast<float>().inverse());
     params.m_viewMatrixInverse = MatrixConversion::toCUDA(src->getPose().cast<float>());
     params.camera = camera;
@@ -251,6 +241,42 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
 
     temp_.createTexture<int>(Channel::Depth);
 
+    // First make sure each input has normals
+    temp_.createTexture<float4>(Channel::Normals);
+    for (int i=0; i<scene_->frames.size(); ++i) {
+        auto &f = scene_->frames[i];
+        auto s = scene_->sources[i];
+
+        // Needs to create points channel first?
+        if (!f.hasChannel(Channel::Points)) {
+            //LOG(INFO) << "Creating points... " << s->parameters().width;
+
+            auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
+            auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
+            ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, 0, stream);
+
+            //LOG(INFO) << "POINTS Added";
+        }
+
+        // Clip first?
+        if (clipping_) {
+            ftl::cuda::clipping(f.createTexture<float4>(Channel::Points), clip_, stream);
+        }
+
+        if (!f.hasChannel(Channel::Normals)) {
+            auto &g = f.get<GpuMat>(Channel::Colour);
+            ftl::cuda::normals(f.createTexture<float4>(Channel::Normals, Format<float4>(g.cols, g.rows)),
+                temp_.getTexture<float4>(Channel::Normals),  // FIXME: Uses assumption of vcam res same as input res
+                f.getTexture<float4>(Channel::Points), stream);
+
+            if (norm_filter_ > -0.1f) {
+                Eigen::Matrix4f matrix = s->getPose().cast<float>();
+                auto pose = MatrixConversion::toCUDA(matrix);
+                ftl::cuda::normal_filter(f.getTexture<float4>(Channel::Normals), f.getTexture<float4>(Channel::Points), s->parameters(), pose, norm_filter_, stream);
+            }
+        }
+    }
+
     renderChannel(params, out, Channel::Colour, stream);
 
     Channel chan = src->getChannel();
@@ -258,31 +284,13 @@ bool Splatter::render(ftl::rgbd::VirtualSource *src, ftl::rgbd::Frame &out, cuda
     {
         temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
     } else if (chan == Channel::Normals) {
-        //temp_.get<GpuMat>(Channel::Depth).convertTo(out.get<GpuMat>(Channel::Depth), CV_32F, 1.0f / 1000.0f, cvstream);
-        //ftl::cuda::point_cloud(temp_.createTexture<float4>(Channel::Points, Format<float4>(camera.width, camera.height)),
-        //    temp_.createTexture<float>(Channel::Depth), camera, params.m_viewMatrixInverse, stream);
-        //ftl::cuda::normals(temp_.getTexture<float4>(Channel::Normals), temp_.getTexture<float4>(Channel::Points), stream);
-        //ftl::cuda::normal_visualise(temp_.getTexture<float4>(Channel::Normals), temp_.getTexture<float>(Channel::Contribution), camera);
-
-        // First make sure each input has normals
-        temp_.createTexture<float4>(Channel::Normals);
-        for (auto &f : scene_->frames) {
-            if (!f.hasChannel(Channel::Normals)) {
-                auto &g = f.get<GpuMat>(Channel::Colour);
-                LOG(INFO) << "Make normals channel";
-                ftl::cuda::normals(f.createTexture<float4>(Channel::Normals, Format<float4>(g.cols, g.rows)),
-                    temp_.getTexture<float4>(Channel::Normals),  // FIXME: Uses assumption of vcam res same as input res
-                    f.getTexture<float4>(Channel::Points), stream);
-            }
-        }
-
         out.create<GpuMat>(Channel::Normals, Format<float4>(camera.width, camera.height));
 
         // Render normal attribute
         renderChannel(params, out, Channel::Normals, stream);
 
         // Convert normal to single float value
-        ftl::cuda::normal_visualise(out.getTexture<float4>(Channel::Normals), temp_.getTexture<float>(Channel::Contribution), camera, params.m_viewMatrix, stream);
+        ftl::cuda::normal_visualise(out.getTexture<float4>(Channel::Normals), temp_.getTexture<float>(Channel::Contribution), camera, params.m_viewMatrixInverse, stream);
 
         // Put in output as single float
         cv::cuda::swap(temp_.get<GpuMat>(Channel::Contribution), out.create<GpuMat>(Channel::Normals));
diff --git a/components/renderers/cpp/src/splatter.cu b/components/renderers/cpp/src/splatter.cu
index b89b6083ee25318f294943df9336ca6d1700f148..6f94b7636d4367ae4b465ad58e16163264c3a361 100644
--- a/components/renderers/cpp/src/splatter.cu
+++ b/components/renderers/cpp/src/splatter.cu
@@ -24,11 +24,11 @@ using ftl::render::SplatParams;
     const int x = blockIdx.x*blockDim.x + threadIdx.x;
     const int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-    const float3 worldPos = make_float3(points.tex2D(x, y));
-    if (worldPos.x == MINF) return;
+    const float4 worldPos = points.tex2D(x, y);
+    if (worldPos.x == MINF || (!(params.m_flags & ftl::render::kShowDisconMask) && worldPos.w < 0.0f)) return;
 
     // Find the virtual screen position of current point
-    const float3 camPos = params.m_viewMatrix * worldPos;
+    const float3 camPos = params.m_viewMatrix * make_float3(worldPos);
     if (camPos.z < params.camera.minDepth) return;
     if (camPos.z > params.camera.maxDepth) return;
 
@@ -61,7 +61,7 @@ __device__ inline float4 make_float4(const uchar4 &c) {
 #define ENERGY_THRESHOLD 0.1f
 #define SMOOTHING_MULTIPLIER_A 10.0f  // For surface search
 #define SMOOTHING_MULTIPLIER_B 4.0f   // For z contribution
-#define SMOOTHING_MULTIPLIER_C 4.0f   // For colour contribution
+#define SMOOTHING_MULTIPLIER_C 1.0f   // For colour contribution
 
 #define ACCUM_DIAMETER 8
 
@@ -84,12 +84,12 @@ __global__ void dibr_attribute_contrib_kernel(
     const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
     const int y = blockIdx.y*blockDim.y + threadIdx.y;
 
-    const float3 worldPos = make_float3(points.tex2D(x, y));
+    const float4 worldPos = points.tex2D(x, y);
     //const float3 normal = make_float3(tex2D<float4>(camera.normal, x, y));
     if (worldPos.x == MINF) return;
     //const float r = (camera.poseInverse * worldPos).z / camera.params.fx;
 
-    const float3 camPos = params.m_viewMatrix * worldPos;
+    const float3 camPos = params.m_viewMatrix * make_float3(worldPos);
     if (camPos.z < params.camera.minDepth) return;
     if (camPos.z > params.camera.maxDepth) return;
     const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
@@ -103,8 +103,9 @@ __global__ void dibr_attribute_contrib_kernel(
     const float d = ((float)depth_in.tex2D((int)screenPos.x, (int)screenPos.y)/1000.0f);
     //if (abs(d - camPos.z) > DEPTH_THRESHOLD) return;
 
-    // TODO:(Nick) Should just one thread load these to shared mem?
-    const float4 colour = make_float4(colour_in.tex2D(x, y));
+    const float4 colour = (params.m_flags & ftl::render::kShowDisconMask && worldPos.w < 0.0f) ?
+            make_float4(0.0f,0.0f,255.0f,255.0f) :  // Show discontinuity mask in red
+            make_float4(colour_in.tex2D(x, y));
     //const float4 normal = tex2D<float4>(camera.normal, x, y);
 
     // Each thread in warp takes an upsample point and updates corresponding depth buffer.
diff --git a/components/rgbd-sources/src/virtual.cpp b/components/rgbd-sources/src/virtual.cpp
index 0166626957f2ae16ea5b9e3fa9f960dc2fc22538..9d9ccc852fd94a6aa701e7da7e4b4f48cad9d0a0 100644
--- a/components/rgbd-sources/src/virtual.cpp
+++ b/components/rgbd-sources/src/virtual.cpp
@@ -8,7 +8,8 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
     public:
     explicit VirtualImpl(ftl::rgbd::Source *host, const ftl::rgbd::Camera &params) : ftl::rgbd::detail::Source(host) {
         params_ = params;
-        capabilities_ = ftl::rgbd::kCapMovable | ftl::rgbd::kCapVideo | ftl::rgbd::kCapStereo;
+        capabilities_ = ftl::rgbd::kCapVideo | ftl::rgbd::kCapStereo;
+        if (!host->value("locked", false)) capabilities_ |= ftl::rgbd::kCapMovable;
     }
 
     ~VirtualImpl() {
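
Note: the options introduced above are all read through ftl::Configurable::value() calls, and where an on() handler is registered they can also be changed at runtime. The sketch below is illustrative only: the option names and defaults are taken from the value() calls in this patch, and the "merge" group name from main.cpp, but the "renderer" and "virtual" group names are placeholders for wherever the Splatter and the virtual source are actually configured, which this diff does not show.

    {
        "origin_pose": 0,
        "merge": { "discontinuity_mask": 2 },
        "renderer": {
            "clipping_enabled": true,
            "normal_filter": 0.2
        },
        "virtual": {
            "locked": false,
            "show_discontinuity_mask": false
        }
    }

A negative "normal_filter" (the default of -1.0) leaves the normal_filter pass disabled, since the renderer only runs it when norm_filter_ > -0.1f; a positive value is the dot-product threshold below which points are invalidated.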