Commit cac55198 authored by Nicolas Pope

Move files

parent eb47f4cd
2 merge requests: !116 Implements #133 point alignment, !114 Ongoing #133 improvements
Pipeline #14415 failed
#include "ilw.hpp"
#include <ftl/utility/matrix_conversion.hpp>
#include <ftl/rgbd/source.hpp>
#include <ftl/cuda/points.hpp>
#include <loguru.hpp>
#include "ilw_cuda.hpp"
using ftl::ILW;
using ftl::detail::ILWData;
using ftl::rgbd::Channel;
using ftl::rgbd::Channels;
using ftl::rgbd::Format;
using cv::cuda::GpuMat;
ILW::ILW(nlohmann::json &config) : ftl::Configurable(config) {
}
ILW::~ILW() {
}
bool ILW::process(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
    _phase0(fs, stream);

    //for (int i=0; i<2; ++i) {
        _phase1(fs, stream);
        //for (int j=0; j<3; ++j) {
        //    _phase2(fs);
        //}

        // TODO: Break if no time left
    //}

    return true;
}
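// The commented-out loops and the TODO above hint at an outer/inner iteration
// with a time budget. A minimal sketch of that shape (the iteration counts and
// the budget check withinTimeBudget() are hypothetical, not part of this code):
//
//   for (int i = 0; i < 2 && withinTimeBudget(); ++i) {
//       _phase1(fs, stream);
//       for (int j = 0; j < 3; ++j) _phase2(fs);
//   }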
bool ILW::_phase0(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
    // Make points channel...
    for (size_t i=0; i<fs.frames.size(); ++i) {
        auto &f = fs.frames[i];
        auto *s = fs.sources[i];

        if (f.empty(Channel::Depth + Channel::Colour)) {
            LOG(ERROR) << "Missing required channel";
            continue;
        }

        auto &t = f.createTexture<float4>(Channel::Points, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
        auto pose = MatrixConversion::toCUDA(s->getPose().cast<float>()); //.inverse());
        ftl::cuda::point_cloud(t, f.createTexture<float>(Channel::Depth), s->parameters(), pose, stream);

        // TODO: Create energy vector texture and clear it
        // Create energy and clear it

        // Convert colour from BGR to BGRA if needed
        if (f.get<GpuMat>(Channel::Colour).type() == CV_8UC3) {
            // Convert to 4 channel colour
            auto &col = f.get<GpuMat>(Channel::Colour);
            GpuMat tmp(col.size(), CV_8UC4);
            cv::cuda::swap(col, tmp);
            cv::cuda::cvtColor(tmp, col, cv::COLOR_BGR2BGRA);
        }

        f.createTexture<float4>(Channel::EnergyVector, Format<float4>(f.get<GpuMat>(Channel::Colour).size()));
        f.createTexture<float>(Channel::Energy, Format<float>(f.get<GpuMat>(Channel::Colour).size()));
        f.createTexture<uchar4>(Channel::Colour);

        cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
        f.get<GpuMat>(Channel::EnergyVector).setTo(cv::Scalar(0.0f,0.0f,0.0f,0.0f), cvstream);
        f.get<GpuMat>(Channel::Energy).setTo(cv::Scalar(0.0f), cvstream);
    }

    return true;
}
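// For reference, the per-pixel back-projection that a point-cloud kernel such
// as ftl::cuda::point_cloud typically performs is the standard pinhole inverse
// projection followed by the camera-to-world pose transform (a sketch only;
// the actual FTL kernel is not part of this file, and fx, fy, cx, cy are the
// usual intrinsics assumed here):
//
//   d = depth(x, y);
//   cam = make_float3((x - cx) * d / fx, (y - cy) * d / fy, d);
//   world(x, y) = pose * cam;   // pose maps camera space to world space
//
// Pixels with no valid depth are typically written as MINF, which is why the
// correspondence kernel checks world1.x == MINF before using a point.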
bool ILW::_phase1(ftl::rgbd::FrameSet &fs, cudaStream_t stream) {
    // Run correspondence kernel to create an energy vector

    // For each camera combination
    for (size_t i=0; i<fs.frames.size(); ++i) {
        for (size_t j=0; j<fs.frames.size(); ++j) {
            if (i == j) continue;

            LOG(INFO) << "Running phase1";

            auto &f1 = fs.frames[i];
            auto &f2 = fs.frames[j];
            //auto s1 = fs.frames[i];
            auto s2 = fs.sources[j];

            auto pose = MatrixConversion::toCUDA(s2->getPose().cast<float>().inverse());

            try {
                // Calculate energy vector to best correspondence
                ftl::cuda::correspondence_energy_vector(
                    f1.getTexture<float4>(Channel::Points),
                    f2.getTexture<float4>(Channel::Points),
                    f1.getTexture<uchar4>(Channel::Colour),
                    f2.getTexture<uchar4>(Channel::Colour),
                    // TODO: Add normals and other things...
                    f1.getTexture<float4>(Channel::EnergyVector),
                    f1.getTexture<float>(Channel::Energy),
                    pose,
                    s2->parameters(),
                    stream
                );
            } catch (ftl::exception &e) {
                LOG(ERROR) << "Exception in correspondence: " << e.what();
            }

            LOG(INFO) << "Correspondences done... " << i;
        }
    }

    return true;
}
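// Note: the double loop above launches the kernel once for every ordered pair
// (i, j) with i != j, so each frame's EnergyVector and Energy channels
// accumulate contributions from every other camera in the frame set.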
bool ILW::_phase2(ftl::rgbd::FrameSet &fs) {
    // Run energies and motion kernel

    // Iteratively smooth the vectors across a window:
    // strongly disagreeing vectors should cancel out, and
    // a weak vector is overridden by a stronger one.
    return true;
}
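// A minimal sketch of the smoothing described above, assuming a simple
// confidence-weighted box filter over the EnergyVector channel (the window
// size, the weighting and any motion update are assumptions, not an eventual
// implementation):
//
//   for each pixel p:
//       sum = (0,0,0); w = 0;
//       for each q in window(p):
//           v = energyVector(q);   // xyz = weighted offset, w = confidence
//           sum += make_float3(v); w += v.w;
//       if (w > 0) smoothed(p) = sum / w;
//
// Opposing offsets cancel in the sum, and a low-confidence (small v.w) vector
// contributes little next to a strong one.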
#include "ilw_cuda.hpp"
using ftl::cuda::TextureObject;
using ftl::rgbd::Camera;
#define WARP_SIZE 32
#define T_PER_BLOCK 8
#define FULL_MASK 0xffffffff
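// Butterfly reduction over one warp: after log2(WARP_SIZE) xor-shuffles every
// lane holds the maximum of the 32 values contributed by the warp's lanes.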
__device__ inline float warpMax(float e) {
    for (int i = WARP_SIZE/2; i > 0; i /= 2) {
        const float other = __shfl_xor_sync(FULL_MASK, e, i, WARP_SIZE);
        e = max(e, other);
    }
    return e;
}
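// One warp per output pixel of p1: the pixel's world point is projected into
// camera 2, each lane scores one candidate in an upsample x upsample window
// around that projection, the warp reduces to the best (closest) candidate,
// and the winning lane accumulates the confidence-weighted offset into vout
// (xyz, with the confidence itself in w) and the weighted distance into eout.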
__global__ void correspondence_energy_vector_kernel(
        TextureObject<float4> p1,
        TextureObject<float4> p2,
        TextureObject<uchar4> c1,
        TextureObject<uchar4> c2,
        TextureObject<float4> vout,
        TextureObject<float> eout,
        float4x4 pose2,  // Inverse
        Camera cam2) {

    // Each warp picks point in p1
    const int tid = (threadIdx.x + threadIdx.y * blockDim.x);
    const int x = (blockIdx.x*blockDim.x + threadIdx.x) / WARP_SIZE;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;

    const float3 world1 = make_float3(p1.tex2D(x, y));
    if (world1.x == MINF) return;

    const float3 camPos2 = pose2 * world1;
    const uint2 screen2 = cam2.camToScreen<uint2>(camPos2);

    const int upsample = 8;

    // Project to p2 using cam2
    // Each thread takes a possible correspondence and calculates a weighting
    const int lane = tid % WARP_SIZE;
    for (int i=lane; i<upsample*upsample; i+=WARP_SIZE) {
        const float u = (i % upsample) - (upsample / 2);
        const float v = (i / upsample) - (upsample / 2);

        const float3 world2 = make_float3(p2.tex2D(screen2.x+u, screen2.y+v));
        if (world2.x == MINF) continue;

        // Determine degree of correspondence
        const float confidence = 1.0f / length(world1 - world2);
        const float maxconf = warpMax(confidence);

        // This thread has best confidence value
        if (maxconf == confidence) {
            vout(x,y) = vout.tex2D(x, y) + make_float4(
                (world1.x - world2.x) * maxconf,
                (world1.y - world2.y) * maxconf,
                (world1.z - world2.z) * maxconf,
                maxconf);
            eout(x,y) = eout.tex2D(x,y) + length(world1 - world2)*maxconf;
        }
    }
}
void ftl::cuda::correspondence_energy_vector(
        TextureObject<float4> &p1,
        TextureObject<float4> &p2,
        TextureObject<uchar4> &c1,
        TextureObject<uchar4> &c2,
        TextureObject<float4> &vout,
        TextureObject<float> &eout,
        float4x4 &pose2,
        const Camera &cam2,
        cudaStream_t stream) {
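    // Launch geometry: one warp per output pixel. blockSize is 2*WARP_SIZE
    // threads wide, i.e. two warps in x, so each block covers 2 pixel columns
    // by T_PER_BLOCK rows; the grid therefore divides the width by 2 and the
    // height by T_PER_BLOCK.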
    const dim3 gridSize((p1.width() + 2 - 1)/2, (p1.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
    const dim3 blockSize(2*WARP_SIZE, T_PER_BLOCK);

    printf("COR SIZE %d,%d\n", p1.width(), p1.height());

    correspondence_energy_vector_kernel<<<gridSize, blockSize, 0, stream>>>(
        p1, p2, c1, c2, vout, eout, pose2, cam2
    );
    cudaSafeCall( cudaGetLastError() );
}