Skip to content
Snippets Groups Projects
Commit 06246492 authored by Nicolas Pope's avatar Nicolas Pope
Browse files

Merge branch 'feature/360render' into 'master'

Add webgl and 360 render capability

See merge request nicolas.pope/ftl!304
parents e2e259ab 78c05e86
No related branches found
No related tags found
1 merge request!304Add webgl and 360 render capability
Pipeline #26739 passed
......@@ -69,6 +69,7 @@ struct Parameters {
ftl::rgbd::Camera camera; // Virtual camera intrinsics
ftl::render::ViewPort viewport;
ftl::render::ViewPortMode viewPortMode;
ftl::rgbd::Projection projection;
ftl::render::AccumulationFunction accumulationMode;
};
......
......@@ -404,6 +404,7 @@ void CUDARender::_updateParameters(ftl::rgbd::Frame &out, ftl::codecs::Channel c
params_.disconDisparities = value("discon_disparities", 2.0f);
params_.accumulationMode = static_cast<ftl::render::AccumulationFunction>(value("accumulation_func", 0));
params_.m_flags = 0;
params_.projection = static_cast<ftl::rgbd::Projection>(value("projection",0));
if (value("normal_weight_colours", true)) params_.m_flags |= ftl::render::kNormalWeightColours;
if (value("channel_weights", false)) params_.m_flags |= ftl::render::kUseWeightsChannel;
}
......
......@@ -9,6 +9,7 @@
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
using ftl::rgbd::Projection;
/*
* DIBR point cloud with a depth check
......@@ -26,11 +27,13 @@ using ftl::render::Parameters;
const float3 camPos = transform * cam.screenToCam(x,y,d0);
const float d = camPos.z;
//const float d = camPos.z;
const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
const unsigned int cx = screenPos.x;
const unsigned int cy = screenPos.y;
//const uint2 screenPos = params.camera.camToScreen<uint2>(camPos);
const float3 screenPos = params.camera.project<Projection::PERSPECTIVE>(camPos);
const unsigned int cx = (unsigned int)(screenPos.x+0.5f);
const unsigned int cy = (unsigned int)(screenPos.y+0.5f);
const float d = screenPos.z;
if (d > params.camera.minDepth && d < params.camera.maxDepth && cx < depth_out.width() && cy < depth_out.height()) {
// Transform estimated point to virtual cam space and output z
atomicMin(&depth_out(cx,cy), d * 100000.0f);
......
This diff is collapsed.
......@@ -7,6 +7,7 @@ using ftl::rgbd::Camera;
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
using ftl::render::ViewPortMode;
using ftl::rgbd::Projection;
#define T_PER_BLOCK 8
......@@ -45,7 +46,7 @@ __device__ inline uint2 convertToScreen<ViewPortMode::Warping>(const Parameters
/*
* Convert source screen position to output screen coordinates.
*/
template <ftl::render::ViewPortMode VPMODE>
template <ftl::render::ViewPortMode VPMODE, Projection PROJECT>
__global__ void screen_coord_kernel(TextureObject<float> depth,
TextureObject<float> depth_out,
TextureObject<short2> screen_out, Parameters params, float4x4 pose, Camera camera) {
......@@ -53,22 +54,22 @@ __device__ inline uint2 convertToScreen<ViewPortMode::Warping>(const Parameters
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= 0 && y >= 0 && x < depth.width() && y < depth.height()) {
uint2 screenPos = make_uint2(30000,30000);
//uint2 screenPos = make_uint2(30000,30000);
const float d = depth.tex2D(x, y);
// Find the virtual screen position of current point
const float3 camPos = (d > camera.minDepth && d < camera.maxDepth) ? pose * camera.screenToCam(x,y,d) : make_float3(0.0f,0.0f,0.0f);
screenPos = convertToScreen<VPMODE>(params, camPos);
float3 screenPos = params.camera.project<PROJECT>(camPos); //convertToScreen<VPMODE>(params, camPos);
if ( camPos.z < params.camera.minDepth ||
camPos.z > params.camera.maxDepth ||
if ( screenPos.z < params.camera.minDepth ||
screenPos.z > params.camera.maxDepth ||
//!vp.inside(screenPos.x, screenPos.y))
screenPos.x >= params.camera.width ||
screenPos.y >= params.camera.height)
screenPos = make_uint2(30000,30000);
screenPos = make_float3(30000,30000,0);
screen_out(x,y) = make_short2(screenPos.x, screenPos.y);
depth_out(x,y) = camPos.z;
depth_out(x,y) = screenPos.z;
}
}
......@@ -78,10 +79,18 @@ void ftl::cuda::screen_coord(TextureObject<float> &depth, TextureObject<float> &
const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
if (params.projection == Projection::PERSPECTIVE) {
switch (params.viewPortMode) {
case ViewPortMode::Disabled: screen_coord_kernel<ViewPortMode::Disabled><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Disabled: screen_coord_kernel<ViewPortMode::Disabled, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
}
} else if (params.projection == Projection::EQUIRECTANGULAR) {
switch (params.viewPortMode) {
case ViewPortMode::Disabled: screen_coord_kernel<ViewPortMode::Disabled, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
}
}
cudaSafeCall( cudaGetLastError() );
}
......
......@@ -14,6 +14,12 @@
namespace ftl{
namespace rgbd {
// Projection model used by Camera::project()/unproject() for the virtual
// render camera.
// NOTE(review): in this change the screen_coord dispatch only launches
// kernels for PERSPECTIVE and EQUIRECTANGULAR; ORTHOGRAPHIC selects no
// kernel path — confirm whether that is intentional.
enum class Projection {
PERSPECTIVE = 0,      // Pinhole projection (see project<PERSPECTIVE>).
ORTHOGRAPHIC = 1,     // No specialization visible here — TODO confirm support.
EQUIRECTANGULAR = 2   // 360-degree lat/long map (see project<EQUIRECTANGULAR>).
};
/**
* All properties associated with cameras. This structure is designed to
* operate on CPU and GPU.
......@@ -37,6 +43,16 @@ struct __align__(16) Camera {
*/
template <typename T> __device__ __host__ T camToScreen(const float3 &pos) const;
/**
* From 3D point to 2D + Depth.
*/
template <Projection P> __device__ __host__ float3 project(const float3 &point) const;
/**
* From 2D + Depth to 3D point.
*/
template <Projection P> __device__ __host__ float3 unproject(const float3 &point) const;
/**
* Convert screen plus depth into camera coordinates.
*/
......@@ -70,6 +86,54 @@ struct __align__(16) Camera {
// ---- IMPLEMENTATIONS --------------------------------------------------------
template <> __device__ __host__
inline float3 ftl::rgbd::Camera::project<ftl::rgbd::Projection::EQUIRECTANGULAR>(const float3 &cam) const {
	// Project a camera-space 3D point onto the equirectangular image.
	// Returns (x_pixel, y_pixel, ray_length); the z component carries the
	// distance along the ray, which unproject<EQUIRECTANGULAR> uses to
	// invert this mapping.
	const float l = length(cam);
	const float3 ray3d = cam / l;

	// Inverse formula for spherical projection; reference: Szeliski,
	// "Computer Vision: Algorithms and Applications", p439.
	// Use the single-precision math functions (atan2f/sqrtf) — the bare
	// atan2/sqrt overloads promote to double in device code, which is
	// substantially slower on most GPUs.
	const float theta = atan2f(ray3d.y, sqrtf(ray3d.x*ray3d.x + ray3d.z*ray3d.z));
	const float phi = atan2f(ray3d.x, ray3d.z);
	const float pi = 3.14159265f;

	// Map longitude (phi) and latitude (theta) to pixel coordinates on the
	// width x height equirectangular map.
	const float x_sphere = (((phi*width)/pi + width)/2.0f);
	const float y_sphere = (theta + pi/2.0f)*height/pi;
	return make_float3(x_sphere, y_sphere, l);
}
template <> __device__ __host__
inline float3 ftl::rgbd::Camera::unproject<ftl::rgbd::Projection::EQUIRECTANGULAR>(const float3 &equi) const {
	// Invert the equirectangular projection: (x_pixel, y_pixel, ray_length)
	// back to a camera-space 3D point.
	const float pi = 3.14159265f;

	// Recover longitude (phi) and latitude (theta) from pixel coordinates;
	// mirrors the forward mapping in project<EQUIRECTANGULAR>.
	const float phi = (equi.x * 2.0f - float(width)) * pi / float(width);
	const float theta = (equi.y * pi / float(height)) - (pi/2.0f);

	// Unit ray direction. Use cosf/sinf: the unsuffixed cos/sin overloads
	// promote to double precision in device code.
	const float z = cosf(theta)*cosf(phi);
	const float x = cosf(theta)*sinf(phi);
	const float y = sinf(theta);

	// Scale the unit ray by the stored ray length (equi.z).
	return make_float3(x*equi.z, y*equi.z, z*equi.z);
}
template <> __device__ __host__
inline float3 ftl::rgbd::Camera::project<ftl::rgbd::Projection::PERSPECTIVE>(const float3 &pos) const {
	// Pinhole projection of a camera-space point to (x_pixel, y_pixel, depth).
	// cx/cy are subtracted here and added back in unproject<PERSPECTIVE>,
	// so the two functions are mutual inverses; depth passes through in z.
	const float sx = pos.x*fx/pos.z - cx;
	const float sy = pos.y*fy/pos.z - cy;
	return make_float3(sx, sy, pos.z);
}
template <> __device__ __host__
inline float3 ftl::rgbd::Camera::unproject<ftl::rgbd::Projection::PERSPECTIVE>(const float3 &pos) const {
	// Back-project a pixel (x, y) with depth (z) into camera space.
	// The +cx/+cy offsets mirror the subtraction in project<PERSPECTIVE>,
	// making the pair mutual inverses.
	const float rayx = (pos.x + cx) / fx;
	const float rayy = (pos.y + cy) / fy;
	return make_float3(pos.z*rayx, pos.z*rayy, pos.z);
}
template <> __device__ __host__
inline float2 ftl::rgbd::Camera::camToScreen<float2>(const float3 &pos) const {
return make_float2(
......
This diff is collapsed.
......@@ -150,12 +150,22 @@ function FTLStream(peer, uri, element) {
//this.player = videojs('ftl-video-element');
//this.player.vr({projection: '360'});
if (false) {
this.camera = new THREE.PerspectiveCamera( 75, window.innerWidth / window.innerHeight, 1, 1100 );
} else {
this.camera = new THREE.OrthographicCamera(window.innerWidth/-2, window.innerWidth/2, window.innerHeight/2, window.innerHeight/-2, 1, 4);
}
this.camera.target = new THREE.Vector3( 0, 0, 0 );
this.scene = new THREE.Scene();
var geometry = new THREE.SphereBufferGeometry( 500, 60, 40 );
var geometry;
if (false) {
geometry = new THREE.SphereBufferGeometry( 500, 60, 40 );
} else {
geometry = new THREE.PlaneGeometry(1280, 720, 32);
}
// invert the geometry on the x-axis so that all of the faces point inward
geometry.scale( - 1, 1, 1 );
......@@ -180,7 +190,7 @@ function FTLStream(peer, uri, element) {
this.onPointerDownLat = 0;
this.lon = 0;
this.lat = 0;
this.distance = 1.0;
this.distance = 2.0;
this.overlay = document.createElement("DIV");
this.overlay.classList.add("ftl");
......@@ -202,8 +212,12 @@ function FTLStream(peer, uri, element) {
this.overlay.addEventListener('mousemove', (event) => {
if ( this.isUserInteracting === true ) {
this.lon = ( this.onPointerDownPointerX - event.clientX ) * 0.1 + this.onPointerDownLon;
this.lat = ( this.onPointerDownPointerY - event.clientY ) * 0.1 + this.onPointerDownLat;
//this.lon = ( this.onPointerDownPointerX - event.clientX ) * 0.1 + this.onPointerDownLon;
//this.lat = ( this.onPointerDownPointerY - event.clientY ) * 0.1 + this.onPointerDownLat;
this.rotationX += event.movementY * (1/25) * 5.0;
this.rotationY -= event.movementX * (1/25) * 5.0;
this.updatePose();
}
});
......@@ -222,9 +236,13 @@ function FTLStream(peer, uri, element) {
let phi = THREE.MathUtils.degToRad( 90 - me.lat );
let theta = THREE.MathUtils.degToRad( me.lon );
me.camera.position.x = me.distance * Math.sin( phi ) * Math.cos( theta );
me.camera.position.y = me.distance * Math.cos( phi );
me.camera.position.z = me.distance * Math.sin( phi ) * Math.sin( theta );
//me.camera.position.x = me.distance * Math.sin( phi ) * Math.cos( theta );
//me.camera.position.y = me.distance * Math.cos( phi );
//me.camera.position.z = me.distance * Math.sin( phi ) * Math.sin( theta );
me.camera.position.x = 0;
me.camera.position.y = 0;
me.camera.position.z = -2;
me.camera.lookAt( me.camera.target );
......@@ -325,7 +343,7 @@ function FTLStream(peer, uri, element) {
if (ts > 0) {
dts = streampckg[0] - ts;
console.log("Framerate = ", 1000/dts);
this.converter = new VideoConverter.default(this.element, 25, 4);
this.converter = new VideoConverter.default(this.element, 26, 1);
}
ts = streampckg[0];
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment