diff --git a/components/renderers/cpp/src/points.cu b/components/renderers/cpp/src/points.cu
index f73eaa2085e901991f9cc40580a3273864ceda4f..a188adaeffaad12be42762a3e1b529926dee225e 100644
--- a/components/renderers/cpp/src/points.cu
+++ b/components/renderers/cpp/src/points.cu
@@ -12,7 +12,9 @@ __global__ void point_cloud_kernel(ftl::cuda::TextureObject<float4> output, ftl:
 		output(x,y) = make_float4(MINF, MINF, MINF, MINF);
 
 		const float d = depth.tex2D((int)x, (int)y);
-		float p = d;
+
+		// Normalise depth to the range 0.0 to 1.0 using the camera's min/max depth
+		float p = (d - params.minDepth) / (params.maxDepth - params.minDepth);
 
 		if (d >= params.minDepth && d <= params.maxDepth) {
 			/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
diff --git a/components/renderers/cpp/src/splatter.cu b/components/renderers/cpp/src/splatter.cu
index aa0fc6bed113e2e0066b40b7e64a0cd5218d5908..3567d5c07dcdc819b93989992069b3244149855f 100644
--- a/components/renderers/cpp/src/splatter.cu
+++ b/components/renderers/cpp/src/splatter.cu
@@ -184,42 +184,34 @@ __device__ inline float make(const float4 &v) {
 
         const float3 camPos = params.camera.screenToCam((int)(x+u),(int)(y+v),d);
         const float3 camPos2 = params.camera.screenToCam((int)(x),(int)(y),d);
-        //if (length(camPos - camPos2) > 2.0f*(camPos.z/params.camera.fx)) continue;
         const float3 worldPos = params.m_viewMatrixInverse * camPos;
-        //const float3 camPos2 = pose_inv * worldPos;
-        //const uint2 screenPos = camera.camToScreen<uint2>(camPos2);
-
-        //if (screenPos.x < points.width() && screenPos.y < points.height()) {
-            // Can now read points, normals and colours from source cam
-
-            // What is contribution of our current point at this pixel?
-            //const float3 p = make_float3(points.tex2D((int)screenPos.x, (int)screenPos.y));
-            //const float weight = ftl::cuda::spatialWeighting(worldPos, p, (camPos.z/params.camera.fx)); //*(camPos2.z/camera.fx));
-            //if (weight <= 0.0f) continue;
-
-            float4 n = normals.tex2D((int)(x+u), (int)(y+v));
-            //const float l = length(n);
-            //if (l == 0.0f) continue;
-            //n /= l;
-
-            // Does the ray intersect plane of splat?
-            float t = 1000.0f;
-            if (ftl::cuda::intersectPlane(make_float3(n), worldPos, origin, ray, t)) { //} && fabs(t-camPos.z) < 0.01f) {
-                //t *= (params.m_viewMatrix.getFloat3x3() * ray).z;
-                t *= scale;
-                const float3 camPos3 = params.camera.screenToCam((int)(x),(int)(y),t);
-                float weight = ftl::cuda::spatialWeighting(camPos, camPos3, 2.0f*(camPos3.z/params.camera.fx));
-
-                if (params.m_flags & ftl::render::kNormalWeightColours) weight *= n.w * n.w;
-
-                if (weight <= 0.0f) continue;
-                //depth += t * weight;
-                //contrib += weight;
-                depth = min(depth, t);
-                results[i/WARP_SIZE] = {weight, t, in.tex2D((int)x+u, (int)y+v)}; //make_float2(t, weight);
-                //atomicMin(&depth_out(x,y), (int)(depth * 1000.0f));
-            }
-        //}
+
+        // Splat normal, assumed to already be normalised; n.w carries a weight
+        // used for the colour weighting below.
+        float4 n = normals.tex2D((int)(x+u), (int)(y+v));
+
+        // Does the ray intersect the plane of the splat?
+        // (A stricter test could additionally require fabs(t - camPos.z) < 0.01f.)
+        float t = 1000.0f;
+        if (ftl::cuda::intersectPlane(make_float3(n), worldPos, origin, ray, t)) {
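+            // t is the ray parameter at the intersection with the splat's plane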
+            // Rescale t from the normalised ray parameterisation back to the original units (metres)
+            t *= scale;
+            const float3 camPos3 = params.camera.screenToCam((int)(x),(int)(y),t);
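+            // Weight by the 3D distance between the splat point and the intersection;
+            // 2*(z/fx) is roughly the world-space width of two pixels at depth z,
+            // used here as the weighting radius.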
+            float weight = ftl::cuda::spatialWeighting(camPos, camPos3, 2.0f*(camPos3.z/params.camera.fx));
+
+            /* Buehler C. et al. 2001. Unstructured Lumigraph Rendering. */
+            /* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
+            // This is the simple naive colour weighting. It might be good enough
+            // for our purposes if the alignment step prevents ghosting.
+            // TODO: Use depth, and perhaps the neighbourhood consistency, as described in:
+            //     Kuster C. et al. 2011. FreeCam: A hybrid camera system for interactive free-viewpoint video.
+            if (params.m_flags & ftl::render::kNormalWeightColours) weight *= n.w * n.w;
+            //if (params.m_flags & ftl::render::kDepthWeightColours) weight *= ???
+
+            if (weight <= 0.0f) continue;
+
+            depth = min(depth, t);
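+            // Record the weight, intersection depth and colour sample for this candidate splat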
+            results[i/WARP_SIZE] = {weight, t, in.tex2D((int)x+u, (int)y+v)};
+        }
     }
 
     depth = warpMin(depth);