How to scale a texture in webgl? - matrix

I have a texture of size 800x600. How do I scale it on a webgl <canvas> at another size and keep the original aspect ratio? Assuming that the drawing buffer and the canvas have the same dimensions.

Given that WebGL only cares about clip-space coordinates, you can just draw a 2-unit quad (-1 to +1) and scale it by the aspect of the canvas vs the aspect of the image.
In other words
const canvasAspect = canvas.clientWidth / canvas.clientHeight;
const imageAspect = image.width / image.height;
let scaleY = 1;
let scaleX = imageAspect / canvasAspect;
Note that you need to decide how you want to fit the image. scaleY = 1 means the image will always fit vertically, and horizontally it will just be whatever it comes out to.
If you want it to fit horizontally then you need to make scaleX = 1
let scaleX = 1;
let scaleY = canvasAspect / imageAspect;
If you want it to contain then
let scaleY = 1;
let scaleX = imageAspect / canvasAspect;
if (scaleX > 1) {
scaleY = 1 / scaleX;
scaleX = 1;
}
If you want it to cover then
let scaleY = 1;
let scaleX = imageAspect / canvasAspect;
if (scaleX < 1) {
scaleY = 1 / scaleX;
scaleX = 1;
}
// Current fit mode; one of 'fitV', 'fitH', 'contain', 'cover'.
// Changed by the buttons below (see setScaleMode).
let scaleMode = 'fitV';
const gl = document.querySelector("canvas").getContext('webgl');
// Vertex shader: positions form a -1..+1 clip-space quad, scaled by u_matrix.
const vs = `
attribute vec4 position;
uniform mat4 u_matrix;
varying vec2 v_texcoord;
void main() {
gl_Position = u_matrix * position;
v_texcoord = position.xy * .5 + .5; // because we know we're using a -1 + 1 quad
}
`;
// Fragment shader: plain textured draw.
const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_tex;
void main() {
gl_FragColor = texture2D(u_tex, v_texcoord);
}
`;
let image = { width: 1, height: 1 }; // dummy until loaded
// Texture loads asynchronously; twgl invokes the callback when ready.
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/TSiyiJv.jpg',
crossOrigin: 'anonymous',
}, (err, tex, img) => {
// called after the image has loaded
image = img;
render();
});
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// Two triangles forming a unit quad in clip space (-1..+1).
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1, // tri 1
1, -1,
-1, 1,
-1, 1, // tri 2
1, -1,
1, 1,
],
}
});
function render() {
  // Keep the drawing buffer in sync with the displayed size. This line is
  // not needed if you don't care that they match.
  twgl.resizeCanvasToDisplaySize(gl.canvas);
  gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
  gl.useProgram(programInfo.program);
  twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
  const canvasAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  const imageAspect = image.width / image.height;
  // Each mode yields [scaleX, scaleY] for the -1..+1 clip-space quad.
  const modes = {
    fitV: () => [imageAspect / canvasAspect, 1],
    fitH: () => [1, canvasAspect / imageAspect],
    contain: () => {
      const sx = imageAspect / canvasAspect;
      return sx > 1 ? [1, 1 / sx] : [sx, 1];
    },
    cover: () => {
      const sx = imageAspect / canvasAspect;
      return sx < 1 ? [1, 1 / sx] : [sx, 1];
    },
  };
  const [scaleX, scaleY] = modes[scaleMode]();
  // Column-major 4x4: scale X, scale and flip Y.
  twgl.setUniforms(programInfo, {
    u_matrix: [
      scaleX, 0, 0, 0,
      0, -scaleY, 0, 0,
      0, 0, 1, 0,
      0, 0, 0, 1,
    ],
  });
  gl.drawArrays(gl.TRIANGLES, 0, 6);
}
render();
window.addEventListener('resize', render);
// Wire every button to switch modes; each button's id doubles as the
// mode name ('fitV', 'fitH', 'contain', 'cover').
for (const elem of document.querySelectorAll('button')) {
  elem.addEventListener('click', setScaleMode);
}
// Click handler: adopt the clicked button's id as the mode and redraw.
function setScaleMode(e) {
  scaleMode = e.target.id;
  render();
}
html, body {
margin: 0;
height: 100%;
}
canvas {
width: 100%;
height: 100%;
display: block;
}
.ui {
position: absolute;
left: 0;
top: 0;
}
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
<div class="ui">
<button id="fitV">fit vertical</button>
<button id="fitH">fit horizontal</button>
<button id="contain">contain</button>
<button id="cover">cover</button>
</div>
The code above uses a 4x4 matrix to apply the scale
gl_Position = u_matrix * position;
It could just as easily pass in the scale directly
uniform vec2 scale;
...
gl_Position = vec4(scale * position.xy, 0, 1);

Related

Is there a way to get the depth values using WebGL and Three.js?

I am trying to get the depth values of each pixel in the canvas element. Is there a way to find these depth values using WebGL and Three.js?
What I majorly want is that for eg. in the image below, the red background should have 0 as the depth value whereas the 3D model should have the depth values based on the distance from the camera.
Using the X,Y coordinates of the canvas, is there a method to access the depth values?
[Edit 1]: Adding more information
I pick three random points as shown below, then I ask the user to input the depth values for each of these points. Once the input is received from the user, I will compute the difference between the depth values in three.js and the values inputted from the user.
Basically, I would require a 2D array of the canvas size where each pixel corresponds to an array value. This 2D array must contain the value 0 if the pixel is a red background, or contain the depth value if the pixel contains the 3D model.
Two ways come to mind.
One you can just use RayCaster
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
.info {
position: absolute;
left: 1em;
top: 1em;
padding: 1em;
background: rgba(0, 0, 0, 0.7);
color: white;
font-size: xx-small;
}
.info::after{
content: '';
position: absolute;
border: 10px solid transparent;
border-top: 10px solid rgba(0, 0, 0, 0.7);
top: 0;
left: -10px;
}
<canvas id="c"></canvas>
<script type="module">
// Three.js - Picking - RayCaster
// from https://threejsfundamentals.org/threejs/threejs-picking-raycaster.html
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r110/build/three.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const fov = 60;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 200;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 30;
const points = [
[170, 20],
[400, 50],
[225, 120],
].map((point) => {
const infoElem = document.createElement('pre');
document.body.appendChild(infoElem);
infoElem.className = "info";
infoElem.style.left = `${point[0] + 10}px`;
infoElem.style.top = `${point[1]}px`;
return {
point,
infoElem,
};
});
const scene = new THREE.Scene();
scene.background = new THREE.Color('white');
// put the camera on a pole (parent it to an object)
// so we can spin the pole to move the camera around the scene
const cameraPole = new THREE.Object3D();
scene.add(cameraPole);
cameraPole.add(camera);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
camera.add(light);
}
const boxWidth = 1;
const boxHeight = 1;
const boxDepth = 1;
const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);
// rand(n) -> random in [0, n); rand(a, b) -> random in [a, b).
function rand(min, max) {
  const [lo, hi] = max === undefined ? [0, min] : [min, max];
  return lo + (hi - lo) * Math.random();
}
function randomColor() {
return `hsl(${rand(360) | 0}, ${rand(50, 100) | 0}%, 50%)`;
}
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
const material = new THREE.MeshPhongMaterial({
color: randomColor(),
});
const cube = new THREE.Mesh(geometry, material);
scene.add(cube);
cube.position.set(rand(-20, 20), rand(-20, 20), rand(-20, 20));
cube.rotation.set(rand(Math.PI), rand(Math.PI), 0);
cube.scale.set(rand(3, 6), rand(3, 6), rand(3, 6));
}
// Resize the drawing buffer to match the canvas's CSS size.
// Returns true if a resize happened so the caller can update the camera.
function resizeRendererToDisplaySize(renderer) {
  const canvas = renderer.domElement;
  const {clientWidth, clientHeight} = canvas;
  const needResize = canvas.width !== clientWidth ||
                     canvas.height !== clientHeight;
  if (needResize) {
    // false: don't let three.js touch the canvas's CSS size
    renderer.setSize(clientWidth, clientHeight, false);
  }
  return needResize;
}
const raycaster = new THREE.Raycaster();
function render(time) {
time *= 0.001; // convert to seconds;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
cameraPole.rotation.y = time * .1;
for (const {point, infoElem} of points) {
const pickPosition = {
x: (point[0] / canvas.clientWidth ) * 2 - 1,
y: (point[1] / canvas.clientHeight) * -2 + 1, // note we flip Y
};
raycaster.setFromCamera(pickPosition, camera);
const intersectedObjects = raycaster.intersectObjects(scene.children);
if (intersectedObjects.length) {
// pick the first object. It's the closest one
const intersection = intersectedObjects[0];
infoElem.textContent = `position : ${point[0]}, ${point[1]}
distance : ${intersection.distance.toFixed(2)}
z depth : ${((intersection.distance - near) / (far - near)).toFixed(3)}
local pos: ${intersection.point.x.toFixed(2)}, ${intersection.point.y.toFixed(2)}, ${intersection.point.z.toFixed(2)}
local uv : ${intersection.uv.x.toFixed(2)}, ${intersection.uv.y.toFixed(2)}`;
} else {
infoElem.textContent = `position : ${point[0]}, ${point[1]}`;
}
}
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>
The other way is to do as you mentioned and read the depth buffer. Unfortunately there is no direct way to read the depth buffer.
To read the depth values you need 2 render targets. You'd render to the first target. That gives you both a color texture with the rendered image and a depth texture with the depth values. You can't read a depth texture directly but you can draw it to another color texture and then read the color texture. Finally you can draw the first color texture to the canvas.
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
.info {
position: absolute;
left: 1em;
top: 1em;
padding: 1em;
background: rgba(0, 0, 0, 0.7);
color: white;
font-size: xx-small;
}
.info::after{
content: '';
position: absolute;
border: 10px solid transparent;
border-top: 10px solid rgba(0, 0, 0, 0.7);
top: 0;
left: -10px;
}
<canvas id="c"></canvas>
<script type="module">
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r110/build/three.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const points = [
[170, 20],
[400, 50],
[225, 120],
].map((point) => {
const infoElem = document.createElement('pre');
document.body.appendChild(infoElem);
infoElem.className = "info";
infoElem.style.left = `${point[0] + 10}px`;
infoElem.style.top = `${point[1]}px`;
return {
point,
infoElem,
};
});
// Main render target: color texture plus a depth texture we can sample.
const renderTarget = new THREE.WebGLRenderTarget(1, 1);
renderTarget.depthTexture = new THREE.DepthTexture();
// Second target used only to copy the depth texture into readable color;
// it needs no depth or stencil buffer of its own.
const depthRenderTarget = new THREE.WebGLRenderTarget(1, 1, {
depthBuffer: false,
stencilBuffer: false, // fixed: was misspelled 'stenciBuffer' and silently ignored
});
const rtFov = 60;
const rtAspect = 1;
const rtNear = 0.1;
const rtFar = 200;
const rtCamera = new THREE.PerspectiveCamera(rtFov, rtAspect, rtNear, rtFar);
rtCamera.position.z = 30;
const rtScene = new THREE.Scene();
rtScene.background = new THREE.Color('white');
// put the camera on a pole (parent it to an object)
// so we can spin the pole to move the camera around the scene
const cameraPole = new THREE.Object3D();
rtScene.add(cameraPole);
cameraPole.add(rtCamera);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
rtCamera.add(light);
}
const boxWidth = 1;
const boxHeight = 1;
const boxDepth = 1;
const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return min + (max - min) * Math.random();
}
function randomColor() {
return `hsl(${rand(360) | 0}, ${rand(50, 100) | 0}%, 50%)`;
}
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
const material = new THREE.MeshPhongMaterial({
color: randomColor(),
});
const cube = new THREE.Mesh(geometry, material);
rtScene.add(cube);
cube.position.set(rand(-20, 20), rand(-20, 20), rand(-20, 20));
cube.rotation.set(rand(Math.PI), rand(Math.PI), 0);
cube.scale.set(rand(3, 6), rand(3, 6), rand(3, 6));
}
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, -1, 1);
const scene = new THREE.Scene();
camera.position.z = 1;
const sceneMaterial = new THREE.MeshBasicMaterial({
map: renderTarget.texture,
});
const planeGeo = new THREE.PlaneBufferGeometry(2, 2);
const plane = new THREE.Mesh(planeGeo, sceneMaterial);
scene.add(plane);
const depthScene = new THREE.Scene();
const depthMaterial = new THREE.MeshBasicMaterial({
map: renderTarget.depthTexture,
});
const depthPlane = new THREE.Mesh(planeGeo, depthMaterial);
depthScene.add(depthPlane);
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
let depthValues = new Uint8Array(0);
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
renderTarget.setSize(canvas.width, canvas.height);
depthRenderTarget.setSize(canvas.width, canvas.height);
rtCamera.aspect = canvas.clientWidth / canvas.clientHeight;
rtCamera.updateProjectionMatrix();
}
cameraPole.rotation.y = time * .1;
// draw render target scene to render target
renderer.setRenderTarget(renderTarget);
renderer.render(rtScene, rtCamera);
renderer.setRenderTarget(null);
// render the depth texture to another render target
renderer.setRenderTarget(depthRenderTarget);
renderer.render(depthScene, camera);
renderer.setRenderTarget(null);
{
const {width, height} = depthRenderTarget;
const spaceNeeded = width * height * 4;
if (depthValues.length !== spaceNeeded) {
depthValues = new Uint8Array(spaceNeeded);
}
renderer.readRenderTargetPixels(
depthRenderTarget,
0,
0,
depthRenderTarget.width,
depthRenderTarget.height,
depthValues);
for (const {point, infoElem} of points) {
const offset = ((height - point[1] - 1) * width + point[0]) * 4;
infoElem.textContent = `position : ${point[0]}, ${point[1]}
z depth : ${(depthValues[offset] / 255).toFixed(3)}`;
}
}
// render the color texture to the canvas
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>
The problem is you can only read UNSIGNED_BYTE values from the texture so your depth values only go from 0 to 255 which is not really enough resolution to do much.
To solve that issue you have to encode the depth values across channels when drawing the depth texture to the 2nd render target which means you need to make your own shader. three.js has some shader snippets for packing the values so hacking a shader using ideas from this article we can get better depth values.
body {
margin: 0;
}
#c {
width: 100vw;
height: 100vh;
display: block;
}
.info {
position: absolute;
left: 1em;
top: 1em;
padding: 1em;
background: rgba(0, 0, 0, 0.7);
color: white;
font-size: xx-small;
}
.info::after{
content: '';
position: absolute;
border: 10px solid transparent;
border-top: 10px solid rgba(0, 0, 0, 0.7);
top: 0;
left: -10px;
}
<canvas id="c"></canvas>
<script type="module">
import * as THREE from 'https://threejsfundamentals.org/threejs/resources/threejs/r110/build/three.module.js';
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas});
const points = [
[170, 20],
[400, 50],
[225, 120],
].map((point) => {
const infoElem = document.createElement('pre');
document.body.appendChild(infoElem);
infoElem.className = "info";
infoElem.style.left = `${point[0] + 10}px`;
infoElem.style.top = `${point[1]}px`;
return {
point,
infoElem,
};
});
// Main render target: color texture plus a depth texture we can sample.
const renderTarget = new THREE.WebGLRenderTarget(1, 1);
renderTarget.depthTexture = new THREE.DepthTexture();
// Second target used only to copy the depth texture into readable color;
// it needs no depth or stencil buffer of its own.
const depthRenderTarget = new THREE.WebGLRenderTarget(1, 1, {
depthBuffer: false,
stencilBuffer: false, // fixed: was misspelled 'stenciBuffer' and silently ignored
});
const rtFov = 60;
const rtAspect = 1;
const rtNear = 0.1;
const rtFar = 200;
const rtCamera = new THREE.PerspectiveCamera(rtFov, rtAspect, rtNear, rtFar);
rtCamera.position.z = 30;
const rtScene = new THREE.Scene();
rtScene.background = new THREE.Color('white');
// put the camera on a pole (parent it to an object)
// so we can spin the pole to move the camera around the scene
const cameraPole = new THREE.Object3D();
rtScene.add(cameraPole);
cameraPole.add(rtCamera);
{
const color = 0xFFFFFF;
const intensity = 1;
const light = new THREE.DirectionalLight(color, intensity);
light.position.set(-1, 2, 4);
rtCamera.add(light);
}
const boxWidth = 1;
const boxHeight = 1;
const boxDepth = 1;
const geometry = new THREE.BoxGeometry(boxWidth, boxHeight, boxDepth);
function rand(min, max) {
if (max === undefined) {
max = min;
min = 0;
}
return min + (max - min) * Math.random();
}
function randomColor() {
return `hsl(${rand(360) | 0}, ${rand(50, 100) | 0}%, 50%)`;
}
const numObjects = 100;
for (let i = 0; i < numObjects; ++i) {
const material = new THREE.MeshPhongMaterial({
color: randomColor(),
});
const cube = new THREE.Mesh(geometry, material);
rtScene.add(cube);
cube.position.set(rand(-20, 20), rand(-20, 20), rand(-20, 20));
cube.rotation.set(rand(Math.PI), rand(Math.PI), 0);
cube.scale.set(rand(3, 6), rand(3, 6), rand(3, 6));
}
const camera = new THREE.OrthographicCamera(-1, 1, 1, -1, -1, 1);
const scene = new THREE.Scene();
camera.position.z = 1;
const sceneMaterial = new THREE.MeshBasicMaterial({
map: renderTarget.texture,
});
const planeGeo = new THREE.PlaneBufferGeometry(2, 2);
const plane = new THREE.Mesh(planeGeo, sceneMaterial);
scene.add(plane);
const depthScene = new THREE.Scene();
const depthMaterial = new THREE.MeshBasicMaterial({
map: renderTarget.depthTexture,
});
// Patch the basic material's fragment shader so the depth value read from
// the depth texture gets packed across all 4 color channels for precision.
depthMaterial.onBeforeCompile = function(shader) {
// the <packing> GLSL chunk from three.js has the packDepthToRGBA function.
// then at the end of the shader the default MaterialBasicShader has
// already read from the material's `map` texture (the depthTexture)
// which has depth in 'r' and assigned it to gl_FragColor
shader.fragmentShader = shader.fragmentShader.replace(
'#include <common>',
'#include <common>\n#include <packing>',
).replace(
'#include <fog_fragment>',
'gl_FragColor = packDepthToRGBA( gl_FragColor.r );',
);
};
const depthPlane = new THREE.Mesh(planeGeo, depthMaterial);
depthScene.add(depthPlane);
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
let depthValues = new Uint8Array(0);
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
renderTarget.setSize(canvas.width, canvas.height);
depthRenderTarget.setSize(canvas.width, canvas.height);
rtCamera.aspect = canvas.clientWidth / canvas.clientHeight;
rtCamera.updateProjectionMatrix();
}
cameraPole.rotation.y = time * .1;
// draw render target scene to render target
renderer.setRenderTarget(renderTarget);
renderer.render(rtScene, rtCamera);
renderer.setRenderTarget(null);
// render the depth texture to another render target
renderer.setRenderTarget(depthRenderTarget);
renderer.render(depthScene, camera);
renderer.setRenderTarget(null);
{
const {width, height} = depthRenderTarget;
const spaceNeeded = width * height * 4;
if (depthValues.length !== spaceNeeded) {
depthValues = new Uint8Array(spaceNeeded);
}
renderer.readRenderTargetPixels(
depthRenderTarget,
0,
0,
depthRenderTarget.width,
depthRenderTarget.height,
depthValues);
for (const {point, infoElem} of points) {
// flip Y: readRenderTargetPixels rows start at the bottom of the target
const offset = ((height - point[1] - 1) * width + point[0]) * 4;
// Reconstruct depth from the packed RGBA bytes.
// NOTE(review): three.js packDepthToRGBA stores the most-significant part
// of the value in the alpha channel; this decode never reads
// depthValues[offset + 3], so it may drop the coarse portion of the
// depth — verify against unpackRGBAToDepth in the three.js <packing> chunk.
const depth = depthValues[offset ] * ((255 / 256) / (256 * 256 * 256)) +
depthValues[offset + 1] * ((255 / 256) / (256 * 256)) +
depthValues[offset + 2] * ((255 / 256) / 256);
infoElem.textContent = `position : ${point[0]}, ${point[1]}
z depth : ${depth.toFixed(3)}`;
}
}
// render the color texture to the canvas
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
</script>
Note depthTexture uses a webgl extension which is an optional feature not found on all devices
To work around that would require drawing the scene twice. Once with your normal materials and then again to a color render target using the MeshDepthMaterial.

Animated wireframe lines

I'm just curious if anyone has an idea how to achieve such wireframe "fade in" drawing line by line effect?
Maybe not exact but similar to such svg animation to make it more clear and easier to visualise https://maxwellito.github.io/vivus/
Webgl example here https://www.orano.group/experience/innovation/en/slider if you switch between the slides.
You need to give every element you want to draw a number in the order you want them drawn. For example if you want to draw a wireframe, pass in a number for each vertex in the order you want them drawn, pass that number from the vertex shader to the fragment shader, then pass in a time. If the vertex's number is greater than the time, discard (or in some other way don't draw).
Example:
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 25;
const scene = new THREE.Scene();
scene.background = new THREE.Color('white');
const objects = [];
{
const width = 8;
const height = 8;
const depth = 8;
// using edges just to get rid of the lines triangles
const geometry = new THREE.EdgesGeometry(new THREE.BoxBufferGeometry(width, height, depth));
const numVertices = geometry.getAttribute('position').count;
const counts = new Float32Array(numVertices);
// every 2 points is one line segment so we want the numbers to go
// 0, 1, 1, 2, 2, 3, 3, 4, 4, 5 etc
const numSegments = numVertices / 2;
for (let seg = 0; seg < numSegments; ++seg) {
const off = seg * 2;
counts[off + 0] = seg;
counts[off + 1] = seg + 1;
}
const itemSize = 1;
const normalized = false;
const colorAttrib = new THREE.BufferAttribute(counts, itemSize, normalized); geometry.addAttribute('count', colorAttrib);
const timeLineShader = {
uniforms: {
color: { value: new THREE.Color('red'), },
time: { value: 0 },
},
vertexShader: `
attribute float count;
varying float vCount;
void main() {
vCount = count;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1);
}
`,
fragmentShader: `
#include <common>
varying float vCount;
uniform vec3 color;
uniform float time;
void main() {
if (vCount > time) {
discard;
}
gl_FragColor = vec4(color, 1);
}
`,
};
const material = new THREE.ShaderMaterial(timeLineShader);
const mesh = new THREE.LineSegments(geometry, material);
scene.add(mesh);
objects.push(mesh);
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
objects.forEach((obj, ndx) => {
const speed = .1 + ndx * .05;
const rot = time * speed;
obj.rotation.x = rot;
obj.rotation.y = rot;
obj.material.uniforms.time.value = (time * 4) % 15;
});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r98/three.min.js"></script>
If you want multiple objects to draw consecutively just adjust the time for each one
'use strict';
/* global THREE */
function main() {
const canvas = document.querySelector('#c');
const renderer = new THREE.WebGLRenderer({canvas: canvas});
const fov = 40;
const aspect = 2; // the canvas default
const near = 0.1;
const far = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
camera.position.z = 15;
const scene = new THREE.Scene();
scene.background = new THREE.Color('white');
const objects = [];
{
const width = 2;
const height = 2;
const depth = 2;
// using edges just to get rid of the lines triangles
const geometry = new THREE.EdgesGeometry(new THREE.BoxBufferGeometry(width, height, depth));
const numVertices = geometry.getAttribute('position').count;
const counts = new Float32Array(numVertices);
// every 2 points is one line segment so we want the numbers to go
// 0, 1, 1, 2, 2, 3, 3, 4, 4, 5 etc
const numSegments = numVertices / 2;
for (let seg = 0; seg < numSegments; ++seg) {
const off = seg * 2;
counts[off + 0] = seg;
counts[off + 1] = seg + 1;
}
const itemSize = 1;
const normalized = false;
const colorAttrib = new THREE.BufferAttribute(counts, itemSize, normalized); geometry.addAttribute('count', colorAttrib);
const timeLineShader = {
vertexShader: `
attribute float count;
varying float vCount;
void main() {
vCount = count;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1);
}
`,
fragmentShader: `
#include <common>
varying float vCount;
uniform vec3 color;
uniform float time;
void main() {
if (vCount > time) {
discard;
}
gl_FragColor = vec4(color, 1);
}
`,
};
for (let x = -2; x <= 2; x += 1) {
timeLineShader.uniforms = {
color: { value: new THREE.Color('red'), },
time: { value: 0 },
};
const material = new THREE.ShaderMaterial(timeLineShader);
const mesh = new THREE.LineSegments(geometry, material);
scene.add(mesh);
mesh.position.x = x * 4;
objects.push(mesh);
}
}
function resizeRendererToDisplaySize(renderer) {
const canvas = renderer.domElement;
const width = canvas.clientWidth;
const height = canvas.clientHeight;
const needResize = canvas.width !== width || canvas.height !== height;
if (needResize) {
renderer.setSize(width, height, false);
}
return needResize;
}
function render(time) {
time *= 0.001;
if (resizeRendererToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
objects.forEach((obj, ndx) => {
const rotSpeed = .1;
const rot = time * rotSpeed;
obj.rotation.x = rot;
obj.rotation.y = rot;
const segmentsPer = 12;
const speed = 8;
const totalTime = segmentsPer * objects.length + 5 * speed;
obj.material.uniforms.time.value = ((time * speed) % totalTime) - ndx * segmentsPer;
});
renderer.render(scene, camera);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
#c { width: 100vw; height: 100vh; display: block; }
<canvas id="c"></canvas>
<script src="https://threejsfundamentals.org/threejs/resources/threejs/r98/three.min.js"></script>
Note that using a count will make each segment take the same amount of time to appear. If you want them to take longer by distance, then instead of adding 1 to each segment you'd need to add the distance to the next point
distanceSoFar = 0;
for each segment
data.push(distanceSoFar);
distanceSoFar += distance(segmentStartPosition, segmentEndPosition);
data.push(distanceSoFar);
}

Canvas/WebGL 2D tilemap grid artifacts

I am creating a simple 2D web game that works with your typical tile map and sprites.
The twist is that I want smooth camera controls, both translation and scaling (zooming).
I tried using both the Canvas 2D API, and WebGL, and in both I simply cannot avoid the bleeding grid line artifacts, while also supporting zooming properly.
If it matters, all of my tiles are of size 1, and scaled to whatever size is needed, all of their coordinates are integers, and I am using a texture atlas.
Here's an example picture using my WebGL code, where the thin red/white lines are not wanted.
I remember writing sprite tile maps years ago with desktop GL, ironically using similar code (more or less equivalent to what I could do with WebGL 2), and it never had any of these issues.
I am considering to try DOM based elements next, but I fear it will not feel or look smooth.
One solution is to draw the tiles in the fragment shader
So you have your map, say a Uint32Array. Break it down into units of 4 bytes each. First 2 bytes are the tile ID, last byte is flags
As you walk across the quad for each pixel you lookup in the tilemap texture which tile it is, then you use that to compute UV coordinates to get pixels from that tile out of the texture of tiles. If your texture of tiles has gl.NEAREST sampling set then you'll never get any bleeding
Note that unlike traditional tilemaps the ids of each tile is the X,Y coordinate of the tile in the tile texture. In other words if your tile texture has 16x8 tiles across and you want your map to show the tile 7 over and 4 down then the id of that tile is 7,4 (first byte 7, second byte 4) where as in a traditional CPU based system the tile id would probably be 4*16+7 or 71 (the 71st tile). You could add code to the shader to do more traditional indexing but since the shader has to convert the id into 2d texture coords it just seemed easier to use 2d ids.
const vs = `
attribute vec4 position;
//attribute vec4 texcoord; - since position is a unit square just use it for texcoords
uniform mat4 u_matrix;
uniform mat4 u_texMatrix;
varying vec2 v_texcoord;
void main() {
gl_Position = u_matrix * position;
// v_texcoord = (u_texMatrix * texccord).xy;
v_texcoord = (u_texMatrix * position).xy;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_tilemap;
uniform sampler2D u_tiles;
uniform vec2 u_tilemapSize;
uniform vec2 u_tilesetSize;
varying vec2 v_texcoord;
void main() {
vec2 tilemapCoord = floor(v_texcoord);
vec2 texcoord = fract(v_texcoord);
vec2 tileFoo = fract((tilemapCoord + vec2(0.5, 0.5)) / u_tilemapSize);
vec4 tile = floor(texture2D(u_tilemap, tileFoo) * 256.0);
float flags = tile.w;
float xflip = step(128.0, flags);
flags = flags - xflip * 128.0;
float yflip = step(64.0, flags);
flags = flags - yflip * 64.0;
float xySwap = step(32.0, flags);
if (xflip > 0.0) {
texcoord = vec2(1.0 - texcoord.x, texcoord.y);
}
if (yflip > 0.0) {
texcoord = vec2(texcoord.x, 1.0 - texcoord.y);
}
if (xySwap > 0.0) {
texcoord = texcoord.yx;
}
vec2 tileCoord = (tile.xy + texcoord) / u_tilesetSize;
vec4 color = texture2D(u_tiles, tileCoord);
if (color.a <= 0.1) {
discard;
}
gl_FragColor = color;
}
`;
const tileWidth = 32;
const tileHeight = 32;
const tilesAcross = 8;
const tilesDown = 4;
const m4 = twgl.m4;
const gl = document.querySelector('#c').getContext('webgl');
// compile shaders, link, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// gl.createBuffer, bindBuffer, bufferData
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
0, 0,
1, 0,
0, 1,
0, 1,
1, 0,
1, 1,
],
},
});
// r(n) -> random in [0, n); r(a, b) -> random in [a, b).
function r(min, max) {
  if (max === undefined) {
    return min * Math.random();
  }
  return min + (max - min) * Math.random();
}
// make some tiles
// Draw the tileset into an offscreen 2D canvas: each tile gets a random
// hue and a single character label so flips/swaps are easy to spot.
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = tileWidth * tilesAcross;
ctx.canvas.height = tileHeight * tilesDown;
ctx.font = "bold 24px sans-serif";
ctx.textAlign = "center";
ctx.textBaseline = "middle";
const f = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ~';
for (let y = 0; y < tilesDown; ++y) {
for (let x = 0; x < tilesAcross; ++x) {
const color = `hsl(${r(360) | 0},${r(50,100)}%,50%)`;
ctx.fillStyle = color;
const tx = x * tileWidth;
const ty = y * tileHeight;
ctx.fillRect(tx, ty, tileWidth, tileHeight);
ctx.fillStyle = "#FFF";
ctx.fillText(f.substr(y * 8 + x, 1), tx + tileWidth * .5, ty + tileHeight * .5);
}
}
document.body.appendChild(ctx.canvas);
// Upload the tileset; NEAREST filtering keeps tile edges crisp.
const tileTexture = twgl.createTexture(gl, {
src: ctx.canvas,
minMag: gl.NEAREST,
});
// make a tilemap
// One RGBA texel per map cell: R,G = tile x,y within the tileset;
// A = flag bits (128 = x-flip, 64 = y-flip, 32 = xy-swap), matching the
// fragment shader's decoding of tile.w.
const mapWidth = 400;
const mapHeight = 300;
const tilemap = new Uint32Array(mapWidth * mapHeight);
const tilemapU8 = new Uint8Array(tilemap.buffer); // byte view uploaded to GL
const totalTiles = tilesAcross * tilesDown;
for (let i = 0; i < tilemap.length; ++i) {
const off = i * 4;
// mostly tile 9
const tileId = r(10) < 1
? (r(totalTiles) | 0)
: 9;
tilemapU8[off + 0] = tileId % tilesAcross;
tilemapU8[off + 1] = tileId / tilesAcross | 0;
const xFlip = r(2) | 0;
const yFlip = r(2) | 0;
const xySwap = r(2) | 0;
tilemapU8[off + 3] =
(xFlip ? 128 : 0) |
(yFlip ? 64 : 0) |
(xySwap ? 32 : 0) ;
}
const mapTexture = twgl.createTexture(gl, {
src: tilemapU8,
width: mapWidth,
minMag: gl.NEAREST,
});
// Cosine ease: maps t (radians) to [0, 1]; ease(0) = 1, ease(PI) = 0.
function ease(t) {
  return 0.5 + Math.cos(t) * 0.5;
}
// Linear interpolation from a to b by fraction t.
function lerp(a, b, t) {
  const delta = b - a;
  return a + delta * t;
}
// Interpolate from a to b with a cosine-eased fraction.
function easeLerp(a, b, t) {
  const eased = ease(t);
  return lerp(a, b, eased);
}
// Render loop: draws the full-canvas quad; which part of the tilemap is
// visible is controlled entirely by u_texMatrix in the shader.
function render(time) {
time *= 0.001; // convert to seconds;
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearColor(0, 1, 0, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// Pixel-space ortho projection, then scale the 0..1 unit quad up to
// the full canvas size.
const mat = m4.ortho(0, gl.canvas.width, gl.canvas.height, 0, -1, 1);
m4.scale(mat, [gl.canvas.width, gl.canvas.height, 1], mat);
// Animated zoom; same curve for x and y keeps tiles square.
const scaleX = easeLerp(.5, 2, time * 1.1);
const scaleY = easeLerp(.5, 2, time * 1.1);
const dispScaleX = 1;
const dispScaleY = 1;
// origin of scale/rotation
const originX = gl.canvas.width * .5;
const originY = gl.canvas.height * .5;
// scroll position in pixels
const scrollX = time % (mapWidth * tileWidth );
const scrollY = time % (mapHeight * tileHeight);
const rotation = time;
// Texture-coordinate matrix: translate/rotate/scale selects the
// visible window of the tilemap (order matters here).
const tmat = m4.identity();
m4.translate(tmat, [scrollX, scrollY, 0], tmat);
m4.rotateZ(tmat, rotation, tmat);
m4.scale(tmat, [
gl.canvas.width / tileWidth / scaleX * (dispScaleX),
gl.canvas.height / tileHeight / scaleY * (dispScaleY),
1,
], tmat);
m4.translate(tmat, [
-originX / gl.canvas.width,
-originY / gl.canvas.height,
0,
], tmat);
twgl.setUniforms(programInfo, {
u_matrix: mat,
u_texMatrix: tmat,
u_tilemap: mapTexture,
u_tiles: tileTexture,
u_tilemapSize: [mapWidth, mapHeight],
u_tilesetSize: [tilesAcross, tilesDown],
});
gl.drawArrays(gl.TRIANGLES, 0, 6);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; }
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>

Three.js: Determining World coordinates of mouse position

I have a Three.js scene with points and am trying to figure out the relationship between my points' positions and screen coordinates. I thought I could use the function #WestLangley provided to a previous question but implementing this function has raised some confusion.
In the scene below, I'm storing the x coordinates of the left and right-most points in world.bb.x, and am logging the world coordinates of the cursor each time the mouse moves. However, when I mouse to the left and right-most points, the world coordinates do not match the min or max x-coordinate values in world.bb.x, which is not what I expected.
Do others know what I can do to figure out the world coordinates of my cursor at any given time? Any help others can offer is greatly appreciated!
// Scene wrapper: builds the scene graph, camera, renderer and controls,
// adds the instanced points, then starts the render loop.
// Note the init order matters: controls need camera + renderer first.
function World() {
this.scene = this.getScene();
this.camera = this.getCamera();
this.renderer = this.getRenderer();
this.controls = this.getControls();
this.color = new THREE.Color(); // scratch color reused by addPoints
this.addPoints();
this.render();
}
World.prototype.getScene = function() {
  // Create the scene and give it a light grey background.
  const threeScene = new THREE.Scene();
  threeScene.background = new THREE.Color(0xefefef);
  return threeScene;
}
World.prototype.getCamera = function() {
  // 75-degree perspective camera whose aspect matches the render target.
  const { w, h } = getRenderSize();
  const camera = new THREE.PerspectiveCamera(75, w / h, 0.1, 100000);
  camera.position.set(0, 1, -10); // repositioned later by addPoints
  return camera;
}
World.prototype.getRenderer = function() {
  // Antialiased renderer sized to #gl-target and attached inside it.
  const { w, h } = getRenderSize();
  const renderer = new THREE.WebGLRenderer({ antialias: true });
  renderer.setPixelRatio(window.devicePixelRatio); // retina displays
  renderer.setSize(w, h); // set w,h
  find('#gl-target').appendChild(renderer.domElement);
  return renderer;
}
World.prototype.getControls = function() {
  // Trackball controls bound to the renderer's canvas.
  const { camera, renderer } = this;
  const controls = new THREE.TrackballControls(camera, renderer.domElement);
  controls.zoomSpeed = 0.4;
  controls.panSpeed = 0.4;
  return controls;
}
// Render loop: schedule the next frame, draw, then let the controls
// update the camera from any pending user input.
World.prototype.render = function() {
requestAnimationFrame(this.render.bind(this));
this.renderer.render(this.scene, this.camera);
this.controls.update();
}
// Convert a mouse event into world-space coordinates on the z = 0 plane.
//
// Fix: NDC y runs bottom-up (+1 at the top of the viewport, -1 at the
// bottom) while clientY runs top-down, so the y term must be negated;
// the original `(e.clientY / window.innerHeight) * 2 + 1` mapped the
// bottom of the window to +3 instead of -1, which is why the logged
// coords never matched world.bb.x at the extremes.
// NOTE(review): dividing by window dimensions is only valid while the
// canvas fills the window; see the element-relative version below.
World.prototype.getMouseWorldCoords = function(e) {
  var vector = new THREE.Vector3(),
      camera = world.camera,
      x = (e.clientX / window.innerWidth) * 2 - 1,
      y = -(e.clientY / window.innerHeight) * 2 + 1;
  vector.set(x, y, 0.5);
  vector.unproject(camera);
  // Cast a ray from the camera through the unprojected point and
  // intersect it with the z = 0 plane.
  var direction = vector.sub(camera.position).normalize(),
      distance = - camera.position.z / direction.z,
      scaled = direction.multiplyScalar(distance),
      coords = camera.position.clone().add(scaled);
  return {
    x: coords.x, y: coords.y,
  };
}
// Build one THREE.Points mesh holding n instanced points. Each point
// carries a translation, a display color, and a unique "uid" color that
// encodes the point's index (presumably for GPU picking via the PICKING
// shader define - TODO confirm against the shaders).
World.prototype.addPoints = function() {
// this geometry builds a blueprint and many copies of the blueprint
var IBG = THREE.InstancedBufferGeometry,
BA = THREE.BufferAttribute,
IBA = THREE.InstancedBufferAttribute,
Vec3 = THREE.Vector3,
Arr = Float32Array;
// add data for each observation; n = num observations
var geometry = new IBG(),
n = 10000,
rootN = n**(1/2),
// find max min for each dim to center camera
xMax = Number.NEGATIVE_INFINITY,
xMin = Number.POSITIVE_INFINITY,
yMax = Number.NEGATIVE_INFINITY,
yMin = Number.POSITIVE_INFINITY;
var translations = new Arr(n * 3),
colors = new Arr(n * 3),
uidColors = new Arr(n * 3),
translationIterator = 0,
colorIterator = 0,
uidColorIterator = 0;
var colorMap = this.getColorMap();
// Lay the points out in 20 horizontal sine-wave bands; the band index
// picks the color from the 20-entry palette.
for (var i=0; i<n; i++) {
var x = Math.sin(i) * 4,
y = Math.floor(i / (n/20)) * 0.3,
color = colorMap[ Math.floor(i / (n/20)) ],
uidColor = this.color.setHex(i + 1); // i+1 so 0 can mean "no point"
if (x > xMax) xMax = x;
if (x < xMin) xMin = x;
if (y > yMax) yMax = y;
if (y < yMin) yMin = y;
translations[translationIterator++] = x;
translations[translationIterator++] = y;
translations[translationIterator++] = 0;
colors[colorIterator++] = color.r / 255;
colors[colorIterator++] = color.g / 255;
colors[colorIterator++] = color.b / 255;
uidColors[uidColorIterator++] = uidColor.r;
uidColors[uidColorIterator++] = uidColor.g;
uidColors[uidColorIterator++] = uidColor.b;
}
// store the min and max coords in each dimension
this.bb = {
x: {
min: xMin,
max: xMax,
},
y: {
min: yMin,
max: yMax,
}
}
// center the camera
this.center = {
x: (xMax + xMin) / 2,
y: (yMax + yMin) / 2
}
this.camera.position.set(this.center.x, this.center.y, -6);
this.camera.lookAt(this.center.x, this.center.y, 0);
this.controls.target = new Vec3(this.center.x, this.center.y, 0);
// add attributes
geometry.addAttribute('position', new BA( new Arr([0, 0, 0]), 3));
geometry.addAttribute('translation', new IBA(translations, 3, 1) );
geometry.addAttribute('color', new IBA(colors, 3, 1) );
geometry.addAttribute('uidColor', new IBA(uidColors, 3, 1) );
var material = new THREE.RawShaderMaterial({
vertexShader: find('#vertex-shader').textContent,
fragmentShader: find('#fragment-shader').textContent,
});
var mesh = new THREE.Points(geometry, material);
mesh.frustumCulled = false; // prevent the mesh from being clipped on drag
this.scene.add(mesh);
}
// Build a lookup from band index (0-19) to an {r, g, b} color object.
World.prototype.getColorMap = function() {
  // Render one color channel as a zero-padded two-digit hex string.
  function toHex(c) {
    const hex = c.toString(16);
    return hex.length === 1 ? '0' + hex : hex;
  }
  // Inverse of hexToRgb (kept for symmetry; not used below).
  function rgbToHex(r, g, b) {
    return '#' + toHex(r) + toHex(g) + toHex(b);
  }
  // Parse '#rrggbb' into channel integers; null if malformed.
  function hexToRgb(hex) {
    const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
    if (!result) return null;
    return {
      r: parseInt(result[1], 16),
      g: parseInt(result[2], 16),
      b: parseInt(result[3], 16),
    };
  }
  const hexes = [
    '#fe4445','#ff583b','#ff6a2f','#ff7a20','#ff8800',
    '#ff9512','#ffa31f','#ffaf2a','#ffbb34',
    '#cfc522','#99cc01',
    '#91c14a','#85b66e','#73ac8f','#57a3ac','#0099cb',
    '#14a0d1','#20a7d8','#2aaedf','#33b5e6'
  ];
  const colorMap = {};
  for (const [idx, c] of hexes.entries()) {
    colorMap[idx] = hexToRgb(c);
  }
  return colorMap;
}
/**
 * Helpers
 **/
// Current CSS size of the render target element.
function getRenderSize() {
  const elem = find('#gl-target');
  return { w: elem.clientWidth, h: elem.clientHeight };
}
// Shorthand for document.querySelector.
function find(selector) {
  return document.querySelector(selector);
}
/**
 * Main
 **/
var world = new World();
// Controls are disabled so the camera stays put while comparing the
// logged mouse coords against the stored x bounds.
world.controls.enabled = false;
find('canvas').addEventListener('mousemove', function(e) {
find('#bar').style.left = e.clientX + 'px';
var coords = world.getMouseWorldCoords(e);
console.log(coords, world.bb.x);
})
html, body {
width: 100%;
height: 100%;
background: #000;
}
body {
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
}
.gl-container {
position: relative;
}
#gl-target {
width:700px;
height:400px
}
#bar {
width: 1px;
height: 100%;
display: inline-block;
position: absolute;
left: 30px;
background: red;
}
<script src='https://cdnjs.cloudflare.com/ajax/libs/three.js/95/three.min.js'></script>
<script src='https://rawgit.com/YaleDHLab/pix-plot/master/assets/js/trackball-controls.js'></script>
<script type='x-shader/x-vertex' id='vertex-shader'>
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec3 translation;
#ifdef PICKING
attribute vec3 uidColor;
varying vec3 vUidColor;
#else
attribute vec3 color;
#endif
varying vec3 vColor;
void main() {
#ifdef PICKING
vUidColor = uidColor;
#else
vColor = color;
#endif
// set point position
vec3 raw = position + translation;
vec4 pos = projectionMatrix * modelViewMatrix * vec4(raw, 1.0);
gl_Position = pos;
// set point size
gl_PointSize = 10.0;
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
precision highp float;
#ifdef PICKING
varying vec3 vUidColor;
#else
varying vec3 vColor;
#endif
void main() {
// make point circular
vec2 coord = gl_PointCoord - vec2(0.5);
if (length(coord) > 0.5) discard;
// color the point
#ifdef PICKING
gl_FragColor = vec4(vUidColor, 1.0);
#else
gl_FragColor = vec4(vColor, 1.0);
#endif
}
</script>
<div class='gl-container'>
<div id='bar'></div>
<div id='gl-target'></div>
</div>
Aha, instead of dividing the event x and y coordinates by the window width (which only applies to canvases that extend through the full window height and width), I need to divide the event x and y coordinates by the canvas's width and height!
// Second version of the scene wrapper (no TrackballControls here).
function World() {
this.scene = this.getScene();
this.camera = this.getCamera();
this.renderer = this.getRenderer();
this.color = new THREE.Color(); // scratch color reused by addPoints
this.addPoints();
this.render();
}
World.prototype.getScene = function() {
  // Fresh scene on a light grey background.
  const scene = new THREE.Scene();
  scene.background = new THREE.Color(0xefefef);
  return scene;
}
World.prototype.getCamera = function() {
  // Perspective camera; aspect follows #gl-target's CSS size.
  const size = getRenderSize();
  const cam = new THREE.PerspectiveCamera(75, size.w / size.h, 0.1, 100000);
  cam.position.set(0, 1, -10); // repositioned later by addPoints
  return cam;
}
World.prototype.getRenderer = function() {
  // Antialiased renderer sized to the target element and attached to it.
  const size = getRenderSize();
  const glRenderer = new THREE.WebGLRenderer({ antialias: true });
  glRenderer.setPixelRatio(window.devicePixelRatio); // retina displays
  glRenderer.setSize(size.w, size.h); // set w,h
  find('#gl-target').appendChild(glRenderer.domElement);
  return glRenderer;
}
// Render loop (this version has no controls to update).
World.prototype.render = function() {
requestAnimationFrame(this.render.bind(this));
this.renderer.render(this.scene, this.camera);
}
// Convert a mouse event into world coordinates on the z = 0 plane.
//
// Fixes two issues with the original:
// 1. NDC y runs bottom-up (+1 at the top, -1 at the bottom) while
//    clientY runs top-down, so the y term must be negated; the old
//    `* 2 + 1` mapped the element's bottom edge to +3 instead of -1.
// 2. clientX/clientY are window-relative, so subtract the render
//    target's page offset before normalizing. This is a no-op while
//    the element sits at the page's top-left (as in this layout), so
//    the change is backward compatible here.
World.prototype.getMouseWorldCoords = function(e) {
  var elem = find('#gl-target'),
      rect = elem.getBoundingClientRect(),
      vector = new THREE.Vector3(),
      camera = world.camera,
      x = ((e.clientX - rect.left) / rect.width) * 2 - 1,
      y = -((e.clientY - rect.top) / rect.height) * 2 + 1;
  vector.set(x, y, 0.5);
  vector.unproject(camera);
  // Ray from the camera through the unprojected point, intersected
  // with the z = 0 plane.
  var direction = vector.sub(camera.position).normalize(),
      distance = - camera.position.z / direction.z,
      scaled = direction.multiplyScalar(distance),
      coords = camera.position.clone().add(scaled);
  return {
    x: coords.x,
    y: coords.y,
  };
}
// Build one THREE.Points mesh holding n instanced points (same as v1
// minus the controls.target update). Each point carries a translation,
// a display color, and a unique "uid" color encoding its index.
World.prototype.addPoints = function() {
// this geometry builds a blueprint and many copies of the blueprint
var IBG = THREE.InstancedBufferGeometry,
BA = THREE.BufferAttribute,
IBA = THREE.InstancedBufferAttribute,
Vec3 = THREE.Vector3,
Arr = Float32Array;
// add data for each observation; n = num observations
var geometry = new IBG(),
n = 10000,
rootN = n**(1/2),
// find max min for each dim to center camera
xMax = Number.NEGATIVE_INFINITY,
xMin = Number.POSITIVE_INFINITY,
yMax = Number.NEGATIVE_INFINITY,
yMin = Number.POSITIVE_INFINITY;
var translations = new Arr(n * 3),
colors = new Arr(n * 3),
uidColors = new Arr(n * 3),
translationIterator = 0,
colorIterator = 0,
uidColorIterator = 0;
var colorMap = this.getColorMap();
// 20 horizontal sine-wave bands; band index picks the palette color.
for (var i=0; i<n; i++) {
var x = Math.sin(i) * 4,
y = Math.floor(i / (n/20)) * 0.3,
color = colorMap[ Math.floor(i / (n/20)) ],
uidColor = this.color.setHex(i + 1); // i+1 so 0 can mean "no point"
if (x > xMax) xMax = x;
if (x < xMin) xMin = x;
if (y > yMax) yMax = y;
if (y < yMin) yMin = y;
translations[translationIterator++] = x;
translations[translationIterator++] = y;
translations[translationIterator++] = 0;
colors[colorIterator++] = color.r / 255;
colors[colorIterator++] = color.g / 255;
colors[colorIterator++] = color.b / 255;
uidColors[uidColorIterator++] = uidColor.r;
uidColors[uidColorIterator++] = uidColor.g;
uidColors[uidColorIterator++] = uidColor.b;
}
// store the min and max coords in each dimension
this.bb = {
x: {
min: xMin,
max: xMax,
},
y: {
min: yMin,
max: yMax,
}
}
// center the camera
this.center = {
x: (xMax + xMin) / 2,
y: (yMax + yMin) / 2
}
this.camera.position.set(this.center.x, this.center.y, -6);
this.camera.lookAt(this.center.x, this.center.y, 0);
// add attributes
geometry.addAttribute('position', new BA( new Arr([0, 0, 0]), 3));
geometry.addAttribute('translation', new IBA(translations, 3, 1) );
geometry.addAttribute('color', new IBA(colors, 3, 1) );
geometry.addAttribute('uidColor', new IBA(uidColors, 3, 1) );
var material = new THREE.RawShaderMaterial({
vertexShader: find('#vertex-shader').textContent,
fragmentShader: find('#fragment-shader').textContent,
});
var mesh = new THREE.Points(geometry, material);
mesh.frustumCulled = false; // prevent the mesh from being clipped on drag
this.scene.add(mesh);
}
// Build a lookup from band index (0-19) to an {r, g, b} color object.
World.prototype.getColorMap = function() {
// Zero-pad one channel to two hex digits.
function toHex(c) {
var hex = c.toString(16);
return hex.length == 1 ? '0' + hex : hex;
}
// Inverse of hexToRgb (unused below, kept for symmetry).
function rgbToHex(r, g, b) {
return '#' + toHex(r) + toHex(g) + toHex(b);
}
// Parse '#rrggbb' into channel integers; null if malformed.
function hexToRgb(hex) {
var result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
return result ? {
r: parseInt(result[1], 16),
g: parseInt(result[2], 16),
b: parseInt(result[3], 16),
} : null;
}
var hexes = [
'#fe4445','#ff583b','#ff6a2f','#ff7a20','#ff8800',
'#ff9512','#ffa31f','#ffaf2a','#ffbb34',
'#cfc522','#99cc01',
'#91c14a','#85b66e','#73ac8f','#57a3ac','#0099cb',
'#14a0d1','#20a7d8','#2aaedf','#33b5e6'
]
var colorMap = {};
hexes.forEach(function(c, idx) { colorMap[idx] = hexToRgb(c) })
return colorMap;
}
/**
 * Helpers
 **/
// CSS size of the render target element, as {w, h}.
function getRenderSize() {
  const target = find('#gl-target');
  return {
    w: target.clientWidth,
    h: target.clientHeight,
  };
}
// Thin wrapper over document.querySelector.
function find(selector) {
  return document.querySelector(selector);
}
/**
 * Main
 **/
var world = new World();
// On every mousemove: move the red marker bar under the cursor and log
// the cursor's world coords next to the stored x bounds for comparison.
find('canvas').addEventListener('mousemove', function(e) {
find('#bar').style.left = e.clientX + 'px';
var coords = world.getMouseWorldCoords(e);
console.log(coords, world.bb.x);
})
html, body {
width: 100%;
height: 100%;
background: #000;
}
body {
margin: 0;
overflow: hidden;
}
canvas {
width: 100%;
height: 100%;
}
.gl-container {
position: relative;
}
#gl-target {
width:700px;
height:400px
}
#bar {
width: 1px;
height: 100%;
display: inline-block;
position: absolute;
left: 30px;
background: red;
}
<div class='gl-container'>
<div id='bar'></div>
<div id='gl-target'></div>
</div>
<script src='https://cdnjs.cloudflare.com/ajax/libs/three.js/95/three.min.js'></script>
<script type='x-shader/x-vertex' id='vertex-shader'>
precision highp float;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
attribute vec3 position;
attribute vec3 translation;
#ifdef PICKING
attribute vec3 uidColor;
varying vec3 vUidColor;
#else
attribute vec3 color;
#endif
varying vec3 vColor;
void main() {
#ifdef PICKING
vUidColor = uidColor;
#else
vColor = color;
#endif
// set point position
vec3 raw = position + translation;
vec4 pos = projectionMatrix * modelViewMatrix * vec4(raw, 1.0);
gl_Position = pos;
// set point size
gl_PointSize = 10.0;
}
</script>
<script type='x-shader/x-fragment' id='fragment-shader'>
precision highp float;
#ifdef PICKING
varying vec3 vUidColor;
#else
varying vec3 vColor;
#endif
void main() {
// make point circular
vec2 coord = gl_PointCoord - vec2(0.5);
if (length(coord) > 0.5) discard;
// color the point
#ifdef PICKING
gl_FragColor = vec4(vUidColor, 1.0);
#else
gl_FragColor = vec4(vColor, 1.0);
#endif
}
</script>

Warp / curve all vertices around a pivot point / axis (Three.js / GLSL)

I'm trying to work out how to warp all coordinates in a Three.js scene around a specific pivot point / axis. The best way to describe it is as if I was to place a tube somewhere in the scene and everything else in the scene would curve around that axis and keep the same distance from that axis.
If it helps, this diagram is what I'm trying to achieve. The top part is as if you were looking at the scene from the side and the bottom part is as if you were looking at it from a perspective. The red dot / line is where the pivot point is.
To further complicate matters, I'd like to stop the curve / warp from wrapping back on itself, so the curve stops when it's horizontal or vertical like the top-right example in the diagram.
Any insight into how to achieve this using GLSL shaders, ideally in Three.js but I'll try to translate if they can be described clearly otherwise?
I'm also open to alternative approaches to this as I'm unsure how best to describe what I'm after. Basically I want an inverted "curved world" effect where the scene is bending up and away from you.
First I'd do it in 2D just like your top diagram.
I have no idea if this is the correct way to do this or even a good way but doing it in 2D seemed easier than 3D, and besides, the effect you want is actually 2D: X is not changing at all, only Y and Z, so solving it in 2D seems like it would lead to a solution.
Basically we choose a radius for a circle. At that radius for every unit of X past the circle's center we want to wrap one horizontal unit to one unit around the circle. Given the radius we know the distance around the circle is 2 * PI * radius so we can easily compute how far to rotate around our circle to get one unit. It's just 1 / circumference * Math.PI * 2 We do that for some specified distance past the circle's center
// 2D proof of concept: rows of dots get rotated around a pivot circle
// once they pass the pivot's x position.
const m4 = twgl.m4;
const v3 = twgl.v3;
const ctx = document.querySelector('canvas').getContext('2d');
const gui = new dat.GUI();
resizeToDisplaySize(ctx.canvas);
// GUI-tweakable state: pivot position, circle radius, and the
// horizontal distance over which the bend is blended in.
const g = {
rotationPoint: {x: 100, y: ctx.canvas.height / 2 - 50},
radius: 50,
range: 60,
};
gui.add(g.rotationPoint, 'x', 0, ctx.canvas.width).onChange(render);
gui.add(g.rotationPoint, 'y', 0, ctx.canvas.height).onChange(render);
gui.add(g, 'radius', 1, 100).onChange(render);
gui.add(g, 'range', 0, 300).onChange(render);
render();
window.addEventListener('resize', render);
// Redraw the 2D diagram: red pivot circle plus rows of black dots.
// Dots left of the pivot stay on their straight lines; past it they are
// progressively rotated around the pivot, up to range/radius radians.
function render() {
resizeToDisplaySize(ctx.canvas);
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
const start = g.rotationPoint.x;
// Full bend angle: arc length `range` along a circle of `radius`.
const curveAmount = g.range / g.radius;
const y = ctx.canvas.height / 2;
drawDot(ctx, g.rotationPoint.x, g.rotationPoint.y, 'red');
ctx.beginPath();
ctx.arc(g.rotationPoint.x, g.rotationPoint.y, g.radius, 0, Math.PI * 2, false);
ctx.strokeStyle = 'red';
ctx.stroke();
ctx.fillStyle = 'black';
const invRange = g.range > 0 ? 1 / g.range : 0; // so we don't divide by 0
for (let x = 0; x < ctx.canvas.width; x += 5) {
for (let yy = 0; yy <= 30; yy += 10) {
// Bend direction depends on which side of the row the pivot is on.
const sign = Math.sign(g.rotationPoint.y - y);
const amountToApplyCurve = clamp((x - start) * invRange, 0, 1);
// Rotate around the pivot: move pivot to origin, rotate, move back.
let mat = m4.identity();
mat = m4.translate(mat, [g.rotationPoint.x, g.rotationPoint.y, 0]);
mat = m4.rotateZ(mat, curveAmount * amountToApplyCurve * sign);
mat = m4.translate(mat, [-g.rotationPoint.x, -g.rotationPoint.y, 0]);
const origP = [x, y + yy, 0];
// Pull the point back so arc length matches the straight distance.
origP[0] += -g.range * amountToApplyCurve;
const newP = m4.transformPoint(mat, origP);
drawDot(ctx, newP[0], newP[1], 'black');
}
}
}
// Paint a 3x3 square roughly centered on (x, y) in the given color.
function drawDot(ctx, x, y, color) {
  const size = 3;
  const half = 1;
  ctx.fillStyle = color;
  ctx.fillRect(x - half, y - half, size, size);
}
// Constrain v to the closed interval [min, max].
function clamp(v, min, max) {
  if (v < min) return min;
  if (v > max) return max;
  return v;
}
// Match the canvas drawing-buffer size to its CSS display size,
// touching width/height only when they are stale (assignment resets
// the canvas, so avoid it when nothing changed).
function resizeToDisplaySize(canvas) {
  const { clientWidth, clientHeight } = canvas;
  const stale = canvas.width !== clientWidth || canvas.height !== clientHeight;
  if (stale) {
    canvas.width = clientWidth;
    canvas.height = clientHeight;
  }
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<!-- using twgl just for its math library -->
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.7.2/dat.gui.min.js"></script>
Notice the only place that matches perfectly is when the radius touches a line of points. Inside the radius things will get pinched, outside they'll get stretched.
Putting that in a shader in the Z direction for actual use
// Three.js version: the same bend applied in a vertex shader along Z.
const renderer = new THREE.WebGLRenderer({
canvas: document.querySelector('canvas'),
});
const gui = new dat.GUI();
const scene = new THREE.Scene();
const fov = 75;
const aspect = 2; // the canvas default
const zNear = 1;
const zFar = 1000;
const camera = new THREE.PerspectiveCamera(fov, aspect, zNear, zFar);
// Two preset camera placements, toggled by the `side` GUI checkbox.
function lookSide() {
camera.position.set(-170, 35, 210);
camera.lookAt(0, 25, 210);
}
function lookIn() {
camera.position.set(0, 35, -50);
camera.lookAt(0, 25, 0);
}
{
scene.add(new THREE.HemisphereLight(0xaaaaaa, 0x444444, .5));
const light = new THREE.DirectionalLight(0xffffff, 1);
light.position.set(-1, 20, 4 - 15);
scene.add(light);
}
// Visual marker for the pivot: a paper-thin wireframe cylinder (reads
// as a disc) whose scale tracks the `radius` setting in render().
const point = function() {
const material = new THREE.MeshPhongMaterial({
color: 'red',
emissive: 'hsl(0,50%,25%)',
wireframe: true,
});
const radiusTop = 1;
const radiusBottom = 1;
const height = 0.001;
const radialSegments = 32;
const geo = new THREE.CylinderBufferGeometry(
radiusTop, radiusBottom, height, radialSegments);
const sphere = new THREE.Mesh(geo, material);
sphere.rotation.z = Math.PI * .5;
const mesh = new THREE.Object3D();
mesh.add(sphere);
scene.add(mesh);
mesh.position.y = 88;
mesh.position.z = 200;
return {
point: mesh,
rep: sphere,
};
}();
// Vertex shader source: transforms a vertex to view space, maps it into
// the pivot's frame (invCenter), then rotates it about X by an angle
// that ramps from 0 to range/radius over `range` units of z.
const vs = `
// -------------------------------------- [ VS ] ---
#define PI radians(180.0)
uniform mat4 center;
uniform mat4 invCenter;
uniform float range;
uniform float radius;
varying vec3 vNormal;
mat4 rotZ(float angleInRadians) {
float s = sin(angleInRadians);
float c = cos(angleInRadians);
return mat4(
c,-s, 0, 0,
s, c, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1);
}
mat4 rotX(float angleInRadians) {
float s = sin(angleInRadians);
float c = cos(angleInRadians);
return mat4(
1, 0, 0, 0,
0, c, s, 0,
0, -s, c, 0,
0, 0, 0, 1);
}
void main() {
float curveAmount = range / radius;
float invRange = range > 0.0 ? 1.0 / range : 0.0;
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0);
vec4 point = invCenter * mvPosition;
float amountToApplyCurve = clamp(point.z * invRange, 0.0, 1.0);
float s = sign(point.y);
mat4 mat = rotX(curveAmount * amountToApplyCurve * s);
point = center * mat * (point + vec4(0, 0, -range * amountToApplyCurve, 0));
vNormal = mat3(mat) * normalMatrix * normal;
gl_Position = projectionMatrix * point;
}
`;
// Fragment shader source: fixed-direction half-lambert lighting tinted
// by the per-material `color` uniform.
const fs = `
// -------------------------------------- [ FS ] ---
varying vec3 vNormal;
uniform vec3 color;
void main() {
vec3 light = vec3( 0.5, 2.2, 1.0 );
light = normalize( light );
float dProd = dot( vNormal, light ) * 0.5 + 0.5;
gl_FragColor = vec4( vec3( dProd ) * vec3( color ), 1.0 );
}
`;
// Uniform values shared by every bent material; render() refreshes them
// each frame so all meshes see the same pivot/radius/range.
const centerUniforms = {
radius: { value: 0 },
range: { value: 0 },
center: { value: new THREE.Matrix4() },
invCenter: { value: new THREE.Matrix4() },
};
// NOTE: mutates `uniforms` in place, merging in the shared center
// uniforms, and returns the same object.
function addUniforms(uniforms) {
return Object.assign(uniforms, centerUniforms);
}
// Ground plane using the bending shader.
{
const uniforms = addUniforms({
color: { value: new THREE.Color('hsl(100,50%,50%)') },
});
const material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: vs,
fragmentShader: fs,
});
const planeGeo = new THREE.PlaneBufferGeometry(1000, 1000, 100, 100);
const mesh = new THREE.Mesh(planeGeo, material);
mesh.rotation.x = Math.PI * -.5;
scene.add(mesh);
}
// Grid of randomly-stretched boxes, all sharing one bent material.
{
const uniforms = addUniforms({
color: { value: new THREE.Color('hsl(180,50%,50%)' ) },
});
const material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: vs,
fragmentShader: fs,
});
const boxGeo = new THREE.BoxBufferGeometry(10, 10, 10, 20, 20, 20);
for (let x = -41; x <= 41; x += 2) {
for (let z = 0; z <= 40; z += 2) {
const base = new THREE.Object3D();
const mesh = new THREE.Mesh(boxGeo, material);
mesh.position.set(0, 5, 0);
base.position.set(x * 10, 0, z * 10);
base.scale.y = 1 + Math.random() * 2;
base.add(mesh);
scene.add(base);
}
}
}
// GUI-tweakable bend settings.
const g = {
radius: 59,
range: 60,
side: true,
};
// Adapter exposing a radian-valued property in degrees via `.v`, so
// dat.GUI sliders can edit rotations in degrees.
class DegRadHelper {
  constructor(obj, prop) {
    this.obj = obj;
    this.prop = prop;
  }
  get v() {
    const radians = this.obj[this.prop];
    return THREE.Math.radToDeg(radians);
  }
  set v(degrees) {
    this.obj[this.prop] = THREE.Math.degToRad(degrees);
  }
}
// Wire the GUI; the scene only re-renders on change or resize - there
// is no continuous animation loop in this demo.
gui.add(point.point.position, 'z', -300, 300).onChange(render);
gui.add(point.point.position, 'y', -150, 300).onChange(render);
gui.add(g, 'radius', 1, 100).onChange(render);
gui.add(g, 'range', 0, 300).onChange(render);
gui.add(g, 'side').onChange(render);
// Rotation sliders edit radians but display degrees via DegRadHelper.
gui.add(new DegRadHelper(point.point.rotation, 'x'), 'v', -180, 180).name('rotX').onChange(render);
gui.add(new DegRadHelper(point.point.rotation, 'y'), 'v', -180, 180).name('rotY').onChange(render);
gui.add(new DegRadHelper(point.point.rotation, 'z'), 'v', -180, 180).name('rotZ').onChange(render);
render();
window.addEventListener('resize', render);
// Draw one frame; called on demand (GUI change / resize), not in a loop.
function render() {
if (resizeToDisplaySize(renderer)) {
const canvas = renderer.domElement;
camera.aspect = canvas.clientWidth / canvas.clientHeight;
camera.updateProjectionMatrix();
}
if (g.side) {
lookSide();
} else {
lookIn();
}
camera.updateMatrixWorld();
point.rep.scale.set(g.radius, g.radius, g.radius);
point.point.updateMatrixWorld();
// `center` = the pivot object's transform expressed in view space;
// the vertex shader bends vertices around this frame.
centerUniforms.center.value.multiplyMatrices(
camera.matrixWorldInverse, point.point.matrixWorld);
centerUniforms.invCenter.value.getInverse(centerUniforms.center.value);
centerUniforms.range.value = g.range;
centerUniforms.radius.value = g.radius;
renderer.render(scene, camera);
}
// Sync the renderer's drawing buffer with the canvas's CSS size.
// Returns true when a resize actually happened (so the caller can
// refresh the camera's aspect ratio).
function resizeToDisplaySize(renderer) {
  const { clientWidth, clientHeight, width, height } = renderer.domElement;
  const needUpdate = width !== clientWidth || height !== clientHeight;
  if (needUpdate) {
    // false = don't let three.js overwrite the canvas's CSS size
    renderer.setSize(clientWidth, clientHeight, false);
  }
  return needUpdate;
}
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/95/three.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/dat-gui/0.7.2/dat.gui.min.js"></script>
Honestly I have a feeling there's an easier way I'm missing but for the moment it seems to kind of be working.

Resources