Mapping texture to THREE.Points - three.js

I'm trying to map one texture (same as on cube's side) to multiple points so that one point is colored with part of a texture and all the points together make up a complete image.
For doing this I have a custom shader that tries to map point position to texture like this:
// Question's (broken) approach: normalize each point's position into the
// Points' bounding box to derive a texture coordinate per point.
var uniform = THREE.TextureShader.uniforms();
uniform.texture.value = texture;
uniform.bbMin.value = new THREE.Vector3(-2.5, -2.5, 0); //THREE.Points Box3 min value
uniform.bbMax.value = new THREE.Vector3(2.5, 2.5, 0); //THREE.Points Box3 max value
//Shader
"vec3 p = (position - bbMin) / (bbMax - bbMin);",
/*This should give me fraction between 0 and 1 to match part of texture but it is not*/
"vColor = texture2D(texture, p.xy).rgb;",
Codepen for testing is here.
Any ideas how to calculate it correctly?
Desired result would be something like this, only there would be space between tiles.

50000 points or 50000 planes, it's all the same: you need some way to pass in data per point or per plane that lets you compute your texture coordinates. Personally I'd choose planes because you can rotate, scale, and flip planes, whereas you can't do that with POINTS.
In any case though, there's an infinite number of ways to do that, so it's really up to you to pick one. For points you get 1 "chunk" of data per point, where by "chunk" I mean all the data from all the attributes you set up.
So for example you could set up an attribute with an X,Y position representing which piece of that sprite you want to draw. In your example you've divided it 6x6 so make a vec2 attribute with values 0-5, 0-5 selecting the portion of the sprite.
Pass that into the vertex shader then you can either do some math there or pass it into the fragment shader directly. Let's assume you pass it into the fragment shader directly.
gl_PointCoord are the texture coordinates for the POINT that go from 0 to 1 so
// FIX: the varying is read as "v_segment" below, so it must be declared with
// that name (the original declared "segment", which would not link).
varying vec2 v_segment; // the segment of the sprite 0-5x, 0-5y
vec2 uv = (v_segment + gl_PointCoord) / 6.0;
vec4 color = texture2D(yourTextureUniform, uv);
Seems like it would work.
That one is hardcoded to 6x6. Change it to NxM by passing that in
// Generalized to an N x M sprite sheet.
// FIX: declare the varying with the name actually read ("v_segment") and add
// the missing statement terminator on the uv line.
varying vec2 v_segment; // the segment of the sprite
uniform vec2 numSegments; // number of segments across and down sprite
vec2 uv = (v_segment + gl_PointCoord) / numSegments;
vec4 color = texture2D(yourTextureUniform, uv);
Example:
"use strict";
// Set up WebGL via twgl and compile the "vs"/"fs" shaders embedded in the page.
var gl = twgl.getWebGLContext(document.getElementById("c"));
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// make a rainbow circle texture from a 2d canvas as it's easier than downloading
var ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = 128;
ctx.canvas.height = 128;
var gradient = ctx.createRadialGradient(64,64,60,64,64,0);
for (var i = 0; i <= 12; ++i) {
  // FIX: the hsl() color string was missing its closing parenthesis, which can
  // make CanvasGradient.addColorStop throw a SyntaxError in strict parsers.
  gradient.addColorStop(i / 12,"hsl(" + (i / 12 * 360) + ",100%,50%)");
}
ctx.fillStyle = gradient;
ctx.fillRect(0, 0, 128, 128);
// make points and segment data: one POINT per cell of a 6x5 grid
var numSegmentsAcross = 6;
var numSegmentsDown = 5;
var positions = [];
var segments = [];
for (var y = 0; y < numSegmentsDown; ++y) {
  for (var x = 0; x < numSegmentsAcross; ++x) {
    // clip-space position in [-1, 1] for each grid cell
    positions.push(x / (numSegmentsAcross - 1) * 2 - 1, y / (numSegmentsDown - 1) * 2 - 1);
    // which segment of the sprite this point samples
    segments.push(x, y);
  }
}
var arrays = {
  position: { size: 2, data: positions },
  segment: { size: 2, data: segments },
};
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
var tex = twgl.createTexture(gl, { src: ctx.canvas });
var uniforms = {
  u_numSegments: [numSegmentsAcross, numSegmentsDown],
  u_texture: tex,
};
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
twgl.drawBufferInfo(gl, gl.POINTS, bufferInfo);
canvas { border: 1px solid black; }
<script id="vs" type="notjs">
// Vertex shader: places each point and forwards its sprite segment index.
attribute vec4 position;
attribute vec2 segment;
varying vec2 v_segment;
void main() {
gl_Position = position;
v_segment = segment; // pass the segment selector to the fragment shader
gl_PointSize = 20.0; // point diameter in pixels
}
</script>
<script id="fs" type="notjs">
// Fragment shader: samples the sub-rectangle of the sprite for this point.
precision mediump float;
varying vec2 v_segment;
uniform vec2 u_numSegments;
uniform sampler2D u_texture;
void main() {
// gl_PointCoord goes 0..1 across the point; y is flipped so the sprite
// piece is not upside-down, then the segment offset selects the sub-tile.
vec2 uv = (v_segment + vec2(gl_PointCoord.x, 1.0 - gl_PointCoord.y)) / u_numSegments;
gl_FragColor = texture2D(u_texture, uv);
}
</script>
<script src="https://twgljs.org/dist/twgl.min.js"></script>
<canvas id="c"></canvas>

Related

Use 2 meshes + shader materials with each a different fragment shader in 1 scene (three.js)

I have 2 meshes with each a shaderMaterial and each a different fragment shader. When I add both meshes to my scene, only one will show up. Below you can find my 2 fragment shaders (see both images to see what they look like). They're basically the same.
What I want to achieve: Use mesh1 as a mask and put the other one, mesh2 (purple blob) on top of the mask.
Purple blob:
// three.js code
// Full-screen-ish plane driven by a custom shader (the purple blob).
const geometry1 = new THREE.PlaneBufferGeometry(1, 1, 1, 1);
const material1 = new THREE.ShaderMaterial({
uniforms: this.uniforms,
vertexShader,
fragmentShader,
defines: {
PR: window.devicePixelRatio.toFixed(1) // device pixel ratio baked in as a #define
}
});
const mesh1 = new THREE.Mesh(geometry1, material1);
this.scene.add(mesh1);
// fragment shader
// Draws a noise-warped circular blob over a solid background color.
// NOTE(review): circle(), snoise3, u_res, u_time and v_uv are declared
// elsewhere (not shown in this excerpt) — confirm against the full shader.
void main() {
vec2 res = u_res * PR; // resolution in physical pixels
vec2 st = gl_FragCoord.xy / res.xy - 0.5; // screen coords centered at 0
st.y *= u_res.y / u_res.x * 0.8; // aspect correction
vec2 circlePos = st;
float c = circle(circlePos, 0.2 + 0. * 0.1, 1.) * 2.5;
float offx = v_uv.x + sin(v_uv.y + u_time * .1);
float offy = v_uv.y * .1 - u_time * 0.005 - cos(u_time * .001) * .01;
float n = snoise3(vec3(offx, offy, .9) * 2.5) - 2.1; // animated noise distortion
float finalMask = smoothstep(1., 0.99, n + pow(c, 1.5));
vec4 bg = vec4(0.12, 0.07, 0.28, 1.0); // opaque purple
vec4 bg2 = vec4(0., 0., 0., 0.); // fully transparent
gl_FragColor = mix(bg, bg2, finalMask);
}
Blue mask
// three.js code
// Second plane (the "blue mask"), built exactly like mesh1 but with its own
// uniforms object and shaders.
const geometry2 = new THREE.PlaneBufferGeometry(1, 1, 1, 1);
const material2 = new THREE.ShaderMaterial({
uniforms,
vertexShader,
fragmentShader,
defines: {
PR: window.devicePixelRatio.toFixed(1) // device pixel ratio baked in as a #define
}
});
const mesh2 = new THREE.Mesh(geometry2, material2);
this.scene.add(mesh2);
// fragment shader
// Identical to the purple-blob fragment shader above in the question text:
// a noise-warped circle mixed against a solid background.
// NOTE(review): circle(), snoise3, u_res, u_time, v_uv defined elsewhere.
void main() {
vec2 res = u_res * PR;
vec2 st = gl_FragCoord.xy / res.xy - 0.5;
st.y *= u_res.y / u_res.x * 0.8;
vec2 circlePos = st;
float c = circle(circlePos, 0.2 + 0. * 0.1, 1.) * 2.5;
float offx = v_uv.x + sin(v_uv.y + u_time * .1);
float offy = v_uv.y * .1 - u_time * 0.005 - cos(u_time * .001) * .01;
float n = snoise3(vec3(offx, offy, .9) * 2.5) - 2.1;
float finalMask = smoothstep(1., 0.99, n + pow(c, 1.5));
vec4 bg = vec4(0.12, 0.07, 0.28, 1.0);
vec4 bg2 = vec4(0., 0., 0., 0.);
gl_FragColor = mix(bg, bg2, finalMask);
}
Render Target code
// Render-target setup: render the purple blob into an offscreen texture,
// then display that texture on a plane in the main scene.
this.rtWidth = window.innerWidth;
this.rtHeight = window.innerHeight;
this.renderTarget = new THREE.WebGLRenderTarget(this.rtWidth, this.rtHeight);
// Dedicated camera for the offscreen pass, mirroring the main camera's settings.
this.rtCamera = new THREE.PerspectiveCamera(
this.camera.settings.fov,
this.camera.settings.aspect,
this.camera.settings.near,
this.camera.settings.far
);
this.rtCamera.position.set(0, 0, this.camera.settings.perspective);
this.rtScene = new THREE.Scene();
this.rtScene.add(this.purpleBlob);
const geometry = new THREE.PlaneGeometry(window.innerWidth, window.innerHeight, 1);
// NOTE(review): MeshPhongMaterial is lit — it needs a light in the scene to
// be visible; confirm, or use MeshBasicMaterial for an unlit fullscreen quad.
const material = new THREE.MeshPhongMaterial({
map: this.renderTarget.texture, // the offscreen result used as a texture
});
this.mesh = new THREE.Mesh(geometry, material);
this.scene.add(this.mesh);
I'm still new to shaders so please be patient. :-)
There are probably infinite ways to mask in three.js. Here's a few
Use the stencil buffer
The stencil buffer is similar to the depth buffer in that it for every pixel in the canvas or render target there is a corresponding stencil pixel. You need to tell three.js you want a stencil buffer and then you can tell it when rendering what to do with the stencil buffer when you're drawing things.
You set the stencil settings on Material.
You tell three.js
what to do if the pixel you're drawing fails the stencil test
what to do if the pixel you're drawing fails the depth test
what to do if the pixel you're drawing passes the depth test.
The things you can tell it to do for each of those conditions are keep (do nothing), increment, decrement, increment wraparound, decrement wraparound, set to a specific value.
You can also specify what the stencil test is by setting Material.stencilFunc
So, for example you can clear the stencil buffer to 0 (the default?), set the stencil test so it always passes, and set the conditions so if the depth test passes you set the stencil to 1. You then draw a bunch of things. Everywhere they are drawn there will now be a 1 in the stencil buffer.
Now you change the stencil test so it only passes if it equals 1 (or 0) and then draw more stuff, now things will only be drawn where the stencil equals the value you set
This example uses the stencil
Mask with an alpha mask
In this case you need 2 color textures and an alpha texture. How you get those is up to you. For example you could load all 3 from images. Or you could generate all 3 using 3 render targets. Finally you pass all 3 to a shader that mixes them as in
gl_FragColor = mix(colorFromTexture1, colorFromTexture2, valueFromAlphaTexture);
This example uses this alpha mixing method
Note that if one of your 2 colors textures has an alpha channel you could use just 2 textures. You'd just pass one of the color textures as your mask.
Or of course you could calculate a mask based on the colors in one image or the other or both. For example
// assume you have function that converts from rgb to hue,saturation,value
vec3 hsv = rgb2hsv(colorFromTexture1.rgb);
float hue = hsv.x;
// pick one or the other if color1 is close to green
float mixAmount = step(abs(hue - 0.33), 0.05);
gl_FragColor = mix(colorFromTexture1, colorFromTexture2, mixAmount);
The point here is not that exact code, it's that you can make any formula you want for the mask, based on whatever you want, color, position, random math, sine waves based on time, some formula that generates a blob, whatever. The most common is some code that just looks up a mixAmount from a texture which is what the linked example above does.
ShaderToy style
Your code above appears to be a shadertoy style shader which is drawing a fullscreen quad. Instead of drawing 2 separate things you can just draw them in the same shader
// Sketch: compute both blobs in one shader pass and blend them, instead of
// rendering two separate meshes. (Bodies elided in the original.)
vec4 computeBlueBlob() {
...
return blueBlobColor;
}
vec4 computeWhiteBlob() {
...
return whiteBlobColor; // FIX: was "whtieBlobColor" (typo)
}
void main() { // FIX: main() must return void in GLSL, not vec4
vec4 color1 = computeBlueBlob();
vec4 color2 = computeWhiteBlob();
float mixAmount = color2.a; // FIX: was "color.a"; note: color2.a could be any
// formula to decide which colors
// to draw
gl_FragColor = mix(color1, color2, mixAmount);
}
note just like above how you compute mixAmount is up to you. Based it off anything, color1.r, color2.r, some formula, some hue, some other blob generation function, whatever.

Morphing in WebGL shaders with mix() between more than two targets

I am trying to build an image slider using three.js and am having difficulties with wrapping my head around passing the appropriate state to the glsl shaders so I can transition between the slides. I can easily do it between two targets (be it textures or models) with simply easing between 0 and 1 and passing it as an attrib float like this:
attribute float mix;
vec4 color = mix(tex1, tex2, mix);
But I can't understand how to approach it with more than 2 targets. Should I pass a number and do a bunch of if statements?
I set up my buffer plane geometry and my shader material, which contains my 3 textures, like this:
// Uniforms for the slider shader: three texture slots, the index of the
// currently active texture pair, and a 0..1 mix factor for the transition.
const uniforms = {
time: { value: 0 },
tex1: { type: 't', value: null },
tex2: { type: 't', value: null },
tex3: { type: 't', value: null },
activeTexture: { type: 'i', value: 0 },
mixFactor: { value: 0 }
}
// Shader sources are pulled from <script> tags in the page.
const vertexShader = document.querySelector('#vertex-shader').text
const fragmentShader = document.querySelector('#fragment-shader').text
const geometry = new THREE.PlaneBufferGeometry(80, 40, 20, 20)
const material = new THREE.ShaderMaterial({
uniforms,
vertexShader,
fragmentShader
})
// textures are loaded here...
// transition using GSAP
// transition using GSAP
// Advances the slider: tweens mixFactor and swaps activeTexture per step.
// FIX: the `else if (counter === 2)` branch was missing its closing brace,
// leaving the function syntactically invalid; the counter bookkeeping below
// runs on every call so the slider cycles 0 -> 1 -> 2 -> 0.
function shift () {
let ease = Power3.easeInOut
if (counter === 0) {
TweenMax.to(uniforms.mixFactor, 2, { value: 1, ease, onStart () {
uniforms.activeTexture.value = 1
} })
} else if (counter === 1) {
TweenMax.to(uniforms.mixFactor, 2, { value: 1, ease, onComplete () {
uniforms.activeTexture.value = 2
} })
} else if (counter === 2) {
TweenMax.to(uniforms.mixFactor, 2, { value: 2, ease, onComplete () {
uniforms.activeTexture.value = 0
} })
console.log(uniforms.activeTexture.value)
}
counter += 1
if (counter === 3) counter = 0
}
// glsl
// morph between different targets depending on the passed int attribute
// Picks which pair of textures to transition between based on activeTexture.
// NOTE(review): "transition", the uniforms and vUv are presumably declared
// elsewhere in the full shader — this is an excerpt.
void main () {
vec4 texColor = vec4(0.0);
if (activeTexture == 0) {
texColor = transition(tex1, tex2, vUv, mixFactor);
} else if (activeTexture == 1) {
texColor = transition(tex2, tex3, vUv, mixFactor);
} else if (activeTexture == 2) {
texColor = transition(tex3, tex1, vUv, mixFactor);
}
gl_FragColor = texColor;
}
This doesn't give me the desired effect (the textures abruptly switch between one another, don't transition into place, also it's a bit ugly). I am new to three and am clueless how should I even approach the problem. How does one do this?
I brought my 5 kopeikas :)
For example, we want to have transition for several pics. So we can use arrays in our uniforms.
Here we go
// Uniforms: an array of textures plus a single 0..1 transition scalar.
var uniforms = {
textures: {
value: []
},
transition: {
value: 0
}
};
var textureLoader = new THREE.TextureLoader();
textureLoader.setCrossOrigin("");
var pics = [
"https://threejs.org/examples/textures/UV_Grid_Sm.jpg",
"https://threejs.org/examples/textures/colors.png",
"https://threejs.org/examples/textures/planets/moon_1024.jpg",
"https://threejs.org/examples/textures/decal/decal-normal.jpg"
];
// Load each picture asynchronously into its slot of the uniform array.
pics.forEach((p, idx)=>{
textureLoader.load(p, function(tex){
uniforms.textures.value[idx] = tex;
tex.needsUpdate = true;
})
});
Our geometry and vertex shader are usual:
var planeGeom = new THREE.PlaneBufferGeometry(10, 10);
// Standard pass-through vertex shader: forwards UVs and projects the vertex.
var vertShader = `
varying vec2 vUv;
void main()
{
vUv = uv;
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0 );
gl_Position = projectionMatrix * mvPosition;
}
`;
Magic comes here in our fragment shader, which is built dynamically based on the length of our array of links to pics:
// Fragment shader assembled at runtime: the texture-array length and the
// transition chunking are baked in from pics.length via string concatenation.
// The loop in getTexture() works around GLSL ES 1.0's constant-index
// restriction on sampler arrays.
// NOTE(review): getTexture() has no return statement after the loop; some
// GLSL compilers warn or error on the missing fall-through return — confirm.
var fragShader = `
uniform sampler2D textures[` + pics.length + `];
uniform float transition;
varying vec2 vUv;
vec4 getTexture(int index){
for(int i = 0; i < ` + pics.length + `; i++){
if (i == index){ return texture2D(textures[i],vUv); }
}
}
void main()
{
if (transition == 1.){
gl_FragColor = texture2D(textures[` + (pics.length - 1) + `], vUv); // show last
}
else {
float chunk = 1. / ` + (pics.length - 1) + `.; // amount of transitions = amount of pics - 1
float t = floor(transition / chunk);
int idx0 = int(t);
int idx1 = int(t) + 1;
gl_FragColor = mix(
getTexture(idx0),
getTexture(idx1),
(transition - (float(t) * chunk)) * ` + (pics.length - 1) + `.
);
}
}
`;
The solution is flexible enough, thus you can have as many transitions as you want.
jsfiddle example r86
I would do the mix in GLSL and rest outside the shaders managing what gets drawn. You can have one shader that takes 2 or more textures, transition between them, but once they get to 0 or 1, switch out the texture with another one. If you need just three though... this is overkill.
Something along the lines of this:
// Material that cross-fades between two bound textures; lerp() maps a single
// normalized 0..1 factor onto successive texture pairs.
const myTransitionMaterial = new THREE.ShaderMaterial({
uniforms:{
uLerp: {value: 0},
uTexA: {value: null},
uTexB: {value: null},
},
vertexShader: vs,
fragmentShader: fs,
})
//lets say you have a list of a bunch of textures, and you add them
myTransitionMaterial.textures = [tex1,tex2,tex3]
//and you want to lerp through them linearly using 0-1 regardless of how many there are
// FIX: this was an arrow function passed to .bind(); arrow functions ignore
// bind's `this`, so `this.uniforms` below would not refer to the material.
// A regular function expression makes .bind(myTransitionMaterial) effective.
myTransitionMaterial.lerp = function (normalizedFactor) {
const length = myTransitionMaterial.textures.length
const index = normalizedFactor * length // 0-3
//at 0.00 we want 0-1 indices and 0.00 f
//at 0.99 we want 0-1 indices and 0.99 f
//at 1.00 we want 1-2 indices and 0.00 f
//at 1.99 we want 1-2 indices and 0.99 f
//at 2.00 we want 2-3 indices and 0.00 f
//at 2.99 we want 2-3 indices and 0.99 f
//at 3.00 we want 3-4 indices and 0.00 f
const f = index - Math.floor(index)
const i0 = Math.floor(index)
const i1 = i0 <= length ? i0 + 1 : null //catch edge
// NOTE(review): at normalizedFactor === 1 this yields i0 === length, so both
// texture lookups fall off the end of the array — confirm intended edge handling.
this.uniforms.uLerp.value = f
this.uniforms.uTexA.value = this.textures[i0]
this.uniforms.uTexB.value = this.textures[i1]
}.bind(myTransitionMaterial)
vs:
// Pass-through vertex shader: emits the quad directly in clip space.
varying vec2 vUv;
void main(){
vUv = uv;
gl_Position = vec4(position.xy,0.,1.);
}
fs:
// Blends the two currently bound textures by uLerp (alpha forced to 1).
uniform float uLerp;
uniform sampler2D uTexA;
uniform sampler2D uTexB;
varying vec2 vUv;
void main(){
gl_FragColor = vec4( mix( texture2D(uTexA, vUv).xyz, texture2D(uTexB, vUv).xyz, uLerp ), 1. );
}
An important concept to point out here is that if you do something like this, and try to lerp for the first time, your frame rate will get choppy as textures are displayed for the first time. This happens because the renderer will automatically upload them to the gpu as it first encounters them. For example, if you render a frame with each texture once, before even doing this transition, it's going to be smooth as butter.
if the number of textures is already set (and it should be as uniforms anyway) I would do it a little different:
i would define a float uniform that is your mixer and then use a 0-1 value to transition between the two. In this way you can animate the mixer variable however you like and the GLSL stays pretty simple:
// Cross-fades between three textures (looping back to the first) driven by a
// single "mixer" uniform.
// FIXES: GLSL's sampler type is "sampler2D" (case-sensitive; was "sampler2d");
// "1-..." mixed an int literal with floats, which GLSL ES rejects — use 1.0;
// vUv must be declared as a varying for the texture lookups to compile.
varying vec2 vUv;
uniform sampler2D t1;
uniform sampler2D t2;
uniform sampler2D t3;
uniform float mixer;
void main(){
vec4 c1 = texture2D(t1,vUv);
vec4 c4 = c1; //create a duplicate so you can loop back
vec4 c2 = texture2D(t2,vUv);
vec4 c3 = texture2D(t3,vUv);
float mp1 = .33333; //define the locations of t2
float mp2 = .66666; //and t3
float w = .33333; //define the width
c1 *= 1.0-mix(0.0,w,abs(mixer)); //this is at 1 when mixer is 0 & 0 when .333
c2 *= 1.0-mix(0.0,w, abs(mixer-mp1)); //this is 1 when .333 & 0 when 0<mixer>.666
c3 *= 1.0-mix(0.0,w,abs(mixer-mp2)); //this is 1 when .666 & 0 when .333<mixer>1.0
c4 *= 1.0-mix(0.0,w,abs(mixer-1.0)); //this is 1 when 1 & 0 when .666<mixer
gl_FragColor=c1+c2+c3+c4; //now it will only ever be a mixture of 2 textures
}
So then you do some border function on mixer so that
if(mixer > 1)mixer --;
if(mixer < 0)mixer ++;
and then you can go from T1 to T2 by tweening from 0-0.3333. You can go from T2 to T3 by tweening from .333 to .666, and from T3 to T1 by tweening from .6666 to 1.0 and so on.
Then you just need to do a little management so that your tweens go circularly- ie, if the distance from current position to a target position is greater than 1/3 some amount you do a jump from 0 to 1 or from 1 to 0

Weird behavior if DataTextures are not square (1:1)

I have a pair of shader programs where everything works great if my DataTextures are square (1:1), but if one or both are 2:1 (width:height) ratio the behavior gets messed up. I can extend each of the buffers with unused filler to make sure they are always square, but this seems unnecessarily costly (memory-wise) in the long run, as one of the two buffer sizes is quite large to start. Is there a way to handle a 2:1 buffer in this scenario?
I have a pair of shader programs:
The first is a single frag shader used to calculate the physics for my program (it writes out a texture tPositions to be read by the second set of shaders). It is driven by Three.js's GPUComputeRenderer script (resolution set at the size of my largest buffer.)
The second pair of shaders (vert and frag) use the data texture tPositions produced by the first shader program to then render out the visualization (resolution set at the window size).
The visualization is a grid of variously shaped particle clouds. In the shader programs, there are textures of two different sizes: The smaller sized textures contain information for each of the particle clouds (one texel per cloud), larger sized textures contain information for each particle in all of the clouds (one texel per particle). Both have a certain amount of unused filler tacked on the end to fill them out to a power of 2.
Texel-per-particle sized textures (large): tPositions, tOffsets
Texel-per-cloud sized textures (small): tGridPositionsAndSeeds, tSelectionFactors
As I said before, the problem is that when these two buffer sizes (the large and the small) are at a 1:1 (width: height) ratio, the programs work just fine; however, when one or both are at a 2:1 (width:height) ratio the behavior is a mess. What accounts for this, and how can I address it? Thanks in advance!
UPDATE: Could the problem be related to my housing the texel coords to read the tPosition texture in the shader's position attribute in the second shader program? If so, perhaps this Github issue regarding texel coords in the position attribute may be related, though I can't find a corresponding question/answer here on SO.
UPDATE 2:
I'm also looking into whether this could be an unpack alignment issue. Thoughts?
Here's the set up in Three.js for the first shader program:
// Builds the GPGPU physics pass: wraps the per-particle and per-motif buffer
// data in DataTextures and wires them as uniforms of the position shader.
function initComputeRenderer() {
textureData = MotifGrid.getBufferData();
// The compute resolution matches the per-particle buffer (one texel per particle).
gpuCompute = new GPUComputationRenderer( textureData.uPerParticleBufferWidth, textureData.uPerParticleBufferHeight, renderer );
dtPositions = gpuCompute.createTexture();
dtPositions.image.data = textureData.tPositions;
// Per-particle texture: offsets plus alpha mass / cell index in z,w.
offsetsTexture = new THREE.DataTexture( textureData.tOffsets, textureData.uPerParticleBufferWidth, textureData.uPerParticleBufferHeight, THREE.RGBAFormat, THREE.FloatType );
offsetsTexture.needsUpdate = true;
// Per-motif textures (one texel per cloud).
gridPositionsAndSeedsTexture = new THREE.DataTexture( textureData.tGridPositionsAndSeeds, textureData.uPerMotifBufferWidth, textureData.uPerMotifBufferHeight, THREE.RGBAFormat, THREE.FloatType );
gridPositionsAndSeedsTexture.needsUpdate = true;
selectionFactorsTexture = new THREE.DataTexture( textureData.tSelectionFactors, textureData.uPerMotifBufferWidth, textureData.uPerMotifBufferHeight, THREE.RGBAFormat, THREE.FloatType );
selectionFactorsTexture.needsUpdate = true;
positionVariable = gpuCompute.addVariable( "tPositions", document.getElementById( 'position_fragment_shader' ).textContent, dtPositions );
positionVariable.wrapS = THREE.RepeatWrapping; // repeat wrapping for use only with bit powers: 8x8, 16x16, etc.
positionVariable.wrapT = THREE.RepeatWrapping;
gpuCompute.setVariableDependencies( positionVariable, [ positionVariable ] );
positionUniforms = positionVariable.material.uniforms;
positionUniforms.tOffsets = { type: "t", value: offsetsTexture };
positionUniforms.tGridPositionsAndSeeds = { type: "t", value: gridPositionsAndSeedsTexture };
positionUniforms.tSelectionFactors = { type: "t", value: selectionFactorsTexture };
positionUniforms.uPerMotifBufferWidth = { type : "f", value : textureData.uPerMotifBufferWidth };
positionUniforms.uPerMotifBufferHeight = { type : "f", value : textureData.uPerMotifBufferHeight };
positionUniforms.uTime = { type: "f", value: 0.0 };
positionUniforms.uXOffW = { type: "f", value: 0.5 };
}
Here is the first shader program (only a frag for physics calculations):
// tPositions is handled by the GPUCompute script
// Physics pass: one texel per particle; reads per-motif data via a computed
// UV and writes (position.xy, velocity.xy) back out. (Excerpt; two sections
// are elided below.)
uniform sampler2D tOffsets;
uniform sampler2D tGridPositionsAndSeeds;
uniform sampler2D tSelectionFactors;
uniform float uPerMotifBufferWidth;
uniform float uPerMotifBufferHeight;
uniform float uTime;
uniform float uXOffW;
[...skipping a noise function for brevity...]
void main() {
vec2 uv = gl_FragCoord.xy / resolution.xy;
vec4 offsets = texture2D( tOffsets, uv ).xyzw;
float alphaMass = offsets.z;
float cellIndex = offsets.w;
if (cellIndex >= 0.0) { // negative cell index marks unused filler texels
float damping = 0.98;
float texelSizeX = 1.0 / uPerMotifBufferWidth;
float texelSizeY = 1.0 / uPerMotifBufferHeight;
// BUG (per the accepted fix at the end of this question): the row index
// must come from floor(cellIndex / uPerMotifBufferWidth), not .../Height.
// Dividing by the height only works when the buffer is square.
vec2 perMotifUV = vec2( mod(cellIndex, uPerMotifBufferWidth)*texelSizeX, floor(cellIndex / uPerMotifBufferHeight)*texelSizeY );
perMotifUV += vec2(0.5*texelSizeX, 0.5*texelSizeY); // sample texel centers
vec4 selectionFactors = texture2D( tSelectionFactors, perMotifUV ).xyzw;
float swapState = selectionFactors.x;
vec4 gridPosition = texture2D( tGridPositionsAndSeeds, perMotifUV ).xyzw;
vec2 noiseSeed = gridPosition.zw;
vec4 nowPos;
vec2 velocity;
nowPos = texture2D( tPositions, uv ).xyzw;
velocity = vec2(nowPos.z, nowPos.w);
if ( swapState == 0.0 ) {
nowPos = texture2D( tPositions, uv ).xyzw;
velocity = vec2(nowPos.z, nowPos.w);
} else { // if swapState == 1
//nowPos = vec4( -(uTime) + gridPosition.x + offsets.x, gridPosition.y + offsets.y, 0.0, 0.0 );
nowPos = vec4( -(uTime) + offsets.x, offsets.y, 0.0, 0.0 );
velocity = vec2(0.0, 0.0);
}
[...skipping the physics for brevity...]
vec2 newPosition = vec2(nowPos.x - velocity.x, nowPos.y - velocity.y);
// Write new position out
gl_FragColor = vec4(newPosition.x, newPosition.y, velocity.x, velocity.y);
}
Here is the setup for the second shader program:
Note: The renderer for this section is a WebGLRenderer at window size
// Builds one reference texel coordinate (x, y) plus a small unique z offset
// per particle; the vertex shader uses position.xy to look up tPositions.
function makePerParticleReferencePositions() {
var positions = new Float32Array( perParticleBufferSize * 3 );
var texelSizeX = 1 / perParticleBufferDimensions.width;
var texelSizeY = 1 / perParticleBufferDimensions.height;
for ( var j = 0, j3 = 0; j < perParticleBufferSize; j ++, j3 += 3 ) {
positions[ j3 + 0 ] = ( ( j % perParticleBufferDimensions.width ) / perParticleBufferDimensions.width ) + ( 0.5 * texelSizeX );
// NOTE: same index bug as in the shaders — the row index should be
// Math.floor( j / ...width ), not a division by height (see the accepted
// fix at the end of this question); this only works for square buffers.
positions[ j3 + 1 ] = ( Math.floor( j / perParticleBufferDimensions.height ) / perParticleBufferDimensions.height ) + ( 0.5 * texelSizeY );
positions[ j3 + 2 ] = j * 0.0001; // this is the real z value for the particle display
}
return positions;
}
var positions = makePerParticleReferencePositions();
...
// Add attributes to the BufferGeometry:
gridOfMotifs.geometry.addAttribute( 'position', new THREE.BufferAttribute( positions, 3 ) );
gridOfMotifs.geometry.addAttribute( 'aTextureIndex', new THREE.BufferAttribute( motifGridAttributes.aTextureIndex, 1 ) );
gridOfMotifs.geometry.addAttribute( 'aAlpha', new THREE.BufferAttribute( motifGridAttributes.aAlpha, 1 ) );
gridOfMotifs.geometry.addAttribute( 'aCellIndex', new THREE.BufferAttribute(
motifGridAttributes.aCellIndex, 1 ) );
// Per-motif data forwarded to the render shaders as uniforms:
uniformValues = {};
uniformValues.tSelectionFactors = motifGridAttributes.tSelectionFactors;
uniformValues.uPerMotifBufferWidth = motifGridAttributes.uPerMotifBufferWidth;
uniformValues.uPerMotifBufferHeight = motifGridAttributes.uPerMotifBufferHeight;
gridOfMotifs.geometry.computeBoundingSphere();
...
// Wraps the per-motif selection factors in a DataTexture and assembles the
// uniform set for the render (visualization) shader pair.
// NOTE(review): `texture` and `numTexturesInSheet` come from enclosing scope.
function makeCustomUniforms( uniformValues ) {
selectionFactorsTexture = new THREE.DataTexture( uniformValues.tSelectionFactors, uniformValues.uPerMotifBufferWidth, uniformValues.uPerMotifBufferHeight, THREE.RGBAFormat, THREE.FloatType );
selectionFactorsTexture.needsUpdate = true;
var customUniforms = {
tPositions : { type : "t", value : null },
tSelectionFactors : { type : "t", value : selectionFactorsTexture },
uPerMotifBufferWidth : { type : "f", value : uniformValues.uPerMotifBufferWidth },
uPerMotifBufferHeight : { type : "f", value : uniformValues.uPerMotifBufferHeight },
uTextureSheet : { type : "t", value : texture }, // this is a sprite sheet of all 10 strokes
uPointSize : { type : "f", value : 18.0 }, // the radius of a point in WebGL units, e.g. 30.0
// Coords for the hatch textures:
uTextureCoordSizeX : { type : "f", value : 1.0 / numTexturesInSheet },
uTextureCoordSizeY : { type : "f", value : 1.0 }, // the size of a texture in the texture map ( they're square, thus only one value )
};
return customUniforms;
}
And here is the corresponding shader program (vert & frag):
Vertex shader:
// Render-pass vertex shader: looks up each particle's simulated position from
// tPositions (via the reference coords stored in position.xy) and forwards
// per-motif styling to the fragment shader.
uniform sampler2D tPositions;
uniform sampler2D tSelectionFactors;
uniform float uPerMotifBufferWidth;
uniform float uPerMotifBufferHeight;
uniform sampler2D uTextureSheet;
uniform float uPointSize; // the radius size of the point in WebGL units, e.g. "30.0"
uniform float uTextureCoordSizeX; // vertical dimension of each texture given the full side = 1
uniform float uTextureCoordSizeY; // horizontal dimension of each texture given the full side = 1
attribute float aTextureIndex;
attribute float aAlpha;
attribute float aCellIndex;
varying float vCellIndex;
varying vec2 vTextureCoords;
varying vec2 vTextureSize;
varying float vAlpha;
varying vec3 vColor;
varying float vDensity;
[...skipping noise function for brevity...]
void main() {
vec4 tmpPos = texture2D( tPositions, position.xy );
vec2 pos = tmpPos.xy;
vec2 vel = tmpPos.zw;
vCellIndex = aCellIndex;
if (aCellIndex >= 0.0) { // buffer filler cell indexes are -1
float texelSizeX = 1.0 / uPerMotifBufferWidth;
float texelSizeY = 1.0 / uPerMotifBufferHeight;
// BUG (per the accepted fix at the end of this question): the row index
// must use floor(aCellIndex / uPerMotifBufferWidth); dividing by the
// height only happens to work for square buffers.
vec2 perMotifUV = vec2( mod(aCellIndex, uPerMotifBufferWidth)*texelSizeX, floor(aCellIndex / uPerMotifBufferHeight)*texelSizeY );
perMotifUV += vec2(0.5*texelSizeX, 0.5*texelSizeY); // sample texel centers
vec4 selectionFactors = texture2D( tSelectionFactors, perMotifUV ).xyzw;
float aSelectedMotif = selectionFactors.x;
float aColor = selectionFactors.y;
float fadeFactor = selectionFactors.z;
vTextureCoords = vec2( aTextureIndex * uTextureCoordSizeX, 0 );
vTextureSize = vec2( uTextureCoordSizeX, uTextureCoordSizeY );
vAlpha = aAlpha * fadeFactor;
vDensity = vel.x + vel.y;
vAlpha *= abs( vDensity * 3.0 ); // faster particles render more opaque
vColor = vec3( 1.0, aColor, 1.0 ); // set RGB color associated to vertex; use later in fragment shader.
gl_PointSize = uPointSize;
} else { // if this is a filler cell index (-1)
vAlpha = 0.0;
vDensity = 0.0;
vColor = vec3(0.0, 0.0, 0.0);
gl_PointSize = 0.0;
}
gl_Position = projectionMatrix * modelViewMatrix * vec4( pos.x, pos.y, position.z, 1.0 ); // position holds the real z value. The z value of "color" is a component of velocity
}
Fragment shader:
// Render-pass fragment shader: tints the point and multiplies in the sprite
// from the stroke sheet, skipping buffer-filler points (vCellIndex < 0).
uniform sampler2D tPositions;
uniform sampler2D uTextureSheet;
varying float vCellIndex;
varying vec2 vTextureCoords;
varying vec2 vTextureSize;
varying float vAlpha;
varying vec3 vColor;
varying float vDensity;
void main() {
gl_FragColor = vec4( vColor, vAlpha );
if (vCellIndex >= 0.0) { // only render out the texture if this point is not a buffer filler
// gl_PointCoord selects the spot inside this point's sub-rectangle of the sheet
vec2 realTexCoord = vTextureCoords + ( gl_PointCoord * vTextureSize );
gl_FragColor = gl_FragColor * texture2D( uTextureSheet, realTexCoord );
}
}
Expected Behavior: I can achieve this by forcing all the DataTextures to be 1:1
Weird Behavior: When the smaller DataTextures are 2:1 those perfectly horizontal clouds in the top right of the picture below form and have messed up physics. When the larger DataTextures are 2:1, the grid is skewed, and the clouds appear to be missing parts (as seen below). When both the small and large textures are 2:1, both odd behaviors happen (this is the case in the image below).
Thanks to an answer to my related question here, I now know what was going wrong. The problem was in the way I was using the arrays of indexes (1,2,3,4,5...) to access the DataTextures' texels in the shader.
In this function (and the one for the larger DataTextures)...
// The original (buggy) UV computation, shown for contrast: note the division
// by uPerMotifBufferHeight on the row line — wrong for non-square buffers.
float texelSizeX = 1.0 / uPerMotifBufferWidth;
float texelSizeY = 1.0 / uPerMotifBufferHeight;
vec2 perMotifUV = vec2(
mod(aCellIndex, uPerMotifBufferWidth)*texelSizeX,
floor(aCellIndex / uPerMotifBufferHeight)*texelSizeY );
perMotifUV += vec2(0.5*texelSizeX, 0.5*texelSizeY);
...I assumed that in order to create the y value for my custom uv, perMotifUV, I would need to divide the aCellIndex by the height of the buffer, uPerMotifBufferHeight (its "vertical" dimension). However, as explained in the SO Q&A here, the indices should, of course, be divided by the buffer's width, which would then tell you how many rows down you are!
Thus, the function should be revised to...
// Corrected UV computation: the row index divides by the buffer WIDTH.
float texelSizeX = 1.0 / uPerMotifBufferWidth;
float texelSizeY = 1.0 / uPerMotifBufferHeight;
vec2 perMotifUV = vec2(
mod(aCellIndex, uPerMotifBufferWidth)*texelSizeX,
floor(aCellIndex / uPerMotifBufferWidth)*texelSizeY ); // ** Note the change to uPerMotifBufferWidth here
perMotifUV += vec2(0.5*texelSizeX, 0.5*texelSizeY);
The reason my program worked on square DataTextures (1:1) is that in such cases the height and width were equal, so my function was effectively dividing by width in the incorrect line because height=width!

map polar coordinates to webgl shader uv

In my WebGL shader I would like to map the U value of my texture based on the output of a function (atan) whose range is [0,2*PI). But the range of U (as expected by texture2D) is [0,1]. So I'm trying to map an open interval to a closed interval.
This shows the problem:
The horizontal red gradient is the U axis and goes from Red=1 to Red=0 as my atan goes from 0 to 2*PI. But atan treats 2*PI as zero so there is a red band on the right after the gradient has gone black. (There are red bands on the top and bottom too, but that is a similar problem having to do with the V value, which I'm ignoring for the purposes of this question).
See this image using three.js' ability to show the vertices:
You can see how the right-most vertices (U=1) are red corresponding again to atan=0 instead of 2*PI.
Any suggestions on how to accomplish this? I can't force atan to return a 2*PI. I don't want to tile the texture. Can I map the U value to an open interval somehow?
I keep thinking there must be an easy solution but have tried every fix I can think of.
Here is my vertex shader:
void main()
{
vec4 mvPosition = modelViewMatrix * vec4(position, 1.0 );
gl_Position = projectionMatrix * mvPosition;
// convert from uv to polar coords
vec2 tempuv = uv;
theta = (1.0-tempuv[1]) * PI;
phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
x = sin(theta)*cos(phi);
y = sin(theta)*sin(phi);
z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan2(y, x);
if (phi < 0.0) {
phi = phi + PI*2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
Here is my fragment shader:
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
One way of looking at the problem is, as you mentioned, that 1 becomes 0 at the edge. But another way of looking at it is this: if you changed uv to go from 0 to 2 instead of 0 to 1 and then used fract(uv), you'd get the same problem several times over, because you're effectively sampling a function and each point can only choose 1 color — whereas to map it correctly, the vertices at the seam would somehow need to pick 2 colors: one for interpolating to the left and another for interpolating to the right.
Example with fract(uv * 2.)
var vs = `
#define PI radians(180.)
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
// convert from uv to polar coords
vec2 tempuv = fract(texcoord * 2.);
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
Moving the code to the fragment shader effectively solves it.
Example with code moved to fragment shader
var vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
vUv = texcoord;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
#define PI radians(180.)
void main() {
// convert from uv to polar coords
vec2 tempuv = vUv;
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
gl_FragColor = vec4(1.0- newuv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
Keeping it a vertex shader one solution is just to fudge the numbers so they're between say 0.00005 and 0.99995.
var vs = `
#define PI radians(180.)
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 vUv;
void main() {
gl_Position = position;
// convert from uv to polar coords
vec2 tempuv = texcoord * 0.9999 + 0.00005;
float theta = (1.0-tempuv[1]) * PI;
float phi = PI * 2.0 * tempuv[0];
// convert polar to cartesian. Theta is polar, phi is azimuth.
float x = sin(theta)*cos(phi);
float y = sin(theta)*sin(phi);
float z = cos(theta);
// and convert back again to demonstrate problem.
// problem: the phi above is [0,2*PI]. this phi is [0,2*PI)
phi = atan(y, x);
if (phi < 0.0) {
phi = phi + PI * 2.0;
}
if (phi > (2.0 * PI)) { // allow 2PI since we gen uv over [0,1]
phi = phi - 2.0 * PI;
}
theta = acos(z);
// now get uv in new chart.
float newv = 1.0 - theta/PI;
float newu = phi/(2.0 * PI);
vec2 newuv = vec2(newu, newv);
vUv = newuv;
}
`;
var fs = `
precision mediump float;
varying vec2 vUv;
void main() {
vec2 uv = vUv;
gl_FragColor = vec4(1.0- uv[0],0.,0.,1.);
}
`;
var gl = document.querySelector("canvas").getContext("webgl");
var m4 = twgl.m4;
var programInfo = twgl.createProgramInfo(gl, [vs, fs]);
var bufferInfo = twgl.primitives.createPlaneBufferInfo(
gl, 2, 2, 20, 20, m4.rotationX(Math.PI * .5));
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.drawBufferInfo(gl, bufferInfo);
body { margin: 0 }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/2.x/twgl-full.min.js"></script>
<canvas></canvas>
This only works though because the texcoords go from 0 to 1. If they went from zero to > 1 (or less than 0) you'd run into the same problem as above that certain vertices need more than 1 color. You'd basically need to use the fragment shader solution

Artifacts from linear filtering a floating point texture in the fragment shader

I'm using the following code taken from this tutorial to perform linear filtering on a floating point texture in my fragment shader in WebGL:
float fHeight = 512.0;
float fWidth = 1024.0;
float texelSizeX = 1.0/fWidth;
float texelSizeY = 1.0/fHeight;
float tex2DBiLinear( sampler2D textureSampler_i, vec2 texCoord_i )
{
float p0q0 = texture2D(textureSampler_i, texCoord_i)[0];
float p1q0 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX, 0))[0];
float p0q1 = texture2D(textureSampler_i, texCoord_i + vec2(0, texelSizeY))[0];
float p1q1 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX , texelSizeY))[0];
float a = fract( texCoord_i.x * fWidth ); // Get Interpolation factor for X direction.
// Fraction near to valid data.
float pInterp_q0 = mix( p0q0, p1q0, a ); // Interpolates top row in X direction.
float pInterp_q1 = mix( p0q1, p1q1, a ); // Interpolates bottom row in X direction.
float b = fract( texCoord_i.y * fHeight );// Get Interpolation factor for Y direction.
return mix( pInterp_q0, pInterp_q1, b ); // Interpolate in Y direction.
}
On an Nvidia GPU this looks fine, but on two other computers with an Intel integrated GPU it looks like this:
There are lighter or darker lines appearing that shouldn't be there. They become visible if you zoom in, and tend to get more frequent the more you zoom. When zooming in very closely, they appear at the edge of every texel of the texture I'm filtering. I tried changing the precision statement in the fragment shader, but this didn't fix it.
The built-in linear filtering works on both GPUs, but I still need the manual filtering as a fallback for GPUs that don't support linear filtering on floating point textures with WebGL.
The Intel GPUs are from a desktop Core i5-4460 and a notebook with an Intel HD 5500 GPU. For all precisions of floating point values I get a rangeMin and rangeMax of 127 and a precision of 23 from getShaderPrecisionFormat.
Any idea on what causes these artifacts and how I can work around it?
Edit:
By experimenting a bit more I found that reducing the texel size variable in the fragment shader removes these artifacts:
float texelSizeX = 1.0/fWidth*0.998;
float texelSizeY = 1.0/fHeight*0.998;
Multiplying by 0.999 isn't enough, but multiplying the texel size by 0.998 removes the artifacts.
This is obviously not a satisfying fix, I still don't know what causes it and I probably caused artifacts on other GPUs or drivers now. So I'm still interested in figuring out what the actual issue is here.
It's not clear to me what the code is trying to do. It's not reproducing the GPU's bilinear because that would be using pixels centered around the texcoord.
In other words, as implemented
vec4 c = tex2DBiLinear(someSampler, someTexcoord);
is NOT equivalent to LINEAR
vec4 c = texture2D(someSampler, someTexcoord);
texture2D looks at pixels someTexcoord +/- texelSize * .5 whereas tex2DBiLinear is looking at pixels someTexcoord and someTexcoord + texelSize
You haven't given enough code to repro your issue. I'm guessing the size of the source texture is 512x1024, but since you didn't post that code I have no idea if your source texture matches the defined size. You also didn't post what size your target is. The top image you posted is 471x488. Was that your target size? You also didn't post the texture coordinates you're using or the code that manipulates them.
Guessing that your source is 512x1024 and your target is 471x488, I can't repro your issue.
const fs = `
precision highp float;
uniform sampler2D tex;
varying vec2 v_texcoord;
float tex2DBiLinear( sampler2D textureSampler_i, vec2 texCoord_i )
{
float fHeight = 1024.0;
float fWidth = 512.0;
float texelSizeX = 1.0/fWidth;
float texelSizeY = 1.0/fHeight;
float p0q0 = texture2D(textureSampler_i, texCoord_i)[0];
float p1q0 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX, 0))[0];
float p0q1 = texture2D(textureSampler_i, texCoord_i + vec2(0, texelSizeY))[0];
float p1q1 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX , texelSizeY))[0];
float a = fract( texCoord_i.x * fWidth ); // Get Interpolation factor for X direction.
// Fraction near to valid data.
float pInterp_q0 = mix( p0q0, p1q0, a ); // Interpolates top row in X direction.
float pInterp_q1 = mix( p0q1, p1q1, a ); // Interpolates bottom row in X direction.
float b = fract( texCoord_i.y * fHeight );// Get Interpolation factor for Y direction.
return mix( pInterp_q0, pInterp_q1, b ); // Interpolate in Y direction.
}
void main() {
gl_FragColor = vec4(tex2DBiLinear(tex, v_texcoord), 0, 0, 1);
}
`;
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const gl = document.querySelector('canvas').getContext('webgl');
// compile shaders, link programs, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1,
1, -1,
-1, 1,
1, 1,
],
},
texcoord: [
0, 0,
1, 0,
0, 1,
1, 1,
],
indices: [
0, 1, 2,
2, 1, 3,
],
});
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 512;
ctx.canvas.height = 1024;
const gradient = ctx.createRadialGradient(256, 512, 0, 256, 512, 700);
gradient.addColorStop(0, 'red');
gradient.addColorStop(1, 'cyan');
ctx.fillStyle = gradient;
ctx.fillRect(0, 0, 512, 1024);
const tex = twgl.createTexture(gl, {
src: ctx.canvas,
minMag: gl.NEAREST,
wrap: gl.CLAMP_TO_EDGE,
auto: false,
});
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas width="471" height="488"></canvas>
If you think the issue is related to floating point textures, I can't repro there either
const fs = `
precision highp float;
uniform sampler2D tex;
varying vec2 v_texcoord;
float tex2DBiLinear( sampler2D textureSampler_i, vec2 texCoord_i )
{
float fHeight = 1024.0;
float fWidth = 512.0;
float texelSizeX = 1.0/fWidth;
float texelSizeY = 1.0/fHeight;
float p0q0 = texture2D(textureSampler_i, texCoord_i)[0];
float p1q0 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX, 0))[0];
float p0q1 = texture2D(textureSampler_i, texCoord_i + vec2(0, texelSizeY))[0];
float p1q1 = texture2D(textureSampler_i, texCoord_i + vec2(texelSizeX , texelSizeY))[0];
float a = fract( texCoord_i.x * fWidth ); // Get Interpolation factor for X direction.
// Fraction near to valid data.
float pInterp_q0 = mix( p0q0, p1q0, a ); // Interpolates top row in X direction.
float pInterp_q1 = mix( p0q1, p1q1, a ); // Interpolates bottom row in X direction.
float b = fract( texCoord_i.y * fHeight );// Get Interpolation factor for Y direction.
return mix( pInterp_q0, pInterp_q1, b ); // Interpolate in Y direction.
}
void main() {
gl_FragColor = vec4(tex2DBiLinear(tex, v_texcoord), 0, 0, 1);
}
`;
const vs = `
attribute vec4 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = texcoord;
}
`;
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('OES_texture_float');
if (!ext) { alert('need OES_texture_float'); }
// compile shaders, link programs, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// calls gl.createBuffer, gl.bindBuffer, gl.bufferData for each array
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-1, -1,
1, -1,
-1, 1,
1, 1,
],
},
texcoord: [
0, 0,
1, 0,
0, 1,
1, 1,
],
indices: [
0, 1, 2,
2, 1, 3,
],
});
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 512;
ctx.canvas.height = 1024;
const gradient = ctx.createRadialGradient(256, 512, 0, 256, 512, 700);
gradient.addColorStop(0, 'red');
gradient.addColorStop(1, 'cyan');
ctx.fillStyle = gradient;
ctx.fillRect(0, 0, 512, 1024);
const tex = twgl.createTexture(gl, {
src: ctx.canvas,
type: gl.FLOAT,
minMag: gl.NEAREST,
wrap: gl.CLAMP_TO_EDGE,
auto: false,
});
gl.useProgram(programInfo.program);
// calls gl.bindBuffer, gl.enableVertexAttribArray, gl.vertexAttribPointer
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
// calls gl.drawArrays or gl.drawElements
twgl.drawBufferInfo(gl, bufferInfo);
const e = gl.getExtension('WEBGL_debug_renderer_info');
if (e) {
console.log(gl.getParameter(e.UNMASKED_VENDOR_WEBGL));
console.log(gl.getParameter(e.UNMASKED_RENDERER_WEBGL));
}
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas width="471" height="488"></canvas>
If any of the values are off — if your source texture size doesn't match fWidth and fHeight, or if your texture coordinates are different or adjusted in some way — then of course maybe I could repro. If any of those are different then I can imagine issues.
Tested in Intel Iris Pro and Intel HD Graphics 630. Also tested on an iPhone6+. Note that you need to make sure your fragment shader is running in precision highp float but that setting would likely only affect mobile GPUs.
We had an almost identical issue that occurred at a specific zoom of the texture. We found out that the positions where artifacts appear can be detected with these conditions:
vec2 imagePosCenterity = fract(uv * imageSize);
if (abs(imagePosCenterity.x-0.5) < 0.001 || abs(imagePosCenterity.y-0.5) < 0.001) {}
Where imageSize is width and height of the texture.
Our solution looks like this:
vec4 texture2DLinear( sampler2D texSampler, vec2 uv) {
vec2 pixelOff = vec2(0.5,0.5)/imageSize;
vec2 imagePosCenterity = fract(uv * imageSize);
if (abs(imagePosCenterity.x-0.5) < 0.001 || abs(imagePosCenterity.y-0.5) < 0.001) {
pixelOff = pixelOff-vec2(0.00001,0.00001);
}
vec4 tl = texture2D(texSampler, uv + vec2(-pixelOff.x,-pixelOff.y));
vec4 tr = texture2D(texSampler, uv + vec2(pixelOff.x,-pixelOff.y));
vec4 bl = texture2D(texSampler, uv + vec2(-pixelOff.x,pixelOff.y));
vec4 br = texture2D(texSampler, uv + vec2(pixelOff.x,pixelOff.y));
vec2 f = fract( (uv.xy-pixelOff) * imageSize );
vec4 tA = mix( tl, tr, f.x );
vec4 tB = mix( bl, br, f.x );
return mix( tA, tB, f.y );
}
It is a really dirty solution but it works. Changing texelSize as suggested above only moves the artifacts to other positions. We change texelSize a little bit only at the problematic positions.
Why are we using linear texture interpolation in a GLSL shader? Because we need to use a 1-sample-per-pixel, 16-bit-per-sample texture across a broad set of compatible devices. That is normally possible only with the OES_texture_half_float_linear extension; with our approach it is possible without using the extension.

Resources