Compare current depth with older depth - three.js

We have a scene rendered to a WebGLRenderTarget whose depth texture is passed as tDepth to a ShaderMaterial in a different scene. Below is the fragment shader for that ShaderMaterial. It does a depth check and then renders onto the canvas. (I could just disable auto-clearing of the depth buffer and have it working, but I want a custom depth function, hence the code.)
varying vec2 vUv;
uniform sampler2D tDepth;
uniform float width;
uniform float height;
void main() {
vec2 clipCoord = vec2(gl_FragCoord.x / width, gl_FragCoord.y / height);
float oldDepth = texture2D(tDepth, clipCoord).r; // Gets wrong value.
float currentDepth = gl_FragCoord.z;
if (oldDepth > currentDepth) {
gl_FragColor = vec4(1,0,0,0);
} else {
discard;
}
}
How do I get the correct value of oldDepth?
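One thing worth double-checking, sketched below under the assumption that the render target and the canvas share the same pixel dimensions: width and height must be the drawing buffer's size in device pixels (not CSS pixels), otherwise gl_FragCoord.xy / resolution will not line up with the depth texture's texels. With a single resolution uniform (a name chosen here for illustration) the shader would read:
varying vec2 vUv;
uniform sampler2D tDepth;
uniform vec2 resolution; // drawing-buffer size in device pixels (hypothetical uniform)
void main() {
    vec2 screenUv = gl_FragCoord.xy / resolution;   // the texel the first pass wrote for this pixel
    float oldDepth = texture2D(tDepth, screenUv).r; // depth from the earlier pass
    float currentDepth = gl_FragCoord.z;
    if (oldDepth > currentDepth) {                  // custom depth function goes here
        gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
    } else {
        discard;
    }
}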


How to texture non-unwrapped model using a cubemap

I have lots of models that aren't unwrapped (they don't have UV coordinates), and they are too complex to unwrap. I therefore decided to texture them using a seamless cubemap:
[VERT]
attribute vec4 a_position;
varying vec3 texCoord;
uniform mat4 u_worldTrans;
uniform mat4 u_projTrans;
...
void main()
{
gl_Position = u_projTrans * u_worldTrans * a_position;
texCoord = vec3(a_position);
}
[FRAG]
varying vec3 texCoord;
uniform samplerCube u_cubemapTex;
void main()
{
gl_FragColor = textureCube(u_cubemapTex, texCoord);
}
It works, but the result is quite weird because the texturing depends on the vertex positions. If my model is more complex than a cube or a sphere, I see visible seams and low texture resolution on some parts of the object.
Reflection maps well onto the model, but it has a mirror effect.
Reflection:
[VERT]
attribute vec3 a_normal;
varying vec3 v_reflection;
uniform mat4 u_matViewInverseTranspose;
uniform vec3 u_cameraPos;
...
void main()
{
mat3 normalMatrix = mat3(u_matViewInverseTranspose);
vec3 n = normalize(normalMatrix * a_normal);
//calculate reflection
vec3 vView = a_position.xyz - u_cameraPos.xyz;
v_reflection = reflect(vView, n);
...
}
How can I implement something like a reflection, but with a "sticky" effect, meaning the texture behaves as if it were attached to a certain vertex (it does not move)? Each side of the model must display its own side of the cubemap, and the result should look like ordinary 2D texturing. Any advice will be appreciated.
UPDATE 1
I summed up all the comments and decided to calculate the cubemap UVs. Since I use LibGDX, some names may differ from the OpenGL ones.
Shader class:
public class CubemapUVShader implements com.badlogic.gdx.graphics.g3d.Shader {
ShaderProgram program;
Camera camera;
RenderContext context;
Matrix4 viewInvTraMatrix, viewInv;
Texture texture;
Cubemap cubemapTex;
...
@Override
public void begin(Camera camera, RenderContext context) {
this.camera = camera;
this.context = context;
program.begin();
program.setUniformMatrix("u_matProj", camera.projection);
program.setUniformMatrix("u_matView", camera.view);
cubemapTex.bind(1);
program.setUniformi("u_textureCubemap", 1);
texture.bind(0);
program.setUniformi("u_texture", 0);
context.setDepthTest(GL20.GL_LEQUAL);
context.setCullFace(GL20.GL_BACK);
}
@Override
public void render(Renderable renderable) {
program.setUniformMatrix("u_matModel", renderable.worldTransform);
viewInvTraMatrix.set(camera.view);
viewInvTraMatrix.mul(renderable.worldTransform);
program.setUniformMatrix("u_matModelView", viewInvTraMatrix);
viewInvTraMatrix.inv();
viewInvTraMatrix.tra();
program.setUniformMatrix("u_matViewInverseTranspose", viewInvTraMatrix);
renderable.meshPart.render(program);
}
...
}
Vertex:
attribute vec4 a_position;
attribute vec2 a_texCoord0;
attribute vec3 a_normal;
attribute vec3 a_tangent;
attribute vec3 a_binormal;
varying vec2 v_texCoord;
varying vec3 v_cubeMapUV;
uniform mat4 u_matProj;
uniform mat4 u_matView;
uniform mat4 u_matModel;
uniform mat4 u_matViewInverseTranspose;
uniform mat4 u_matModelView;
void main()
{
gl_Position = u_matProj * u_matView * u_matModel * a_position;
v_texCoord = a_texCoord0;
//CALCULATE CUBEMAP UV (WRONG!)
//I decided that tm_l2g mentioned in comments is u_matView * u_matModel
v_cubeMapUV = vec3(u_matView * u_matModel * vec4(a_normal, 0.0));
/*
mat3 normalMatrix = mat3(u_matViewInverseTranspose);
vec3 t = normalize(normalMatrix * a_tangent);
vec3 b = normalize(normalMatrix * a_binormal);
vec3 n = normalize(normalMatrix * a_normal);
*/
}
Fragment:
varying vec2 v_texCoord;
varying vec3 v_cubeMapUV;
uniform sampler2D u_texture;
uniform samplerCube u_textureCubemap;
void main()
{
vec3 cubeMapUV = normalize(v_cubeMapUV);
vec4 diffuse = textureCube(u_textureCubemap, cubeMapUV);
gl_FragColor = vec4(diffuse.rgb, 1.0);
}
The result is completely wrong:
I expect something like that:
UPDATE 2
The texture looks stretched on the sides and distorted in some places if I use the vertex position as the cubemap coordinate in the vertex shader:
v_cubeMapUV = a_position.xyz;
I uploaded euro.blend, euro.obj and the cubemap files for review.
That code works only for meshes that are centered around (0,0,0). If that is not the case, or even if (0,0,0) is not inside the mesh, then artifacts occur...
I would start by computing the bounding box BBOXmin(x0,y0,z0), BBOXmax(x1,y1,z1) of your mesh and translating the position used for the texture coordinate so it is centered on it:
center = 0.5*(BBOXmin+BBOXmax);
texCoord = vec3(a_position-center);
However, non-uniform vertex density would still lead to texture-scaling artifacts, especially if the BBOX side lengths differ too much. Rescaling it to a cube would help:
vec3 center = 0.5*(BBOXmin+BBOXmax); // center of BBOX
vec3 size = BBOXmax-BBOXmin; // size of BBOX
vec3 r = a_position-center; // position centered around center of BBOX
r.x/=size.x; // rescale it to cube BBOX
r.y/=size.y;
r.z/=size.z;
texCoord = r;
Again, if the center of the BBOX is not inside the mesh then this would not work ...
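Putting both corrections together, a minimal vertex-shader sketch in the style of the question's [VERT] block; the u_bboxMin/u_bboxMax uniforms are my own invention and would be computed on the CPU from the mesh:
attribute vec4 a_position;
varying vec3 texCoord;
uniform mat4 u_worldTrans;
uniform mat4 u_projTrans;
uniform vec3 u_bboxMin; // hypothetical uniforms: BBOX corners uploaded from the CPU
uniform vec3 u_bboxMax;
void main()
{
    vec3 center = 0.5 * (u_bboxMin + u_bboxMax);  // center of BBOX
    vec3 size = u_bboxMax - u_bboxMin;            // size of BBOX
    texCoord = (a_position.xyz - center) / size;  // centered and rescaled to a cube
    gl_Position = u_projTrans * u_worldTrans * a_position;
}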
The reflection part is not clear to me. Do you have some images/screenshots?
[Edit1] simple example
I see it like this (without the center offsetting and aspect ratio corrections mentioned above):
[Vertex]
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform mat4x4 tm_l2g;
uniform mat4x4 tm_g2s;
layout(location=0) in vec3 pos;
layout(location=1) in vec4 col;
out smooth vec4 pixel_col;
out smooth vec3 pixel_txr;
//------------------------------------------------------------------
void main(void)
{
pixel_col=col;
pixel_txr=(tm_l2g*vec4(pos,0.0)).xyz;
gl_Position=tm_g2s*tm_l2g*vec4(pos,1.0);
}
//------------------------------------------------------------------
[Fragment]
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
in smooth vec4 pixel_col;
in smooth vec3 pixel_txr;
uniform samplerCube txr_skybox;
out layout(location=0) vec4 frag_col;
//------------------------------------------------------------------
void main(void)
{
frag_col=texture(txr_skybox,pixel_txr);
}
//------------------------------------------------------------------
And here is a preview:
The white torus in the first few frames uses the fixed-function pipeline; the rest uses shaders. As you can see, the only inputs I use are the vertex position, color, and the transform matrices: tm_l2g, which converts from mesh coordinates to the global world, and tm_g2s, which holds the perspective projection...
As you can see, I render the BBOX with the same CUBE MAP texture I use for rendering the model, so it looks like a cool reflection/transparency effect :) (which was not intentional).
Anyway, when I change the line
pixel_txr=(tm_l2g*vec4(pos,0.0)).xyz;
into:
pixel_txr=pos;
in my vertex shader, the object becomes solid again:
You can combine both by passing two texture-coordinate vectors and fetching two texels in the fragment shader, adding them together with some ratio (see the sketch below). Of course, you would need to pass two cube map textures, one for the object and one for the skybox ...
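A sketch of that combination in the same GLSL 4.2 style as above; the coordinate and sampler names here are mine, not from the engine:
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
in smooth vec3 pixel_txr_obj;   // position-based coordinate (solid object look)
in smooth vec3 pixel_txr_sky;   // tm_l2g-based coordinate (reflection look)
uniform samplerCube txr_object; // cube map for the object itself
uniform samplerCube txr_skybox; // cube map of the surroundings
out layout(location=0) vec4 frag_col;
//------------------------------------------------------------------
void main(void)
{
    vec4 obj = texture(txr_object, pixel_txr_obj);
    vec4 sky = texture(txr_skybox, pixel_txr_sky);
    frag_col = mix(obj, sky, 0.25); // blend ratio chosen arbitrarily here
}
//------------------------------------------------------------------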
The red warnings are from my CPU-side code, reminding me that I am trying to set uniforms that are not present in the shaders (I took this from the bump-mapping example without changing the CPU-side code...).
[Edit2] here is a preview of your mesh with the offset
The vertex shader changes a bit (I just added the offsetting described above):
//------------------------------------------------------------------
#version 420 core
//------------------------------------------------------------------
uniform mat4x4 tm_l2g;
uniform mat4x4 tm_g2s;
uniform vec3 center=vec3(0.0,0.0,2.0);
layout(location=0) in vec3 pos;
layout(location=1) in vec4 col;
out smooth vec4 pixel_col;
out smooth vec3 pixel_txr;
//------------------------------------------------------------------
void main(void)
{
pixel_col=col;
pixel_txr=pos-center;
gl_Position=tm_g2s*tm_l2g*vec4(pos,1.0);
}
//------------------------------------------------------------------
So by offsetting the center point you can get rid of the singular-point distortion. However, as I mentioned in the comments, for arbitrary meshes there will always be some distortion with cheap texturing tricks like this instead of proper texture coordinates.
Beware: my mesh was resized/normalized (sadly I do not remember if it is the <-1,+1> range or a different one, and I am too lazy to dig through the source code of the GLSL engine I tested this in), so the offset might need a different magnitude in your environment to achieve the same result.

What's causing these artefacts?

I'm trying to map a 3D texture onto voxel terrain; the pixel color from the texture should align with the terrain geometry, and I don't want any gradients.
It's mostly working, but depending on the camera angle I can see artefacts, and I would like to know what's causing them and how to fix them.
Is it some kind of bleeding? I've already tried changing my texture as described here:
ogr3d tilemap, but without success.
I don't think it's a mipmapping bug, because I turned mipmapping off, and turning it on looks far worse.
The geometry is grid aligned.
I set min and mag filtering to nearest and wrapping to clampToEdge.
fragment:
vec2 computeSliceOffset(float slice, float slicesPerRow, vec2 sliceSize) {
return sliceSize * vec2(mod(slice, slicesPerRow), floor(slice / slicesPerRow));
}
vec4 sampleAs3DTexture(sampler2D tex, vec3 texCoord, float size, float numRows, float slicesPerRow) {
float slice = texCoord.z * size;
float sliceZ = floor(slice);
float zOffset = fract(slice);
vec2 sliceSize = vec2(1.0 / slicesPerRow, 1.0 / numRows);
vec2 sliceOffset = computeSliceOffset(sliceZ, slicesPerRow, sliceSize);
vec2 uv = texCoord.xy * sliceSize;
vec4 sliceColor = texture2D(tex, sliceOffset + uv);
return sliceColor;
}
vec3 texCoord = mod(worldPosition.xyz,128.0)/128.0;
vec4 myColor = sampleAs3DTexture(texture_0,texCoord,64.0,8.0,8.0);
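One common culprit for such artefacts is bleeding across slice borders: with texCoord.xy at 0.0 or 1.0, the sample lands exactly on the seam between two slices in the atlas, and the nearest texel can come from the neighboring slice. A hedged variant of sampleAs3DTexture (reusing computeSliceOffset from above, and assuming each slice is size x size texels) that clamps the sample point half a texel inside its slice:
vec4 sampleAs3DTexture(sampler2D tex, vec3 texCoord, float size, float numRows, float slicesPerRow) {
    float sliceZ = floor(texCoord.z * size);
    vec2 sliceSize = vec2(1.0 / slicesPerRow, 1.0 / numRows);
    vec2 sliceOffset = computeSliceOffset(sliceZ, slicesPerRow, sliceSize);
    // atlas resolution in texels, assuming size x size texels per slice
    vec2 atlasTexels = vec2(size * slicesPerRow, size * numRows);
    vec2 halfTexel = 0.5 / atlasTexels;
    // keep the sample at least half a texel away from the slice border
    vec2 uv = clamp(texCoord.xy * sliceSize, halfTexel, sliceSize - halfTexel);
    return texture2D(tex, sliceOffset + uv);
}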

GLSL 4.0 mesh rotation messes up normals? Help please

Could someone please help me with my OpenGL GLSL 4.0 shader? The problem I am having is that when a 3D model (OBJ file) is loaded and rendered, everything works well (lighting is good, the mesh vertices display great) except for the normals of the mesh. Specifically, when the OBJ file is rotated in its local/model space, the normals do not appear to light the mesh in accordance with the light position and its current orientation (I hope that makes some sense).
I believe the problem is with my normal matrix.
Problem: when my 3D mesh rotates, the lighting gets messed up (it does not reflect the light position).
Any help would be much appreciated. Thanks in advance.
VertexShader
#version 400
//Handle translation, projection, etc
struct Matrix {
mat4 mvp;
mat4 mv;
mat4 view;
mat4 projection;
};
struct Light {
vec3 position;
vec3 color;
vec3 direction;
float intensity;
vec3 ambient;
};
//---------------------------------------------------
//INPUT
//---------------------------------------------------
//Per-Vertex Data
//---------------------------------------------------
layout (location = 0) in vec3 inputPosition;
layout (location = 1) in vec3 inputNormal;
layout (location = 2) in vec3 inputTexture;
//--------------------------------------------
// UNIFORM:INPUT Supplied Data from C++ application
//--------------------------------------------
uniform Matrix matrix;
uniform Light light;
uniform vec3 cameraPosition;
out vec3 fragmentNormal;
out vec3 cameraVector;
out vec3 lightVector;
out vec2 texCoord;
void main() {
// output the transformed vertex
gl_Position = matrix.mvp * vec4(inputPosition,1.0);
// normal matrix: transpose of the inverse of the model-view matrix
mat3 Normal_Matrix = mat3( transpose(inverse(matrix.mv)) );
// set the normal for the fragment shader and
// the vector from the vertex to the camera
vec3 vertex = (matrix.mv * vec4(inputPosition,1.0)).xyz;
//----------------------------------------------------------
//The problem (i think) is here
//----------------------------------------------------------
fragmentNormal = normalize(Normal_Matrix * inputNormal);
cameraVector = (matrix.mv *vec4(cameraPosition,1.0)).xyz - vertex ;
lightVector = vertex - (matrix.mv * vec4(light.position,1.0)).xyz;
//store the texture data
texCoord = inputTexture.xy;
}
Fragment Shader
#version 400
const int NUM_LIGHTS = 3;
const float MAX_DIST = 15.0;
const float MAX_DIST_SQUARED = MAX_DIST * MAX_DIST;
const vec3 AMBIENT = vec3(0.152, 0.152, 0.152); // 0.2 for all components is a good dark value
struct Light {
vec3 position;
vec3 color;
vec3 direction;
float intensity;
vec3 ambient;
};
//the image
uniform sampler2D textureSampler;
uniform Light light;
out vec4 finalOutput;
// interpolated inputs: must be declared in both the vertex and fragment shaders
in vec2 texCoord; // texture coordinate
in vec3 fragmentNormal;
in vec3 cameraVector;
in vec3 lightVector;
void main() {
vec4 texColor = texture2D(textureSampler, texCoord);
// initialize diffuse/specular lighting
vec3 diffuse = vec3(0.005f, 0.005f, 0.005f);
vec3 specular = vec3(0.00f, 0.00f, 0.00f);
// normalize the fragment normal and camera direction
vec3 normal = normalize(fragmentNormal);
vec3 cameraDir = normalize(cameraVector);
// single light here (NUM_LIGHTS above is unused; there is no loop)
// normalized squared distance to the light, in [0.0, 1.0]
float dist = min(dot(lightVector, lightVector), MAX_DIST_SQUARED) / MAX_DIST_SQUARED;
float distFactor = 1.0 - dist;
// diffuse
vec3 lightDir = normalize(lightVector);
float diffuseDot = dot(normal, lightDir);
diffuse += light.color * clamp(diffuseDot, 0.0, 1.0) * distFactor;
// specular
vec3 halfAngle = normalize(cameraDir + lightDir);
vec3 specularColor = min(light.color + 0.8, 1.0);
float specularDot = dot(normal, halfAngle);
specular += specularColor * pow(clamp(specularDot, 0.0, 1.0), 16.0) * distFactor;
vec4 sample0 = vec4(1.0, 1.0, 1.0, 1.0);
vec3 ambDifCombo = (diffuse + AMBIENT);
//calculate the final color
vec3 color = clamp(sample0.rgb * ambDifCombo + specular, 0.0, 1.0);
finalOutput = vec4(color * vec3(texColor), sample0.a);
}
You should not transform your light position. Your light should remain stationary while your mesh rotates. Instead of this:
lightVector = vertex - (matrix.mv * vec4(light.position,1.0)).xyz;
Do this:
lightVector = vertex - light.position;
I would also try not transforming your camera position.
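Alternatively, if you want to keep the lighting in view space, the light position should be transformed by the view matrix only, not by the full model-view matrix (which drags the light along with the mesh's rotation). A minimal sketch of the relevant lines inside the vertex shader's main(), assuming light.position is given in world space:
// all lighting vectors expressed in view space
vec3 vertex = (matrix.mv * vec4(inputPosition, 1.0)).xyz;          // vertex in view space
vec3 lightPosView = (matrix.view * vec4(light.position, 1.0)).xyz; // light in view space
lightVector = lightPosView - vertex; // points from the surface toward the light (note the sign convention)
cameraVector = -vertex;              // the camera sits at the origin in view space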

An outline/sharp transition in a fragment shader

I would like to create a sharp transition effect between pixels in my fragment shader, but I'm not sure how I could do this.
In my vertex shader I have a varying float x; and in my fragment shader I use this value to set the opacity of the color. I quantize the current value to produce a layering effect. What I'd like to do, at the very lowest level of the effect, is produce a distinct border (a different color entirely). For example, if x > 0.1 and for any neighboring pixel x < 0.1, then the resulting color should be black.
I don't see any way in GLSL to gain access to neighbouring pixels (I could be wrong). How could I achieve such an effect? I'm limited to OpenGL ES 2.0 (though if it's not possible at all in this version, any solution would be helpful).
You are correct that you cannot access neighboring pixels: there is no guarantee of the order in which pixels are written, because they are all drawn in parallel. If you could access neighboring pixels in the framebuffer, you would get inconsistent results.
However you can do this in a post-process if you want. Draw your whole scene into a framebuffer texture, and then draw that texture to the screen with a filtering shader.
When drawing from a texture in your shader you can sample neighboring texels all you want, so you could easily compare the delta between two neighboring texels.
If your OpenGL ES implementation supports the OES_standard_derivatives extension, you can get the rate of change of your variable by forward/backward differencing with neighboring pixels in the 2×2 quad being shaded:
float outline(float t, float threshold, float width)
{
return clamp(width - abs(threshold - t) / fwidth(t), 0.0, 1.0);
}
This function returns the coverage for a line of the specified width where t ≈ threshold, using fwidth to determine how far it is from the cutoff. Note that fwidth(t) is equivalent to abs(dFdx(t)) + abs(dFdy(t)) and calculates the width in Manhattan distance, which may overfatten diagonal lines. If you prefer Euclidean distance:
float outline(float t, float threshold, float width)
{
float dx = dFdx(t);
float dy = dFdy(t);
float ewidth = sqrt(dx * dx + dy * dy);
return clamp(width - abs(threshold - t) / ewidth, 0.0, 1.0);
}
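For example, combined with either outline() above, a one-pixel black border could be drawn where x crosses the 0.1 threshold; baseColor here is a hypothetical stand-in for whatever the shader already computes:
varying float x; // the interpolated value from the vertex shader
void main() {
    vec4 baseColor = vec4(1.0, 1.0, 1.0, x);   // stand-in for the existing layer color
    float edge = outline(x, 0.1, 1.0);         // coverage of a one-pixel border at x == 0.1
    gl_FragColor = mix(baseColor, vec4(0.0, 0.0, 0.0, 1.0), edge);
}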
In addition to Pivot's implementation based on derivatives, you can grab neighboring pixels from a source image using an offset based on the pixel dimensions of that source. The inverse of the width or height in pixels is the offset from the current texture coordinate that you'll need to use here.
For example, here is a vertex shader I've used to calculate these offsets for the eight pixels that surround a central one:
attribute vec4 position;
attribute vec4 inputTextureCoordinate;
uniform highp float texelWidth;
uniform highp float texelHeight;
varying vec2 textureCoordinate;
varying vec2 leftTextureCoordinate;
varying vec2 rightTextureCoordinate;
varying vec2 topTextureCoordinate;
varying vec2 topLeftTextureCoordinate;
varying vec2 topRightTextureCoordinate;
varying vec2 bottomTextureCoordinate;
varying vec2 bottomLeftTextureCoordinate;
varying vec2 bottomRightTextureCoordinate;
void main()
{
gl_Position = position;
vec2 widthStep = vec2(texelWidth, 0.0);
vec2 heightStep = vec2(0.0, texelHeight);
vec2 widthHeightStep = vec2(texelWidth, texelHeight);
vec2 widthNegativeHeightStep = vec2(texelWidth, -texelHeight);
textureCoordinate = inputTextureCoordinate.xy;
leftTextureCoordinate = inputTextureCoordinate.xy - widthStep;
rightTextureCoordinate = inputTextureCoordinate.xy + widthStep;
topTextureCoordinate = inputTextureCoordinate.xy - heightStep;
topLeftTextureCoordinate = inputTextureCoordinate.xy - widthHeightStep;
topRightTextureCoordinate = inputTextureCoordinate.xy + widthNegativeHeightStep;
bottomTextureCoordinate = inputTextureCoordinate.xy + heightStep;
bottomLeftTextureCoordinate = inputTextureCoordinate.xy - widthNegativeHeightStep;
bottomRightTextureCoordinate = inputTextureCoordinate.xy + widthHeightStep;
}
and here's a fragment shader that uses this to perform Sobel edge detection:
precision mediump float;
varying vec2 textureCoordinate;
varying vec2 leftTextureCoordinate;
varying vec2 rightTextureCoordinate;
varying vec2 topTextureCoordinate;
varying vec2 topLeftTextureCoordinate;
varying vec2 topRightTextureCoordinate;
varying vec2 bottomTextureCoordinate;
varying vec2 bottomLeftTextureCoordinate;
varying vec2 bottomRightTextureCoordinate;
uniform sampler2D inputImageTexture;
void main()
{
float bottomLeftIntensity = texture2D(inputImageTexture, bottomLeftTextureCoordinate).r;
float topRightIntensity = texture2D(inputImageTexture, topRightTextureCoordinate).r;
float topLeftIntensity = texture2D(inputImageTexture, topLeftTextureCoordinate).r;
float bottomRightIntensity = texture2D(inputImageTexture, bottomRightTextureCoordinate).r;
float leftIntensity = texture2D(inputImageTexture, leftTextureCoordinate).r;
float rightIntensity = texture2D(inputImageTexture, rightTextureCoordinate).r;
float bottomIntensity = texture2D(inputImageTexture, bottomTextureCoordinate).r;
float topIntensity = texture2D(inputImageTexture, topTextureCoordinate).r;
float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity;
float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity;
float mag = length(vec2(h, v));
gl_FragColor = vec4(vec3(mag), 1.0);
}
I pass in the texelWidth and texelHeight uniforms, which are 1/width and 1/height of the image, respectively. This does require you to track the input image width and height, but it should work on all OpenGL ES devices, not just those with the derivative extensions.
I do the texture offset calculations in the vertex shader for two reasons: so that offset calculations only need to be performed once per vertex instead of once per fragment, and more importantly because some of the tile-based deferred renderers react very poorly to dependent texture reads where texture offsets are calculated in a fragment shader. The performance can be up to 20X higher for a shader program that removes these dependent texture reads on these devices.
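For contrast, here is a sketch of the per-fragment variant the paragraph above advises against; on those renderers, any texture coordinate computed in the fragment shader (rather than passed straight through as a varying) counts as a dependent read:
precision mediump float;
varying vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
uniform highp float texelWidth;
void main()
{
    // offsets computed per fragment: these are the dependent texture reads to avoid
    float left  = texture2D(inputImageTexture, textureCoordinate - vec2(texelWidth, 0.0)).r;
    float right = texture2D(inputImageTexture, textureCoordinate + vec2(texelWidth, 0.0)).r;
    // ... the remaining six neighbors would be fetched the same way
    gl_FragColor = vec4(vec3(abs(right - left)), 1.0);
}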

offset error after the 2nd texture access

I want my fragment shader to traverse a serialized quad tree.
When an inner node is found, the rg values are interpreted as an index into the same texture.
A blue value of 0 marks an inner node.
In a first step, a pointer is read from a 2x2 subimage at position 0x0 using the provided uv coords.
Then that pointer is used to access another 2x2 portion of the same texture.
However, for each child of the root node there is an increasing offset error that results in the wrong color.
Here is my shader (for debugging purposes the loop is fixed at one iteration, so only 2 levels of the quad tree get accessed).
Also for debugging, I put a red 2x2 image at the location of the top-left child, a green image for the top-right, blue for the bottom-left and yellow for the bottom-right child.
The resulting image is this:
I am completely clueless. Can one of you think of a reason why this is happening?
I checked all the coordinate conversions and calculations 3 times; they are all correct.
Here is the shader:
// virtual_image.fs
precision highp float;
uniform sampler2D t_atlas;
uniform sampler2D t_tree;
uniform vec2 gridpoolSize;
uniform vec2 atlasTileSize;
uniform vec2 atlasSize;
varying vec2 v_texcoord;
const float LEAF_MARKER = 1.0;
const float NODE_MARKER = 0.0;
const float CHANNEL_PERECISION = 255.0;
vec2 decode(const vec2 vec){
return vec * CHANNEL_PERECISION;
}
void main ()
{
vec4 oc = vec4(1); // output color
vec4 tColor = texture2D(t_tree, v_texcoord); // only for debugging
vec4 aColor = texture2D(t_atlas, v_texcoord); // only for debugging
// oc = mix(tColor, aColor, 0.5);
highp vec2 localCoord = v_texcoord;
// by convention the root node starts at [0,0]
// so we read the first pointer relative to that point
// we divide by gridpoolSize to convert the local coords into coords of the first grid at [0,0]
highp vec3 pointer = texture2D(t_tree, localCoord / gridpoolSize).rgb;// pointer is correct at this point!
for(int i = 0 ; i < 1; i++) {
// divides the local coords into 4 quadrants
localCoord = fract(localCoord * 2.0); // localCoord is correct!
// branch
if(pointer.b <= NODE_MARKER + 0.1){
highp vec2 index = decode(pointer.rg);// index is correct!
highp vec2 absoluteCoord = (localCoord + index) / gridpoolSize;// absoluteCoord is correct!
// we have a inner node get next pointer and continue
pointer = texture2D(t_tree, absoluteCoord).rgb;
oc.rgb = pointer.rgb; // this point in the code introduces a growing offset, I don't know where this comes from. BUG LOCATION
//gl_FragColor = vec4(1,0,0,1);
} else {
if(pointer.b >= LEAF_MARKER - 0.1){
// we have a leaf
vec2 atlasCoord = ((decode(pointer.rg) * atlasTileSize) / atlasSize) + (localCoord * (atlasTileSize / atlasSize));
vec4 atlasColor = texture2D(t_atlas, atlasCoord);
//vec4 atlasCoordColor = vec4(atlasCoord,0,1);
//gl_FragColor = mix(atlasColor, vec4(localCoord, 0, 1), 1.0);
//gl_FragColor = vec4(localCoord, 0, 1);
oc = vec4(1,0,1,1);
} else {
// we have an empty cell
oc = vec4(1,0,1,1);
}
}
}
//oc.rgb = pointer;
//oc.rgb = oc.rgb * (255.0 / 20.0 );
gl_FragColor = oc;
}
For details on how to serialize a quad tree as a texture take a look at this paper: Octree Textures on the GPU
It turns out that it's a rounding problem.
The code in the decode function has to be changed to:
vec2 decode(const vec2 vec){
return floor(0.5 + (vec * CHANNEL_PERECISION));
}
The decoded values should have been integer indices, but they came back slightly too small, e.g. 5.99 instead of 6, so without rounding the lookup addressed the wrong texel.
