Recently I ran into a problem while using a compute shader to implement matrix multiplication, the usual C = AB. To make the memory accesses contiguous I transposed the B matrix, expecting this to speed things up. However, when I measured it, the row x row form turned out to be several times slower than the row x column form. I have puzzled over this for a long time and cannot understand it, so I am writing the problem up here for help.
My environment: Mali G77 (MediaTek Dimensity 1200)
A matrix dimensions: 4x2048x2048
B matrix dimensions: 4x2048x2048
Time comparison:
Row x row: about 9 s
Row x column: about 1.6 s
Column x column: about 3.3 s
Question demo: https://github.com/yikox/ProfilerDemo
Shader code:
// compute shader
#version 310 es
#define XLOCAL 8
#define YLOCAL 8
#define ZLOCAL 1
layout(binding = 0) writeonly buffer soutput{
vec4 data[];
} uOutput;
layout(binding = 1) readonly buffer sinput0{
vec4 data[];
} uInput0;
layout(binding = 2) readonly buffer sinput1{
vec4 data[];
} uInput1;
layout(location=3) uniform ivec4 uInputSize0;
layout(location=4) uniform ivec4 uInputSize1;
layout(location=5) uniform ivec4 uOutputSize;
layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;
// The i-th element of a given column of the product of matrix A and matrix B
vec4 PixelMul(int i, ivec3 pos)
{
// row x row
// vec4 data0 = uInput0.data[i + pos.y * uInputSize0.x + pos.z * uInputSize0.x * uInputSize0.y];
// vec4 data1 = uInput1.data[i + pos.x * uInputSize1.y + pos.z * uInputSize1.x * uInputSize1.y];
// row x column
// vec4 data0 = uInput0.data[i + pos.y * uInputSize0.x + pos.z * uInputSize0.x * uInputSize0.y];
// vec4 data1 = uInput1.data[pos.x + i * uInputSize1.y + pos.z * uInputSize1.x * uInputSize1.y];
// column x column
vec4 data0 = uInput0.data[pos.y + i * uInputSize0.x + pos.z * uInputSize0.x * uInputSize0.y];
vec4 data1 = uInput1.data[pos.x + i * uInputSize1.y + pos.z * uInputSize1.x * uInputSize1.y];
return data0 * data1;
}
void main()
{
ivec3 pos = ivec3(gl_GlobalInvocationID) * ivec3(2, 2, 1);
if(all(lessThan(pos, uOutputSize.xyz)))
{
vec4 outData00 = vec4(0);
vec4 outData01 = vec4(0);
vec4 outData10 = vec4(0);
vec4 outData11 = vec4(0);
for(int i = 0; i < uInputSize0.x; i++)
{
outData00 += PixelMul(i, pos + ivec3(0, 0, 0));
outData01 += PixelMul(i, pos + ivec3(1, 0, 0));
outData10 += PixelMul(i, pos + ivec3(0, 1, 0));
outData11 += PixelMul(i, pos + ivec3(1, 1, 0));
}
uOutput.data[pos.x + 0 + (pos.y + 0) * uOutputSize.x + pos.z * uOutputSize.x * uOutputSize.y] = outData00;
uOutput.data[pos.x + 1 + (pos.y + 0) * uOutputSize.x + pos.z * uOutputSize.x * uOutputSize.y] = outData01;
uOutput.data[pos.x + 0 + (pos.y + 1) * uOutputSize.x + pos.z * uOutputSize.x * uOutputSize.y] = outData10;
uOutput.data[pos.x + 1 + (pos.y + 1) * uOutputSize.x + pos.z * uOutputSize.x * uOutputSize.y] = outData11;
}
}
Could someone help me understand what parameters to use to get a spiral like the one in this question: Draw equidistant points on a spiral?
I don't understand this parameter: rotation - the overall rotation of the spiral ('0' = no rotation, '1' = 360 degrees, '180/360' = 180 degrees). I would be grateful if someone could write down some sets of parameters (sides, coils, rotation) that produce a spiral.
Here is the code in MATLAB:
clc
clear all

centerX = 0;
centerY = 0;
radius = 10;
coils = 30;
rotation = 360;
chord = 2;

thetaMax = coils * 2 * pi;
% How far to move away from the centre per radian
awayStep = radius / thetaMax;

i = 1;
theta = chord / awayStep;   % start a little away from the centre
while theta <= thetaMax
    away = awayStep * theta;
    around = theta + rotation;
    x(i) = centerX + cos(around) * away;
    y(i) = centerY + sin(around) * away;
    i = i + 1;
    % Solve for the angular step delta that advances by roughly one chord:
    %   2*(away + awayStep*delta)*delta == 2*chord
    %   awayStep*delta^2 + 2*away*delta - 2*chord == 0
    % Quadratic formula with a = awayStep, b = 2*away, c = -2*chord:
    delta = (-2*away + sqrt(4*away*away + 8*awayStep*chord)) / (2*awayStep);
    theta = theta + delta;
end

v = [0 x];
w = [0 y];
scatter(v, w)
Thank you in advance
I'm starting a project where I need to control a virtual character. The character is being rendered in multiple 3D engines, such as Three.JS and iOS SceneKit.
I'm getting the orientation of every joint of the skeleton with OpenNI as a 3x3 rotation matrix, and it looks kind of like this:
The code that saves and passes the joint orientation looks like this:
float confidence = context.getJointOrientationSkeleton(userId, jointName,
joint);
joints[jointIndex]=joint.m00;
joints[jointIndex+1]=joint.m01;
joints[jointIndex+2]=joint.m02;
joints[jointIndex+3]=joint.m10;
joints[jointIndex+4]=joint.m11;
joints[jointIndex+5]=joint.m12;
joints[jointIndex+6]=joint.m20;
joints[jointIndex+7]=joint.m21;
joints[jointIndex+8]=joint.m22;
jointIndex+=9;
This repeats for every joint of the skeleton.
The last row and last column are always [0, 0, 0, 1], so I just append them on the client once I receive the data and build a 4x4 matrix.
I want to be able to make the right rotations with this data, but the rotations I'm getting are definitely wrong.
This is how I'm processing the matrix: (pseudo-code)
row1 = [m00 m01 m02 0]
row2 = [m10 m11 m12 0]
row3 = [m20 m21 m22 0]
row4 = [0 0 0 1]
matrix4by4 = matrix4x4(rows:[row1,row2,row3,row4])
I then got the quaternion with two methods, and both of them produce bad rotations; I cannot find what's wrong or what I'm missing.
First method
There's an iOS function that takes a 3x3 or 4x4 matrix and transforms it into a quaternion:
boneRotation = simd_quatf.init(matrix4by4).vector //X,Y,Z,W
Second method
I found the following code on the web:
let tr = m00 + m11 + m22
var qw: Float = 0
var qx: Float = 0
var qy: Float = 0
var qz: Float = 0
if (tr > 0) {
var S = sqrt(tr+1.0) * 2 // S=4*qw
qw = 0.25 * S
qx = (m21 - m12) / S
qy = (m02 - m20) / S
qz = (m10 - m01) / S
} else if ((m00 > m11) && (m00 > m22)) {
var S = sqrt(1.0 + m00 - m11 - m22) * 2 // S=4*qx
qw = (m21 - m12) / S
qx = 0.25 * S
qy = (m01 + m10) / S
qz = (m02 + m20) / S
} else if (m11 > m22) {
var S = sqrt(1.0 + m11 - m00 - m22) * 2 // S=4*qy
qw = (m02 - m20) / S
qx = (m01 + m10) / S
qy = 0.25 * S
qz = (m12 + m21) / S
} else {
let S = sqrt(1.0 + m22 - m00 - m11) * 2 // S=4*qz
qw = (m10 - m01) / S
qx = (m02 + m20) / S
qy = (m12 + m21) / S
qz = 0.25 * S
}
boneRotation = vector4(qx, qy, qw, qz)
I started testing only with shoulder and elbow rotation to help me visualize what could be wrong or missing, and made a video.
Here's how it's behaving: https://www.youtube.com/watch?v=xUtNiwH_AGk
What could I be missing? For example, the axes of rotation of the shoulder are like this:
(images showing the shoulder's X, Y and Z rotation axes)
Thank you in advance :-)
This question is closely related to the question here (How do I convert a vec4 rgba value to a float?).
There are already some articles and questions on this topic, but I find that most of them do not identify which type of floating-point value they are dealing with.
As far as I can tell, there are the following kinds of floating-point packing/unpacking:
unsigned normalized float
signed normalized float
signed ranged float (a floating-point value with a known range limit)
unsigned ranged float
unsigned float
signed float
However, these are really just 2 cases; the other packing/unpacking variants can be handled by these 2 methods:
unsigned ranged float (which I can pack/unpack with simple bit shifting)
signed float
I want to pack and unpack signed floating-point values into a vec3 or vec2 as well.
In my case the floating-point value is not guaranteed to be normalized, so I cannot use the simple bit-shifting approach.
If you know the range of values you want to store, say -5 to +5, then the easiest way is to convert that range to a value from 0 to 1, expand it to the number of bits you have, and then break it into parts.
vec2 packFloatInto8BitVec2(float v, float min, float max) {
float zeroToOne = (v - min) / (max - min);
float zeroTo16Bit = zeroToOne * 256.0 * 255.0;
return vec2(mod(zeroTo16Bit, 256.0), zeroTo16Bit / 256.0);
}
To put it back you do the opposite. Assemble the parts, divide to get back to a zeroToOne value, then expand by the range.
float unpack8BitVec2IntoFloat(vec2 v, float min, float max) {
float zeroTo16Bit = v.x + v.y * 256.0;
float zeroToOne = zeroTo16Bit / 256.0 / 255.0;
return zeroToOne * (max - min) + min;
}
For vec3 just expand it
vec3 packFloatInto8BitVec3(float v, float min, float max) {
float zeroToOne = (v - min) / (max - min);
float zeroTo24Bit = zeroToOne * 256.0 * 256.0 * 255.0;
return vec3(mod(zeroTo24Bit, 256.0), mod(zeroTo24Bit / 256.0, 256.0), zeroTo24Bit / 256.0 / 256.0);
}
float unpack8BitVec3IntoFloat(vec3 v, float min, float max) {
float zeroTo24Bit = v.x + v.y * 256.0 + v.z * 256.0 * 256.0;
float zeroToOne = zeroTo24Bit / 256.0 / 256.0 / 256.0;
return zeroToOne * (max - min) + min;
}
I wrote a small example a few days ago on Shadertoy:
https://www.shadertoy.com/view/XdK3Dh
It stores a float as RGB and loads a float back from a pixel. There is also a test that the functions are exact inverses (a lot of other functions I have seen have bugs in some ranges because of poor precision).
The whole example assumes you want to save values in a buffer and read them back in the next draw. Having only 256 levels per channel limits you to 16777216 different values. Most of the time I don't need a larger range. I also shifted it so the value is signed, in the interval <-8388608; 8388608>.
float color2float(in vec3 c) {
c *= 255.;
c = floor(c); // without this value could be shifted for some intervals
return c.r*256.*256. + c.g*256. + c.b - 8388608.;
}
// values out of <-8388608;8388608> are stored as min/max values
vec3 float2color(in float val) {
val += 8388608.; // this makes values signed
if(val < 0.) {
return vec3(0.);
}
if(val > 16777216.) {
return vec3(1.);
}
vec3 c = vec3(0.);
c.b = mod(val, 256.);
val = floor(val/256.);
c.g = mod(val, 256.);
val = floor(val/256.);
c.r = mod(val, 256.);
return c/255.;
}
One more thing: values that overflow will be clamped to the min/max value.
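For illustration, here is a minimal round-trip sketch using the two functions above (the test value 1234.0 and the single-pass setup are my own assumptions; in the actual Shadertoy example the encode happens in one buffer pass and the decode in the next):
void mainImage(out vec4 fragColor, in vec2 fragCoord)
{
    // Encode an arbitrary test value into an RGB color ...
    vec3 encoded = float2color(1234.0);
    // ... and decode it again after the (hypothetical) readback.
    float decoded = color2float(encoded);
    // Visualize the round-trip error: black means the value survived intact.
    fragColor = vec4(vec3(abs(decoded - 1234.0)), 1.0);
}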
In order to pack a floating-point value into a vec2, vec3 or vec4, either the range of the source values has to be restricted and well specified, or the exponent has to be stored somehow too. In general, if the significant digits of a floating-point number are to be packed into bytes, consecutive 8-bit packages have to be extracted from the significand and stored one per byte.
Encode a floating point number in a restricted and predefined range
A value range [minVal, maxVal] has to be defined which includes all values that are to be encoded, and that range has to be mapped to [0.0, 1.0].
Encoding of a floating point number in the range [minVal, maxVal] to vec2, vec3 and vec4:
vec2 EncodeRangeV2( in float value, in float minVal, in float maxVal )
{
value = clamp( (value-minVal) / (maxVal-minVal), 0.0, 1.0 );
value *= (256.0*256.0 - 1.0) / (256.0*256.0);
vec3 encode = fract( value * vec3(1.0, 256.0, 256.0*256.0) );
return encode.xy - encode.yz / 256.0 + 1.0/512.0;
}
vec3 EncodeRangeV3( in float value, in float minVal, in float maxVal )
{
value = clamp( (value-minVal) / (maxVal-minVal), 0.0, 1.0 );
value *= (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
vec4 encode = fract( value * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
return encode.xyz - encode.yzw / 256.0 + 1.0/512.0;
}
vec4 EncodeRangeV4( in float value, in float minVal, in float maxVal )
{
value = clamp( (value-minVal) / (maxVal-minVal), 0.0, 1.0 );
value *= (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
vec4 encode = fract( value * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
return vec4( encode.xyz - encode.yzw / 256.0, encode.w ) + 1.0/512.0;
}
Decoding of a vec2, vec3 and vec4 to a floating point number in the range [minVal, maxVal]:
float DecodeRangeV2( in vec2 pack, in float minVal, in float maxVal )
{
float value = dot( pack, 1.0 / vec2(1.0, 256.0) );
value *= (256.0*256.0) / (256.0*256.0 - 1.0);
return mix( minVal, maxVal, value );
}
float DecodeRangeV3( in vec3 pack, in float minVal, in float maxVal )
{
float value = dot( pack, 1.0 / vec3(1.0, 256.0, 256.0*256.0) );
value *= (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
return mix( minVal, maxVal, value );
}
float DecodeRangeV4( in vec4 pack, in float minVal, in float maxVal )
{
float value = dot( pack, 1.0 / vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
value *= (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
return mix( minVal, maxVal, value );
}
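A minimal usage sketch for the range variant (the [-10.0, 10.0] range and the test value are arbitrary assumptions for illustration; in practice the encoded vector would be written to a render target and decoded in a later pass):
void main()
{
    float original = 3.14159;
    vec3  enc      = EncodeRangeV3( original, -10.0, 10.0 ); // e.g. written to an RGB target
    float restored = DecodeRangeV3( enc, -10.0, 10.0 );      // e.g. read back later
    // restored matches original up to the ~24-bit quantization of the encoding
    gl_FragColor = vec4( vec3( abs( restored - original ) ), 1.0 );
}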
Note: since a standard 32-bit IEEE 754 number has only 24 significand bits, it is completely sufficient to encode the number into 3 bytes.
Encode the significant digits and the exponent of a floating point number
Encoding of the significant digits of a floating point number and its exponent to vec2, vec3 and vec4:
vec2 EncodeExpV2( in float value )
{
int exponent = int( log2( abs( value ) ) + 1.0 );
value /= exp2( float( exponent ) );
value = (value + 1.0) * 255.0 / (2.0*256.0);
vec2 encode = fract( value * vec2(1.0, 256.0) );
return vec2( encode.x - encode.y / 256.0 + 1.0/512.0, (float(exponent) + 127.5) / 256.0 );
}
vec3 EncodeExpV3( in float value )
{
int exponent = int( log2( abs( value ) ) + 1.0 );
value /= exp2( float( exponent ) );
value = (value + 1.0) * (256.0*256.0 - 1.0) / (2.0*256.0*256.0);
vec3 encode = fract( value * vec3(1.0, 256.0, 256.0*256.0) );
return vec3( encode.xy - encode.yz / 256.0 + 1.0/512.0, (float(exponent) + 127.5) / 256.0 );
}
vec4 EncodeExpV4( in float value )
{
int exponent = int( log2( abs( value ) ) + 1.0 );
value /= exp2( float( exponent ) );
value = (value + 1.0) * (256.0*256.0*256.0 - 1.0) / (2.0*256.0*256.0*256.0);
vec4 encode = fract( value * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
return vec4( encode.xyz - encode.yzw / 256.0 + 1.0/512.0, (float(exponent) + 127.5) / 256.0 );
}
Decoding of a vec2, vec3 and vec4 to the significant digits of a floating point number and its exponent:
float DecodeExpV2( in vec2 pack )
{
int exponent = int( pack.y * 256.0 - 127.0 );
float value = pack.x * (2.0*256.0) / 255.0 - 1.0;
return value * exp2( float(exponent) );
}
float DecodeExpV3( in vec3 pack )
{
int exponent = int( pack.z * 256.0 - 127.0 );
float value = dot( pack.xy, 1.0 / vec2(1.0, 256.0) );
value = value * (2.0*256.0*256.0) / (256.0*256.0 - 1.0) - 1.0;
return value * exp2( float(exponent) );
}
float DecodeExpV4( in vec4 pack )
{
int exponent = int( pack.w * 256.0 - 127.0 );
float value = dot( pack.xyz, 1.0 / vec3(1.0, 256.0, 256.0*256.0) );
value = value * (2.0*256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0) - 1.0;
return value * exp2( float(exponent) );
}
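A usage sketch analogous to the range variant above (the test value is an arbitrary assumption; note that a value of exactly 0.0 would need special handling, since log2(0.0) is undefined):
void main()
{
    // The exponent goes into the last component, the significand into the others.
    float original = 123456.789;
    vec3  enc      = EncodeExpV3( original );
    float restored = DecodeExpV3( enc );
    // Visualize the relative error, which should be close to zero.
    gl_FragColor = vec4( vec3( abs( restored - original ) / abs( original ) ), 1.0 );
}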
See also the answer to the following question:
How do you pack one 32bit int Into 4, 8bit ints in glsl / webgl?
I tested gman's solution and found that the scale factor was incorrect and produced round-off errors, and that there needs to be an additional division by 255.0 if you want to store the result in an RGB texture. So this is my revised solution:
#define SCALE_FACTOR (256.0 * 256.0 * 256.0 - 1.0)
vec3 packFloatInto8BitVec3(float v, float min, float max) {
float zeroToOne = (v - min) / (max - min);
float zeroTo24Bit = zeroToOne * SCALE_FACTOR;
return floor(
vec3(
mod(zeroTo24Bit, 256.0),
mod(zeroTo24Bit / 256.0, 256.0),
zeroTo24Bit / 256.0 / 256.0
)
) / 255.0;
}
float unpack8BitVec3IntoFloat(vec3 v, float min, float max) {
vec3 scaleVector = vec3(1.0, 256.0, 256.0 * 256.0) / SCALE_FACTOR * 255.0;
float zeroToOne = dot(v, scaleVector);
return zeroToOne * (max - min) + min;
}
Example:
If you pack 0.25 using min=0 and max=1, you will get (1.0, 1.0, 0.247059)
If you unpack that vector, you will get 0.249999970197678
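As a shader-side sketch of that example (the value 0.25 and the [0, 1] range are taken from the numbers above; writing the packed vector to a render target is my assumption about the intended use):
void main()
{
    vec3  packed3  = packFloatInto8BitVec3(0.25, 0.0, 1.0);      // ~(1.0, 1.0, 0.247059)
    float restored = unpack8BitVec3IntoFloat(packed3, 0.0, 1.0); // ~0.249999970197678
    gl_FragColor = vec4(packed3, 1.0); // e.g. store the packed value in an RGB target
}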
I am currently writing a cel-shading shader, but I'm having issues with edge detection. At the moment I am using the following code, which applies Laplacian edge detection to non-linear depth buffer values:
uniform sampler2D depth_tex;
void main(){
vec4 color_out;
float znear = 1.0;
float zfar = 50000.0;
float depthm = texture2D(depth_tex, gl_TexCoord[0].xy).r;
float lineAmp = mix( 0.001, 0.0, clamp( (500.0 / (zfar + znear - ( 2.0 * depthm - 1.0 ) * (zfar - znear) )/2.0), 0.0, 1.0 ) );// make the lines thicker at close range
float depthn = texture2D(depth_tex, gl_TexCoord[0].xy + vec2( (0.002 + lineAmp)*0.625 , 0.0) ).r;
depthn = depthn / depthm;
float depths = texture2D(depth_tex, gl_TexCoord[0].xy - vec2( (0.002 + lineAmp)*0.625 , 0.0) ).r;
depths = depths / depthm;
float depthw = texture2D(depth_tex, gl_TexCoord[0].xy + vec2(0.0 , 0.002 + lineAmp) ).r;
depthw = depthw / depthm;
float depthe = texture2D(depth_tex, gl_TexCoord[0].xy - vec2(0.0 , 0.002 + lineAmp) ).r;
depthe = depthe / depthm;
float Contour = -4.0 + depthn + depths + depthw + depthe;
float lineAmp2 = 100.0 * clamp( depthm - 0.99, 0.0, 1.0);
lineAmp2 = lineAmp2 * lineAmp2;
Contour = (512.0 + lineAmp2 * 204800.0 ) * Contour;
if(Contour > 0.15){
Contour = (0.15 - Contour) / 1.5 + 0.5;
} else
Contour = 1.0;
color_out.rgb = color_out.rgb * Contour;
color_out.a = 1.0;
gl_FragColor = color_out;
}
but it is hackish (note the lineAmp2 factor), and the details at large distances are lost. So I came up with another algorithm:
[Note that Laplacian edge detection is in use]
1. Get 5 samples from the depth buffer: depthm, depthn, depths, depthw, depthe, where depthm is exactly at the processed fragment, depthn is slightly above it, depths slightly below it, and so on.
2. Calculate their real coordinates in camera space (converting the depth to linear along the way).
3. Compare each side sample to the middle sample by subtracting, normalize each difference by dividing by the distance between the two camera-space points, and add the four results together. In theory this should help in situations where, at large distances from the camera, two fragments are very close on the screen but very far apart in camera space, which is fatal for linear depth testing.
where:
2.a Convert the non-linear depth to linear using the algorithm from http://stackoverflow.com/questions/6652253/getting-the-true-z-value-from-the-depth-buffer
exact code:
uniform sampler2D depthBuffTex;
uniform float zNear;
uniform float zFar;
varying vec2 vTexCoord;
void main(void)
{
float z_b = texture2D(depthBuffTex, vTexCoord).x;
float z_n = 2.0 * z_b - 1.0;
float z_e = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));
}
2.b Convert the screen coordinates to [tan a, tan b], where a is the horizontal angle and b the vertical one. There is probably better terminology involving spherical coordinates, but I don't know it yet.
2.c Create a 3D vector (converted screen coordinates, 1.0) and scale it by the linear depth. I assume this gives the estimated camera-space coordinates of the fragment; it looks like it does.
3.a Each difference is computed as: (depthm - sidedepth) / length(positionm - sideposition)
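For reference, here is a compact sketch of steps 2.a-2.c as a single helper function (the tanHalfFov and aspect parameters are my own assumptions and have to match the projection actually used; this is not part of the original shader):
// Sketch: reconstruct an approximate camera-space position from a depth sample.
vec3 cameraSpacePosition(vec2 texCoord, float nonLinearDepth,
                         float zNear, float zFar,
                         float tanHalfFov, float aspect)
{
    // 2.a: linearize the depth value
    float z_n = 2.0 * nonLinearDepth - 1.0;
    float z_e = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));
    // 2.b: texture coordinates [0,1] -> [tan a, tan b]
    vec2 ndc   = texCoord * 2.0 - 1.0;
    vec2 tanAB = ndc * vec2(tanHalfFov * aspect, tanHalfFov);
    // 2.c: scale the view direction by the linear depth
    return vec3(tanAB, 1.0) * z_e;
}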
I may have messed something up at any point. The code looks fine to me, but the algorithm may not be, since I made it up myself.
My code:
uniform sampler2D depth_tex;
void main(){
vec4 color_out;
float znear = 1.0;
float zfar = 10000000000.0;
float depthm = texture2D(depth_tex, gl_TexCoord[0].xy + distort ).r;
depthm = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthm - 1.0 ) * (zfar - znear) ); //convert to linear
vec2 scorm = (gl_TexCoord[0].xy + distort) -0.5; //conversion to desired coordinates space. This line returns value from range (-0.5,0.5)
scorm = scorm * 2.0 * 0.5; // normalize to (-1, 1) and multiply by tan FOV/2, and default fov is IIRC 60 degrees
scorm.x = scorm.x * 1.6; //1.6 is aspect ratio 16/10
vec3 posm = vec3( scorm, 1.0 );
posm = posm * depthm; //scale by linearized depth
float depthn = texture2D(depth_tex, gl_TexCoord[0].xy + distort + vec2( 0.002*0.625 , 0.0) ).r; //0.625 is aspect ratio 10/16
depthn = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthn - 1.0 ) * (zfar - znear) );
vec2 scorn = (gl_TexCoord[0].xy + distort + vec2( 0.002*0.625, 0.0) ) -0.5;
scorn = scorn * 2.0 * 0.5;
scorn.x = scorn.x * 1.6;
vec3 posn = vec3( scorn, 1.0 );
posn = posn * depthn;
float depths = texture2D(depth_tex, gl_TexCoord[0].xy + distort - vec2( 0.002*0.625 , 0.0) ).r;
depths = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depths - 1.0 ) * (zfar - znear) );
vec2 scors = (gl_TexCoord[0].xy + distort - vec2( 0.002*0.625, 0.0) ) -0.5;
scors = scors * 2.0 * 0.5;
scors.x = scors.x * 1.6;
vec3 poss = vec3( scors, 1.0 );
poss = poss * depths;
float depthw = texture2D(depth_tex, gl_TexCoord[0].xy + distort + vec2(0.0 , 0.002) ).r;
depthw = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthw - 1.0 ) * (zfar - znear) );
vec2 scorw = ( gl_TexCoord[0].xy + distort + vec2( 0.0 , 0.002) ) -0.5;
scorw = scorw * 2.0 * 0.5;
scorw.x = scorw.x * 1.6;
vec3 posw = vec3( scorw, 1.0 );
posw = posw * depthw;
float depthe = texture2D(depth_tex, gl_TexCoord[0].xy + distort - vec2(0.0 , 0.002) ).r;
depthe = 2.0 * zfar * znear / (zfar + znear - ( 2.0 * depthe - 1.0 ) * (zfar - znear) );
vec2 score = ( gl_TexCoord[0].xy + distort - vec2( 0.0 , 0.002) ) -0.5;
score = score * 2.0 * 0.5;
score.x = score.x * 1.6;
vec3 pose = vec3( score, 1.0 );
pose = pose * depthe;
float Contour = ( depthn - depthm )/length(posm - posn) + ( depths - depthm )/length(posm - poss) + ( depthw - depthm )/length(posm - posw) + ( depthe - depthm )/length(posm - pose);
Contour = 0.25 * Contour;
color_out.rgb = vec3( Contour, Contour, Contour );
color_out.a = 1.0;
gl_FragColor = color_out;
}
The exact issue with the second code is that it exhibits some awful artifacts at larger distances.
My goal is to make either of them work properly. Are there any tricks I could use to improve precision/quality with both linearized and non-linearized depth buffers? Is anything wrong with my algorithm for the linearized depth buffer?