Matrix Translation in GLSL is infinitely stretched

I work with WebGL and modify the shaders (vs.glsl and fs.glsl) to understand GLSL and graphics programming. I have a model that I want to scale, rotate, and translate. Scaling and rotating work fine, but when I multiply in the translation matrix, the result is weird. I know this is a very basic question, but I am missing something and I need to find out what.
My model is infinitely stretched along the y axis. The white area is supposed to be the eye of the model.
This is my vertex shader code:
void main()
{
// projectionMatrix, modelViewMatrix and position are used below but not
// declared in this snippet; they are assumed to be supplied by the framework.
mat4 rX = mat4 (
1.0, 0.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0
);
mat4 rZ = mat4 (
0.0, 1.0, 0.0, 0.0,
-1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
);
mat4 eyeScale = mat4 (
.50,0.0,0.0,0.0,
0.0,.50,0.0,0.0,
0.0,0.0,.50,0.0,
0.0,0.0,0.0,1.0
);
mat4 eyeTrans = mat4(
1.0,0.0,0.0,0.0,
0.0,1.0,0.0,4.0,
0.0,0.0,1.0,0.0,
0.0,0.0,0.0,1.0
);
mat4 iR = eyeTrans*rZ*rX*eyeScale;
gl_Position = projectionMatrix * modelViewMatrix *iR* vec4(position, 1.0);
}

You swapped rows and columns when you set up the translation matrix.
Change it to:
mat4 eyeTrans = mat4(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 4.0, 0.0, 1.0
);
A 4*4 matrix looks like this (component layout on the left, memory/index order on the right):
     c0  c1  c2  c3             c0  c1  c2  c3
   [ Xx  Yx  Zx  Tx ]         [  0   4   8  12 ]
   [ Xy  Yy  Zy  Ty ]         [  1   5   9  13 ]
   [ Xz  Yz  Zz  Tz ]         [  2   6  10  14 ]
   [  0   0   0   1 ]         [  3   7  11  15 ]
In GLSL the columns are addressed like this:
vec4 c0 = eyeTrans[0].xyzw;
vec4 c1 = eyeTrans[1].xyzw;
vec4 c2 = eyeTrans[2].xyzw;
vec4 c3 = eyeTrans[3].xyzw;
And the memory image of a 4*4 matrix looks like this:
[ Xx, Xy, Xz, 0, Yx, Yy, Yz, 0, Zx, Zy, Zz, 0, Tx, Ty, Tz, 1 ]
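Written with explicit column vectors (each vec4 argument is one column), the corrected translation matrix from above therefore looks like this; it is the same fix, just spelled out column by column:
mat4 eyeTrans = mat4(
vec4(1.0, 0.0, 0.0, 0.0),  // c0: x axis
vec4(0.0, 1.0, 0.0, 0.0),  // c1: y axis
vec4(0.0, 0.0, 1.0, 0.0),  // c2: z axis
vec4(0.0, 4.0, 0.0, 1.0)); // c3: translation (0, 4, 0)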
See further:
GLSL 4×4 Matrix Fields

Related

Rotating vertex with different height/width

I'm trying to rotate an image in WebGL. If the texture has the same width as height there is no problem, but if the width is, for example, 256px and the height only 32px, the image gets skewed.
It seems as if only the texture is rotating and not the vertices. However, usually when only the texture rotates, its corners get clipped as they move outside the vertices. That doesn't happen here, so I'm a bit confused.
Here is my vertex shader code:
precision lowp float;
attribute vec3 vertPosition;
attribute vec3 vertColor;
attribute vec2 aTextureCoord;
varying vec3 fragColor;
varying lowp vec2 vTextureCoord;
varying lowp vec2 vTextureCoordBg;
uniform vec2 uvOffsetBg;
uniform vec2 uvScaleBg;
uniform mat4 uPMatrix;
uniform vec2 uvOffset;
uniform vec2 uvScale;
uniform vec3 translation;
uniform vec3 scale;
uniform float rotateZ;
uniform vec2 vertPosFixAfterRotate;
void main()
{
fragColor = vertColor;
vTextureCoord = (vec4(aTextureCoord.x, aTextureCoord.y, 0, 1)).xy * uvScale + uvOffset;
vTextureCoordBg = (vec4(aTextureCoord, 0, 1)).xy * uvScaleBg + uvOffsetBg;
mat4 worldPosTrans = mat4(
vec4(scale.x*cos(rotateZ), scale.y*-sin(rotateZ), 0, 0),
vec4(scale.x*sin(rotateZ), scale.y*cos(rotateZ), 0, 0),
vec4(0, 0, scale.z, 0),
vec4(translation.x, translation.y, translation.z, 1));
gl_Position = (uPMatrix * worldPosTrans) * vec4(vertPosition.x + vertPosFixAfterRotate.x, vertPosition.y + vertPosFixAfterRotate.y, vertPosition.z, 1.0);
}
The rotation is sent from JavaScript to the shader through the rotateZ uniform.
You have to do the scaling before the rotation:
Scale matrix:
mat4 sm = mat4(
vec4(scale.x, 0.0, 0.0, 0.0),
vec4(0.0, scale.y, 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Rotation matrix:
mat4 rm = mat4(
vec4(cos(rotateZ), -sin(rotateZ), 0.0, 0.0),
vec4(sin(rotateZ), cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Translation matrix:
mat4 tm = mat4(
vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, 1.0, 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
Model transformation (the matrices apply right to left: scale first, then rotate, then translate):
mat4 worldPosTrans = tm * rm * sm;
See the result, and note where scale.x and scale.y end up compared with the snippet in your question: there each scale factor multiplies a whole row of the rotation part, which corresponds to tm * sm * rm (non-uniform scaling applied after the rotation), and that is what skews the non-square quad:
mat4 worldPosTrans = mat4(
vec4(scale.x * cos(rotateZ), scale.x * -sin(rotateZ), 0.0, 0.0),
vec4(scale.y * sin(rotateZ), scale.y * cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
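Putting it together, the transform part of main() becomes (a sketch reusing the question's uniform and attribute names; the texture-coordinate lines are unchanged and omitted):
mat4 worldPosTrans = tm * rm * sm;
gl_Position = uPMatrix * worldPosTrans * vec4(vertPosition.xy + vertPosFixAfterRotate, vertPosition.z, 1.0);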

How to set up fixed-size (screen space) billboards using GLSL

I am trying to create a billboard using three.js. I tried using THREE.Sprite, but the size of the sprite changes with distance since I am using a perspective projection. So I tried to create a plane using a custom ShaderMaterial with a custom shader.
Shader code for the scaling billboard:
gl_Position = projectionMatrix * (modelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0) + vec4(position.x, position.y, 0.0, 0.0));
The same for the non-scaling billboard, which I am trying to implement:
float distance = length(modelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0));
vec4 newPosition = projectionMatrix * (modelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0) + vec4(position.x, position.y, 0.0, 0.0));
gl_Position= newPosition*distance;
Unfortunately there is no change in the output; it is still a scaled billboard. Any suggestion that might fix the problem?
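One thing to note: gl_Position is a homogeneous clip-space vector, so multiplying the whole thing by a scalar also scales w, and the perspective divide cancels the factor out again, which is why the output does not change. A common alternative is to scale the view-space offset by the distance instead of scaling the final position; a minimal sketch reusing the question's names (the 0.05 is an arbitrary screen-size factor, not a value from the question):
vec4 center = modelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0);
// Same distance measure as in the question.
float dist = length(center.xyz);
// Growing the view-space offset with distance cancels the shrinking that
// the perspective divide (by w, roughly the distance) introduces.
gl_Position = projectionMatrix * (center + vec4(position.xy * dist * 0.05, 0.0, 0.0));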

Image filter kernel to expand 16-235 limited color range

Is it possible to write a 5x5 kernel to process the limited color range into the full range?
This is my sample bitonal kernel, and I don't know what values to use and where to achieve this color expansion:
Grayscale
{ 0.3, 0.3, 0.3, 0.0, 0.0 }
{ 0.6, 0.6, 0.6, 0.0, 0.0 }
{ 0.1, 0.1, 0.1, 0.0, 0.0 }
{ 0.0, 0.0, 0.0, 1.0, 0.0 }
{ 0.0, 0.0, 0.0, 0.0, 1.0 }
I would like an RGB color expansion: RGB 16-235 => 0-255.
However I need the kernel matrix, because I am not processing the image myself; I'm passing the matrix to a Windows API function (undocumented: SetMagnificationDesktopColorEffect).
I cannot do a simple subtract/divide/multiply on the pixels. I do not have them.
You can basically do it without a kernel by subtracting 16 from your image and then dividing it by 219. That gives you an image normalized to the 0-1 range, which you then multiply by 255 to get back to a 0-255 intensity representation.
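If the transform has to be expressed as a 5x5 color matrix for that API, the same subtract-and-rescale can be folded into one matrix. Assuming the same row-vector convention as the grayscale example above and channel values normalized to 0-1 (255/219 ≈ 1.164 and 16/219 ≈ 0.073), a sketch of such a matrix would be (unverified against that undocumented call):
{ 1.164, 0.000, 0.000, 0.0, 0.0 }
{ 0.000, 1.164, 0.000, 0.0, 0.0 }
{ 0.000, 0.000, 1.164, 0.0, 0.0 }
{ 0.000, 0.000, 0.000, 1.0, 0.0 }
{ -0.073, -0.073, -0.073, 0.0, 1.0 }
Each channel is mapped to channel * 1.164 - 0.073, which sends 16/255 to roughly 0 and 235/255 to roughly 1.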

Can an ELEMENT_ARRAY_BUFFER be used for texture mapping the same texture on every face of a cube?

So I'm writing some WebGL, no THREE.JS. I'm trying to render a cube, with a single texture mapped to every face of the cube. In my code where I set up my attributes I have something like:
var vertices = new Float32Array([
// x, y, z u, v
1.0, 1.0, 1.0, /* v0 right top front */ 1.0, 1.0,
-1.0, 1.0, 1.0, /* v1 left top front */ 0.0, 1.0,
-1.0, -1.0, 1.0, /* v2 left bottom front */ 0.0, 0.0,
1.0, -1.0, 1.0, /* v3 right bottom front */ 1.0, 0.0,
// u's switch for back faces
1.0, -1.0, -1.0, /* v4 right bottom back */ 0.0, 0.0,
1.0, 1.0, -1.0, /* v5 right top back */ 0.0, 1.0,
-1.0, 1.0, -1.0, /* v6 left top back */ 1.0, 1.0,
-1.0, -1.0, -1.0, /* v7 left bottom back */ 1.0, 0.0
]);
// the pairs of vertex triples
// 3 vertices = 1 triangle
// 2 triangles = 1 quad = 1 face
var indices = new Uint8Array([
0, 1, 2, 0, 2, 3, // front
0, 3, 4, 0, 4, 5, // right
//0, 5, 6, 0, 6, 1, // top
1, 6, 7, 1, 7, 2, // left
//7, 4, 3, 7, 3, 2, // bottom
4, 7, 6, 4, 6, 5 // back
]);
I wind up with a cube with the texture reflected on the right and left faces, which is fine. For the top and the bottom, I have no faces because of the two commented-out lines. When I comment them back in, the faces aren't textured the way I expected. Sure enough, if you look at the indices for the top face, for instance, and the UV coordinates they would have:
index | u | v
0 | 1.0 | 1.0
1 | 0.0 | 1.0
5 | 0.0 | 1.0
6 | 1.0 | 1.0
So we can see that indices 1 and 5 (and also 0 and 6) have the same UV coordinates, so of course it won't look right on a quad.
I've been trying to draw it out on paper, but I can't change the UVs without messing up another face's coordinates. What I'm wondering is: is it possible to use ELEMENT_ARRAY_BUFFERs to map UV coordinates on a cube, or do I need to use more data and draw using an ARRAY_BUFFER?
== EDIT ==
Looks like a dupe: OpenGL ES - texture map all faces of an 8 vertex cube?
Hate to answer my own question, but based on the hint here, I was able to get it to work by using 24 vertices instead of 8. I can use 24 instead of 36 because I'm repeating indices in my ELEMENT_ARRAY_BUFFER (something I wouldn't be able to do with just an ARRAY_BUFFER).
var vertices = new Float32Array([
// x, y, z, u, v
// front face (z: +1)
1.0, 1.0, 1.0, 1.0, 1.0, // top right
-1.0, 1.0, 1.0, 0.0, 1.0, // top left
-1.0, -1.0, 1.0, 0.0, 0.0, // bottom left
1.0, -1.0, 1.0, 1.0, 0.0, // bottom right
// right face (x: +1)
1.0, 1.0, -1.0, 1.0, 1.0, // top right
1.0, 1.0, 1.0, 0.0, 1.0, // top left
1.0, -1.0, 1.0, 0.0, 0.0, // bottom left
1.0, -1.0, -1.0, 1.0, 0.0, // bottom right
// top face (y: +1)
1.0, 1.0, -1.0, 1.0, 1.0, // top right
-1.0, 1.0, -1.0, 0.0, 1.0, // top left
-1.0, 1.0, 1.0, 0.0, 0.0, // bottom left
1.0, 1.0, 1.0, 1.0, 0.0, // bottom right
// left face (x: -1)
-1.0, 1.0, 1.0, 1.0, 1.0, // top right
-1.0, 1.0, -1.0, 0.0, 1.0, // top left
-1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
-1.0, -1.0, 1.0, 1.0, 0.0, // bottom right
// bottom face (y: -1)
1.0, -1.0, 1.0, 1.0, 1.0, // top right
-1.0, -1.0, 1.0, 0.0, 1.0, // top left
-1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
1.0, -1.0, -1.0, 1.0, 0.0, // bottom right
// back face (z: -1)
-1.0, 1.0, -1.0, 1.0, 1.0, // top right
1.0, 1.0, -1.0, 0.0, 1.0, // top left
1.0, -1.0, -1.0, 0.0, 0.0, // bottom left
-1.0, -1.0, -1.0, 1.0, 0.0 // bottom right
]);
// the pairs of vertex triples
// 3 vertices = 1 triangle
// 2 triangles = 1 quad = 1 face
var indices = new Uint8Array([
0, 1, 2, 0, 2, 3,
4, 5, 6, 4, 6, 7,
8, 9, 10, 8, 10, 11,
12, 13, 14, 12, 14, 15,
16, 17, 18, 16, 18, 19,
20, 21, 22, 20, 22, 23
]);
The number of vertices can be further reduced, because some indices share the same XYZ and UV coordinates, though if I add normals to my interleaved buffer later (or any other attribute) I may need the repeated values.
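For completeness, the interleaved position/UV data above ends up being consumed by a shader pair along these lines (the attribute and uniform names here are hypothetical, not taken from the question):
// vertex shader
attribute vec3 a_position;
attribute vec2 a_texcoord;
uniform mat4 u_mvpMatrix;
varying vec2 v_texcoord;
void main()
{
gl_Position = u_mvpMatrix * vec4(a_position, 1.0);
v_texcoord = a_texcoord;
}
// fragment shader
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_sampler;
void main()
{
gl_FragColor = texture2D(u_sampler, v_texcoord);
}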

GLSL Shader - Coverflow Reflection of a 2D object

I want to write a shader that creates a reflection of an image similar to the ones used in coverflow views.
// Vertex Shader
uniform highp mat4 u_modelViewMatrix;
uniform highp mat4 u_projectionMatrix;
attribute highp vec4 a_position;
attribute lowp vec4 a_color;
attribute highp vec2 a_texcoord;
varying lowp vec4 v_color;
varying highp vec2 v_texCoord;
mat4 rot = mat4( -1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
void main()
{
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position * rot;
v_color = a_color;
v_texCoord = a_texcoord;
}
// Fragment Shader
varying highp vec2 v_texCoord;
uniform sampler2D u_texture0;
uniform int slices;
void main()
{
lowp vec3 w = vec3(1.0,1.0,1.0);
lowp vec3 b = vec3(0.0,0.0,0.0);
lowp vec3 mix = mix(b, w, (v_texCoord.y-(float(slices)/10.0)));
gl_FragColor = texture2D(u_texture0,v_texCoord) * vec4(mix, 1.0);
}
But this shader produces the following:
(image: current result)
I don't know how to "flip" the image horizontally, and I have tried many different parameters in the rotation matrix (I even tried a so-called "mirror matrix"), but I don't know how to reflect the image at the bottom of the original image.
If you're talking about what images.google.com returns for "coverflow", then you don't need a rotation matrix at all.
void main()
{
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position;
v_color = a_color;
v_texCoord = vec2(a_texcoord.x, 1.0 - a_texcoord.y);
}
Simply flip it vertically.
If you insist on using a matrix and want to make a "mirror" shader (one that takes the object and puts it under the "floor" to make a reflection), then you need a mirror matrix (don't forget to adjust front-face/back-face culling):
mat4(1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
AND you must know where the floor is.
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * (a_position * mirrorMatrix - floor);
Alternatively, you could put the floor translation into the same matrix. Basically, to mirror against an arbitrary height, you need to combine three transforms (pseudocode):
translate(0, -floorHeight, 0) * scale(1, -1, 1) * translate(0, floorHeight, 0).
and put them into your matrix.
Also, it might make sense to split the modelView matrix into separate "model" (object/world) and "view" matrices. That way it is easier to perform transformations like these.
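As a hedged GLSL sketch of that combination, reusing the attribute and uniform names from the question's vertex shader and mirroring across a horizontal plane at height floorY (floorY is a hypothetical uniform, not from the question), the folded matrix maps y to 2.0 * floorY - y:
uniform float floorY; // hypothetical: height of the reflection plane
void main()
{
// translate(0, floorY, 0) * scale(1, -1, 1) * translate(0, -floorY, 0),
// folded into a single matrix.
mat4 mirror = mat4(
vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, -1.0, 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 2.0 * floorY, 0.0, 1.0));
gl_Position = u_projectionMatrix * u_modelViewMatrix * mirror * a_position;
v_color = a_color;
v_texCoord = a_texcoord;
}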
