I want to write a shader that creates a reflection of an image, similar to the ones used for cover flows.
// Vertex Shader
uniform highp mat4 u_modelViewMatrix;
uniform highp mat4 u_projectionMatrix;
attribute highp vec4 a_position;
attribute lowp vec4 a_color;
attribute highp vec2 a_texcoord;
varying lowp vec4 v_color;
varying highp vec2 v_texCoord;
// Constant matrix diag(-1, -1, 1, 1): negates x and y, i.e. a 180-degree
// rotation in the XY plane -- NOT a mirror (a mirror negates only one axis).
mat4 rot = mat4( -1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
void main()
{
// NOTE(review): rot is right-multiplied onto the already-projected position,
// so it acts in clip space, not model space. vector * matrix in GLSL treats
// the vector as a row vector (equivalent to transpose(rot) * vector); for
// this diagonal matrix that happens to equal rot * a_position.
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position * rot;
v_color = a_color;
v_texCoord = a_texcoord;
}
// Fragment Shader
varying highp vec2 v_texCoord;
uniform sampler2D u_texture0;
uniform int slices;
void main()
{
// Endpoints of a black-to-white ramp.
lowp vec3 w = vec3(1.0,1.0,1.0);
lowp vec3 b = vec3(0.0,0.0,0.0);
// Grayscale ramp driven by the v texture coordinate, shifted by slices/10.
// NOTE(review): the variable is named "mix", which hides the built-in mix()
// function for the rest of this scope -- worth renaming to avoid confusion
// (and potential trouble on strict GLSL compilers).
lowp vec3 mix = mix(b, w, (v_texCoord.y-(float(slices)/10.0)));
// Darken the sampled texel by the ramp to fake a fading reflection.
gl_FragColor = texture2D(u_texture0,v_texCoord) * vec4(mix, 1.0);
}
But this shader is creating the following:
current result
And I don't know how to "flip" the image horizontally. I tried so many different parameters in the rotation matrix (I even tried to use a so-called "mirror matrix"), but I don't know how to reflect the image below the bottom of the original image.
If you're talking about what images.google.com returns for "coverflow" result, then you don't need rotation matrix at all.
// Answer's vertex shader: no rotation matrix needed -- flipping the V texture
// coordinate mirrors the sampled image vertically.
void main()
{
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * a_position;
v_color = a_color;
// Invert the v coordinate to flip the texture vertically.
v_texCoord = vec2(a_texcoord.x, 1.0 - a_texcoord.y);
}
Simply flip it vertically.
If you insist on using a matrix and want to make a "mirror" shader (one that takes its object and puts it under the "floor" to make a reflection), then you need a mirror matrix (don't forget to adjust front-face/back-face culling):
// Mirror matrix diag(1, -1, 1, 1): negates y only, reflecting geometry
// across the XZ plane (the "floor" at y = 0).
mat4(1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 );
AND you must know where the floor is.
gl_Position = (u_projectionMatrix * u_modelViewMatrix) * (a_position * mirrorMatrix - floor);
Alternatively you could put floor translation into same matrix. Basically, to mirror against arbitrary height, you need to combine three transforms (pseudocode).
translate(0, -floorHeight, 0) * scale(1, -1, 1) * translate(0, floorHeight, 0).
and put them into your matrix.
Also it might make sense to split modelView matrix into "model"(object/world) and "view" matrices. This way it'll be easier to perform transformations like these.
Related
I have this shader code below. I want to add a new uniform for another texture and make it so that it is applied to the vertices whose index is divisible by 4.
// Fragment shader: textured point sprite with an alpha test and depth fog.
uniform vec3 color;
uniform sampler2D texture;
varying vec4 vColor;
void main() {
// Sample the sprite texture at the point-sprite coordinate.
vec4 outColor = texture2D( texture, gl_PointCoord );
// Alpha test: drop mostly-transparent fragments.
if ( outColor.a < 0.5 ) discard;
// Tint the texel by the uniform color and the per-vertex color.
gl_FragColor = outColor * vec4( color * vColor.xyz, 0.5 );
// gl_FragCoord.z / gl_FragCoord.w -- commonly used as an approximate
// view-space depth value.
float depth = gl_FragCoord.z / gl_FragCoord.w;
const vec3 fogColor = vec3( 0.0 );
// Fog ramps in smoothly between depths 200 and 600.
float fogFactor = smoothstep( 200.0, 600.0, depth );
gl_FragColor = mix( gl_FragColor, vec4( fogColor, gl_FragColor.w ), fogFactor );
}
I want to add a condition something like index % 4 === 0 ? firstTexture : secondTexture but I do not know how to get the vertex index and perform a modulo operator in the shader language.
WebGL GLSL does not provide a vertex index, so you'll have to provide that data manually. For more information, see this question.
The modulus operator in GLSL is a function called mod().
When I use TBloomEffect and TGloomEffect (Delphi components) under Android/iOS (so with OpenGL), they produce only black & white output :( Under Windows (DX11) they work OK. I guess it's a bug in the Delphi source code, but I can't find what is wrong in the original GLSL code below, which I extracted from the original Delphi source:
TgloomEffect use :
// TgloomEffect fragment shader as generated by the Delphi source (the buggy
// version the question is about -- see the corrected shader further down).
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP4;
float _TMP3;
vec3 _x0014;
vec3 _TMP15;
float _grey0022;
float _grey0028;
vec3 _TMP35;
uniform float _GloomIntensity;
uniform float _BaseIntensity;
uniform float _GloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _gloom;
vec3 _TMP10;
_color1 = texture2D(_Input, TEX0.xy);
// Divide out alpha (input presumably premultiplied -- confirm), then invert.
_base = 1.0 - _color1.xyz/_color1.w;
// Threshold: keep the part of the color above 0.25, rescaled to [0,1].
_x0014 = (_base - 2.50000000E-001)/7.50000000E-001;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _x0014);
_TMP15 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
// Luma with classic 0.3/0.59/0.11 weights.
_grey0022 = dot(_TMP15, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
// BUG: _TMP3 is a scalar built from the red channel only (_TMP15.x) and then
// splatted to all three channels -- this collapses the result to grayscale.
// The corrected shader keeps the full RGB vector here.
_TMP3 = _grey0022 + _GloomSaturation*(_TMP15.x - _grey0022);
_gloom = vec3(_TMP3, _TMP3, _TMP3)*_GloomIntensity;
_grey0028 = dot(_base, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
// BUG: same scalar collapse here (_base.x only).
_TMP3 = _grey0028 + _BaseSaturation*(_base.x - _grey0028);
_base = vec3(_TMP3, _TMP3, _TMP3)*_BaseIntensity;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _gloom);
_TMP35 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
// Attenuate the base where the gloom is strong, invert back, and
// re-multiply by alpha.
_base = _base*(1.0 - _TMP35);
_TMP10 = (1.0 - (_base + _gloom))*_color1.w;
_ret_0 = vec4(_TMP10.x, _TMP10.y, _TMP10.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
and TBloomEffect use :
// TBloomEffect fragment shader as generated by the Delphi source (buggy
// version -- same grayscale defect as the gloom shader above).
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP5;
float _TMP4;
vec3 _TMP3;
vec3 _TMP14;
vec3 _x0015;
float _grey0021;
float _grey0027;
vec3 _TMP34;
uniform float _BloomIntensity;
uniform float _BaseIntensity;
uniform float _BloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _bloom;
vec3 _TMP11;
_color1 = texture2D(_Input, TEX0.xy);
// Divide out alpha (input presumably premultiplied -- confirm).
_base = _color1.xyz/_color1.w;
// Threshold: keep the part of the color above 0.25, rescaled to [0,1].
_x0015 = (_base - 2.50000000E-001)/7.50000000E-001;
_TMP3 = min(vec3( 1.0, 1.0, 1.0), _x0015);
_TMP14 = max(vec3( 0.0, 0.0, 0.0), _TMP3);
// Luma with classic 0.3/0.59/0.11 weights.
_grey0021 = dot(_TMP14, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
// BUG: scalar built from the red channel only (_TMP14.x), then splatted to
// all three channels -- this is what makes the output grayscale.
_TMP4 = _grey0021 + _BloomSaturation*(_TMP14.x - _grey0021);
_bloom = vec3(_TMP4, _TMP4, _TMP4)*_BloomIntensity;
_grey0027 = dot(_base, vec3( 3.00000012E-001, 5.89999974E-001, 1.09999999E-001));
// BUG: same scalar collapse here (_base.x only).
_TMP4 = _grey0027 + _BaseSaturation*(_base.x - _grey0027);
_base = vec3(_TMP4, _TMP4, _TMP4)*_BaseIntensity;
_TMP5 = min(vec3( 1.0, 1.0, 1.0), _bloom);
_TMP34 = max(vec3( 0.0, 0.0, 0.0), _TMP5);
// Attenuate the base where the bloom is strong, then re-multiply by alpha.
_base = _base*(1.0 - _TMP34);
_TMP11 = (_base + _bloom)*_color1.w;
_ret_0 = vec4(_TMP11.x, _TMP11.y, _TMP11.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
What is wrong in those 2 GLSL shaders that makes the output black & white only?
It is because of a bug in Delphi.
The corrected code is:
// Corrected TgloomEffect fragment shader: the saturation adjustment now
// operates on the full RGB vector (note _TMP3 is vec3 here, not float).
varying vec4 TEX0;
vec4 _ret_0;
vec3 _TMP4;
vec3 _TMP3;
vec3 _x0014;
vec3 _TMP15;
float _grey0022;
float _grey0028;
vec3 _TMP35;
uniform float _GloomIntensity;
uniform float _BaseIntensity;
uniform float _GloomSaturation;
uniform float _BaseSaturation;
uniform sampler2D _Input;
void main()
{
vec4 _color1;
vec3 _base;
vec3 _gloom;
vec3 _TMP10;
_color1 = texture2D(_Input, TEX0.xy);
// Divide out alpha (input presumably premultiplied -- confirm), then invert.
_base = 1.0 - _color1.xyz/_color1.w;
// Threshold: keep the part of the color above 0.25, rescaled to [0,1].
_x0014 = (_base - 0.25)/0.75;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _x0014);
_TMP15 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
// Luma with classic 0.3/0.59/0.11 weights.
_grey0022 = dot(_TMP15, vec3( 0.3, 0.59, 0.11));
// FIX: lerp the whole RGB vector between its luma and itself -- the buggy
// version used only the red channel (.x), which produced grayscale output.
_TMP3 = vec3(_grey0022, _grey0022, _grey0022) + _GloomSaturation*(_TMP15 - vec3(_grey0022, _grey0022, _grey0022));
_gloom = _TMP3*_GloomIntensity;
_grey0028 = dot(_base, vec3( 0.3, 0.59, 0.11));
// FIX: same vector-valued saturation adjustment for the base color.
_TMP3 = vec3(_grey0028, _grey0028, _grey0028) + _BaseSaturation*(_base - vec3(_grey0028, _grey0028, _grey0028));
_base = _TMP3*_BaseIntensity;
_TMP4 = min(vec3( 1.0, 1.0, 1.0), _gloom);
_TMP35 = max(vec3( 0.0, 0.0, 0.0), _TMP4);
// Attenuate the base where the gloom is strong, invert back, and
// re-multiply by alpha.
_base = _base*(1.0 - _TMP35);
_TMP10 = (1.0 - (_base + _gloom))*_color1.w;
_ret_0 = vec4(_TMP10.x, _TMP10.y, _TMP10.z, _color1.w);
gl_FragColor = _ret_0;
return;
}
I'm trying to rotate an image in webgl. If the texture has the same width as height there is no problem, but if width is for example 256px and height only 32px the image gets skewed.
It seems as if only the texture is rotating and not the vertices. However, usually when only the texture is rotating, its corners get clipped as they move outside the vertices. That doesn't happen here, so I'm a bit confused.
Here is my vertex shader code:
precision lowp float;
attribute vec3 vertPosition;
attribute vec3 vertColor;
attribute vec2 aTextureCoord;
varying vec3 fragColor;
varying lowp vec2 vTextureCoord;
varying lowp vec2 vTextureCoordBg;
uniform vec2 uvOffsetBg;
uniform vec2 uvScaleBg;
uniform mat4 uPMatrix;
uniform vec2 uvOffset;
uniform vec2 uvScale;
uniform vec3 translation;
uniform vec3 scale;
uniform float rotateZ;
uniform vec2 vertPosFixAfterRotate;
void main()
{
fragColor = vertColor;
// Foreground / background UVs: scale then offset the incoming coordinates.
vTextureCoord = (vec4(aTextureCoord.x, aTextureCoord.y, 0, 1)).xy * uvScale + uvOffset;
vTextureCoordBg = (vec4(aTextureCoord, 0, 1)).xy * uvScaleBg + uvOffsetBg;
// NOTE(review): this hand-built matrix mixes scale.x and scale.y inside the
// same column, which applies the non-uniform scale AFTER the rotation --
// the source of the skew for non-square textures. The answer builds it as
// translate * rotate * scale instead.
mat4 worldPosTrans = mat4(
vec4(scale.x*cos(rotateZ), scale.y*-sin(rotateZ), 0, 0),
vec4(scale.x*sin(rotateZ), scale.y*cos(rotateZ), 0, 0),
vec4(0, 0, scale.z, 0),
vec4(translation.x, translation.y, translation.z, 1));
gl_Position = (uPMatrix * worldPosTrans) * vec4(vertPosition.x + vertPosFixAfterRotate.x, vertPosition.y + vertPosFixAfterRotate.y, vertPosition.z, 1.0);
}
The rotation is sent from javascript to the shader through the rotateZ uniform.
You have to do the scaling before the rotation:
Scale matrix:
// Scale matrix (GLSL mat4 constructor is column-major; each vec4 is a column).
mat4 sm = mat4(
vec4(scale.x, 0.0, 0.0, 0.0),
vec4(0.0, scale.y, 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Rotation matrix:
// Rotation matrix about the z axis (column-major constructor; each vec4 is
// one column of the matrix).
mat4 rm = mat4(
vec4(cos(rotateZ), -sin(rotateZ), 0.0, 0.0),
vec4(sin(rotateZ), cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
Translation matrix:
// Translation matrix (column-major): the last column carries the translation.
// FIX: the third column must be the unit z axis, vec4(0.0, 0.0, 1.0, 0.0);
// the original vec4(0.0, 0.0, 0.0, 0.0) zeroed out z for every transformed
// point and is inconsistent with the composed matrix shown below, whose
// third column is vec4(0.0, 0.0, scale.z, 0.0).
mat4 tm = mat4(
vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, 1.0, 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
Model transformtion:
mat4 worldPosTrans = tm * rm * sm;
See the result and focus on scale.x and scale.y, in compare to the code snippet in your question:
// Composite tm * rm * sm: each column is scaled by a single scale component
// (scale.x throughout column 0, scale.y throughout column 1), unlike the
// question's matrix, which mixed scale.x and scale.y within one column.
mat4 worldPosTrans = mat4(
vec4(scale.x * cos(rotateZ), scale.x * -sin(rotateZ), 0.0, 0.0),
vec4(scale.y * sin(rotateZ), scale.y * cos(rotateZ), 0.0, 0.0),
vec4(0.0, 0.0, scale.z, 0.0),
vec4(translation.x, translation.y, translation.z, 1.0));
I have a 3x3 homography matrix that works correctly with OpenCV's warpPerspective, but I need to do the warping on GPU for performance reasons. What is the best approach? I tried multiplying in the vertex shader to get the texture coordinates and then render a quad, but I get strange distortions. I'm not sure if it's the interpolation not working as I expect. Attaching output for comparison (it involves two different, but close enough shots).
Absolute difference of warp and other image from GPU:
Composite of warp and other image in OpenCV:
EDIT:
Following are my shaders: the task is image rectification (making epilines become scanlines) + absolute difference.
// Vertex Shader
// Vertex Shader: maps pixel-space quad positions to clip space and computes
// per-vertex homography-warped texture coordinates for the reference and
// current images. NOTE(review): comments inside the STRINGIFY argument are
// removed by the C preprocessor before stringification, so they never reach
// the GLSL compiler.
static const char* warpVS = STRINGIFY
(
uniform highp mat3 homography1;
uniform highp mat3 homography2;
uniform highp int width;
uniform highp int height;
attribute highp vec2 position;
varying highp vec2 refTexCoords;
varying highp vec2 curTexCoords;
// Perspective-divide homogeneous pixel coords, normalize by image size to
// [0,1], and flip v for texture addressing.
highp vec2 convertToTexture(highp vec3 pixelCoords) {
pixelCoords /= pixelCoords.z; // need to project
pixelCoords /= vec3(float(width), float(height), 1.0);
pixelCoords.y = 1.0 - pixelCoords.y; // origin is in bottom left corner for textures
return pixelCoords.xy;
}
void main(void)
{
gl_Position = vec4(position / vec2(float(width) / 2.0, float(height) / 2.0) - vec2(1.0), 0.0, 1.0);
gl_Position.y = -gl_Position.y;
highp vec3 initialCoords = vec3(position, 1.0);
// NOTE(review): dividing by z per VERTEX and letting the rasterizer
// linearly interpolate the 2D result is what distorts the warp; the answer
// moves the projection (the /z) into the fragment shader.
refTexCoords = convertToTexture(homography1 * initialCoords);
curTexCoords = convertToTexture(homography2 * initialCoords);
}
);
// Fragment Shader
// Fragment Shader: masked absolute difference of the two warped images.
// NOTE(review): comments inside the STRINGIFY argument are removed by the
// C preprocessor before stringification.
static const char* warpFS = STRINGIFY
(
varying highp vec2 refTexCoords;
varying highp vec2 curTexCoords;
uniform mediump sampler2D refTex;
uniform mediump sampler2D curTex;
uniform mediump sampler2D maskTex;
void main(void)
{
// Skip fragments outside the validity mask.
if (texture2D(maskTex, refTexCoords).r == 0.0) {
discard;
}
// Skip fragments whose warped coordinate falls outside the texture.
if (any(bvec4(curTexCoords[0] < 0.0, curTexCoords[1] < 0.0, curTexCoords[0] > 1.0, curTexCoords[1] > 1.0))) {
discard;
}
mediump vec4 referenceColor = texture2D(refTex, refTexCoords);
mediump vec4 currentColor = texture2D(curTex, curTexCoords);
// Red channel carries |ref - cur|; green is fixed at 1 for visualization.
gl_FragColor = vec4(abs(referenceColor.r - currentColor.r), 1.0, 0.0, 1.0);
}
);
I think you just need to do the projection per pixel. Make refTexCoords and curTexCoords at least vec3, then do the /z in the pixel shader before texture lookup. Even better use the textureProj GLSL instruction.
You want to do everything that is linear in the vertex shader, but things like projection need to be done in the fragment shader per pixel.
This link might help with some background: http://www.reedbeta.com/blog/2012/05/26/quadrilateral-interpolation-part-1/
Can we have vert shader without attributes?
#version 300 es
// Attribute-less vertex shader: emits one fixed point at the origin; the
// output color is chosen by comparing the x1 uniform against a constant
// (operator == on vectors compares all components and yields a single bool).
out mediump vec4 basecolor;
uniform ivec2 x1;
void main(void)
{
if(x1 == ivec2(10,20))
basecolor = vec4(0.0, 1.0, 0.0, 1.0);
else
basecolor = vec4(1.0, 0.0, 1.0, 1.0);
gl_PointSize = 64.0;
gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
}
#version 300 es
// Fragment shader: pass the interpolated vertex color straight through.
in mediump vec4 basecolor;
// FIX: GLSL ES 3.00 fragment shaders have no default precision for floats,
// so the output declaration needs an explicit precision qualifier (the
// original "out vec4 FragColor;" fails to compile on conforming drivers).
out mediump vec4 FragColor;
void main(void)
{
FragColor = basecolor;
}
Technically there is nothing in the specification that actually requires you to have vertex attributes. But by the same token, in OpenGL ES 3.0 you have two intrinsically defined in attributes whether you want them or not:
The built-in vertex shader variables for communicating with fixed functionality are intrinsically declared as follows in the vertex language:
in highp int gl_VertexID;
in highp int gl_InstanceID;
This is really the only time it actually makes sense not to have any attributes. You can dynamically compute the position based on gl_VertexID, gl_InstanceID or some combination of both, which is a major change from OpenGL ES 2.0.