Clash between J3DI, Win7 and Nvidia GeForce

I have drawn a sphere using J3DI (a WebGL library) under Windows 7 64-bit with an Nvidia GeForce GT 330M graphics card.
First I rendered it in plain blue and it appeared correctly.
Then I tried to apply a texture to it, but the sphere appeared like in this image:
http://s1.postimage.org/1ekqrgolg/earth.jpg
while on a Mac it appeared like this:
http://s1.postimage.org/1eksf0138/error.jpg
So what is the problem? Is it the OS, J3DI, or the graphics card?
For additional information, here are the shaders I used.
Note: this code is taken from OpenGL and HTML5 (a video course from O'Reilly).
Vertex shader:
uniform mat4 u_modelViewProjMatrix;
uniform mat4 u_normalMatrix;
uniform vec3 lightDir;
attribute vec3 vNormal;
attribute vec4 vTexCoord;
attribute vec4 vPosition;
varying float v_Dot;
varying vec2 v_texCoord;
void main()
{
    gl_Position = u_modelViewProjMatrix * vPosition;
    v_texCoord = vTexCoord.st;
    vec4 transNormal = u_normalMatrix * vec4(vNormal, 1);
    v_Dot = max(dot(normalize(transNormal.xyz), normalize(lightDir)), 0.3);
}
Fragment shader:
#ifdef GL_ES
precision highp float;
#endif
uniform sampler2D sampler2d;
varying float v_Dot;
varying vec2 v_texCoord;
void main()
{
    vec2 texCoord = vec2(v_texCoord.s, v_texCoord.t);
    vec4 color = texture2D(sampler2d, texCoord);
    color += vec4(0.1, 0.1, 0.1, 1);
    gl_FragColor = vec4(color.xyz * v_Dot, color.a);
}
The context function is:
function(context) {
    // set up VBOs
    context.enableVertexAttribArray(0);
    context.enableVertexAttribArray(1);
    context.enableVertexAttribArray(2);
    context.bindBuffer(context.ARRAY_BUFFER, context.sphere.normalObject);
    context.vertexAttribPointer(0, 3, context.FLOAT, false, 0, 0);
    context.bindBuffer(context.ARRAY_BUFFER, context.sphere.texCoordObject);
    context.vertexAttribPointer(1, 2, context.FLOAT, false, 0, 0);
    context.bindBuffer(context.ARRAY_BUFFER, context.sphere.vertexObject);
    context.vertexAttribPointer(2, 3, context.FLOAT, false, 0, 0);
    context.bindBuffer(context.ELEMENT_ARRAY_BUFFER, context.sphere.indexObject);
    // construct the model-view * projection matrix
    var mvpMatrix = new J3DIMatrix4(context.perspectiveMatrix);
    mvpMatrix.setUniform(context, context.getUniformLocation(context.program, "u_modelViewProjMatrix"), false);
    // bind texture
    context.bindTexture(context.TEXTURE_2D, this.texture);
    context.drawElements(context.TRIANGLES, context.sphere.numIndices, context.UNSIGNED_SHORT, 0);
}
I'm really concerned about this issue.

You're almost there. In:
v_Dot = max(dot(normalize(transNormal.xyz), normalize(lightDir)), 0.3);
change the 0.3 at the end to 1.0; that should work (edit: at least on Windows; on the Mac it could be outdated drivers).
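For reference, the adjusted line would read:
    v_Dot = max(dot(normalize(transNormal.xyz), normalize(lightDir)), 1.0);
Note that since the dot product of two normalized vectors never exceeds 1.0, this pins v_Dot to a constant 1.0, effectively disabling the diffuse shading while you isolate the texturing problem.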

Related

How to place a texture in a specific location?

In my code I'm mixing two textures. I want to be able to position a texture at any place on the plane, but when I add an offset to the texture's UV coordinates the image just gets stretched.
offsetText1 = vec2(0.1,0.1);
vec4 displacement = texture2D(utexture1,vUv+offsetText1);
How do I move the texture to any position without stretching it?
VERTEX SHADER:
varying vec2 vUv;
uniform sampler2D utexture1;
uniform sampler2D utexture2;
varying vec2 offsetText1;
void main() {
    offsetText1 = vec2(0.1, 0.1);
    vUv = uv;
    vec4 modelPosition = modelMatrix * vec4(position, 1.0);
    vec4 displacement = texture2D(utexture1, vUv + offsetText1);
    vec4 displacement2 = texture2D(utexture2, vUv);
    modelPosition.z += displacement.r * 1.0;
    modelPosition.z += displacement2.r * 40.0;
    gl_Position = projectionMatrix * viewMatrix * modelPosition;
}
FRAGMENT SHADER:
#ifdef GL_ES
precision highp float;
#endif
uniform sampler2D utexture1;
uniform sampler2D utexture2;
varying vec2 vUv;
varying vec2 offsetText1;
void main() {
    vec3 c;
    vec4 Ca = texture2D(utexture1, vUv + offsetText1);
    vec4 Cb = texture2D(utexture2, vUv);
    c = Ca.rgb * Ca.a + Cb.rgb * Cb.a * (2.0 - Ca.a);
    gl_FragColor = vec4(c, 1.0);
}
Image with offsetText1 = vec2(0.0,0.0): no stretching.
Image with offsetText1 = vec2(0.1,0.1): the image is stretched from the top right corner.
That's the normal behavior of textures. They are defined over the range [0, 1], so when you sample beyond 1 or below 0 they "wrap". You need to tell the texture what to do when wrapping. Do you want it to repeat, clamp (stretching the edge pixels), or mirror?
You can set this with the texture.wrapS and .wrapT properties, each of which accepts one of three values:
THREE.RepeatWrapping
THREE.ClampToEdgeWrapping
THREE.MirroredRepeatWrapping
If you want to just show white where the texture samples out of bounds, you'd have to do that programmatically in your shader code, along these lines:
vec2 uv = vUv + offsetText1;
if (any(lessThan(uv, vec2(0.0))) || any(greaterThan(uv, vec2(1.0)))) {
    gl_FragColor = vec4(1.0); // out of bounds: show white
    return;
}
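If instead you want repeat behavior computed directly in the shader, a minimal sketch (using the question's own vUv and offsetText1, and equivalent to THREE.RepeatWrapping for this one lookup) is to wrap the offset coordinates with fract() before sampling:
vec2 wrappedUv = fract(vUv + offsetText1); // wraps back into [0, 1)
vec4 Ca = texture2D(utexture1, wrappedUv);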

Weird behaviour with OpenGL Uniform Buffers on OSX

I am seeing some weird behaviour with uniform buffers in my hobby OpenGL 4.1 engine.
On Windows everything works fine (both Intel and Nvidia GPUs), but on my MacBook (also Intel) it doesn't.
To explain what happens on OSX: if I hardcode all my uniform-buffer variables in the actual fragment shader code, I can render perfectly fine, but if I switch back to the variables, I get nothing.
I had a look at the OpenGL state using apitrace and all the variable values are correct, so I am a bit confused as to what is going on here.
I am hoping this is just a code bug and not some underlying issue with the drivers.
Below is the fragment shader code; if I hardcode all the DirectionalLight variables, everything works fine.
#version 410
struct DirectionalLightData
{
    vec4 Colour;
    vec3 Direction;
    float Intensity;
};
layout(std140) uniform ObjectBuffer
{
    mat4 Model;
};
layout(std140) uniform FrameBuffer
{
    mat4 Projection;
    mat4 View;
    DirectionalLightData DirectionalLight;
    vec3 ViewPos;
};
uniform sampler2D PositionMap;
uniform sampler2D NormalMap;
uniform sampler2D AlbedoSpecMap;
layout(location = 0) in vec2 TexCoord;
out vec4 FinalColour;
float CalcDiffuseContribution(vec3 lightDir, vec3 normal)
{
    return max(dot(normal, -lightDir), 0.0f);
}
float CalcSpecularContribution(vec3 lightDir, vec3 viewDir, vec3 normal, float specularExponent)
{
    vec3 reflectDir = reflect(lightDir, normal);
    vec3 halfwayDir = normalize(lightDir + viewDir);
    return pow(max(dot(normal, halfwayDir), 0.0f), specularExponent);
}
float CalcDirectionLightFactor(vec3 viewDir, vec3 lightDir, vec3 normal)
{
    float diffuseFactor = CalcDiffuseContribution(lightDir, normal);
    float specularFactor = CalcSpecularContribution(normal, viewDir, normal, 1.0f);
    return diffuseFactor * specularFactor;
}
void main()
{
    vec3 position = texture(PositionMap, TexCoord).rgb;
    vec3 normal = texture(NormalMap, TexCoord).rgb;
    vec3 albedo = texture(AlbedoSpecMap, TexCoord).rgb;
    vec3 viewDir = normalize(ViewPos - position);
    float directionLightFactor = CalcDirectionLightFactor(viewDir, DirectionalLight.Direction, normal) * DirectionalLight.Intensity;
    FinalColour.rgb = albedo * directionLightFactor * DirectionalLight.Colour.rgb;
    FinalColour.a = 1.0f * DirectionalLight.Colour.a;
}
Here is the order in which I update and bind the UBOs (I pulled these calls from apitrace, as there is too much code to paste here):
glGetActiveUniformBlockName(5, 0, 255, NULL, FrameBuffer);
glGetUniformBlockIndex(5, FrameBuffer) = 0;
glGetActiveUniformBlockName(5, 1, 255, NULL, ObjectBuffer);
glGetUniformBlockIndex(5, ObjectBuffer) = 1;
glBindBuffer(GL_UNIFORM_BUFFER, 1);
glMapBufferRange(GL_UNIFORM_BUFFER, 0, 172,GL_MAP_WRITE_BIT);
memcpy(0x10b9f8000, [binary data, size = 172 bytes], 172);
glUnmapBuffer(GL_UNIFORM_BUFFER);
glBindBufferBase(GL_UNIFORM_BUFFER, 0, 2);
glBindBufferBase(GL_UNIFORM_BUFFER, 1, 1);
glBindBuffer(GL_UNIFORM_BUFFER, 2);
glMapBufferRange(GL_UNIFORM_BUFFER, 0, 64, GL_MAP_WRITE_BIT);
memcpy(0x10b9f9000, [binary data, size = 64 bytes], 64);
glUnmapBuffer(GL_UNIFORM_BUFFER);
glUniformBlockBinding(5, 1, 0);
glUniformBlockBinding(5, 0, 1);
glDrawArrays(GL_TRIANGLES, 0, 6);
Note that the FrameBuffer UBO has ID 1 and ObjectBuffer UBO has ID 2
I think that when you are using the std140 layout, your data members must be properly aligned, so you cannot freely mix vec4 with vec3 or float. Either keep all variables mat4 and vec4, or don't use the std140 layout and instead calculate the UBO alignment and the offsets of your variables on the application side and set the values accordingly. See the usage of GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT.
As an experiment, change all variables to mat4 and vec4 and see whether the issue goes away.
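A minimal sketch of that experiment, assuming a packing where Direction and Intensity share one vec4 (the DirectionAndIntensity name is hypothetical, not from the engine):
struct DirectionalLightData
{
    vec4 Colour;
    vec4 DirectionAndIntensity; // xyz = Direction, w = Intensity
};
layout(std140) uniform FrameBuffer
{
    mat4 Projection;
    mat4 View;
    DirectionalLightData DirectionalLight;
    vec4 ViewPos; // xyz used, w is padding
};
With only mat4/vec4 members (16-byte alignment throughout), the std140 offsets are unambiguous; the fragment shader would then read DirectionalLight.DirectionAndIntensity.xyz and .w where it previously read Direction and Intensity.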
If you did not use the std140 layout for a block, you will need to query the byte offset for each uniform within the block. The OpenGL specification explains the storage of each of the basic types, but not the alignment between types. Struct members, just like regular uniforms, each have a separate offset that must be individually queried.
After a few days of digging I seem to have found the issue.
I was not calling glBindBufferBase() after binding a different shader program.
Such a silly mistake caused me so much grief.
Thanks everyone for the help.

How to write only alpha > 0 to the stencil buffer?

I am trying to render a texture to the stencil buffer. I only need the pixels whose alpha is > 0, but my code is writing every pixel of the quad, even the ones with 0 alpha. How can I avoid this?
Here's my code:
GL.StencilOp(StencilOp.Keep, StencilOp.Keep, StencilOp.Incr);
GL.ColorMask(false, false, false, false);
GL.DepthMask(false);
RenderMask(mask);
GL.StencilFunc(StencilFunction.Equal, 1, 0xFF);
GL.StencilOp(StencilOp.Keep, StencilOp.Keep, StencilOp.Keep);
GL.ColorMask(true, true, true, true);
GL.DepthMask(true);
When debugging with RenderDoc I can see that the stencil buffer contains 1s where my texture is... but it's a rectangle; it does not take alpha into account.
Here's my fragment shader (it works fine for normal rendering):
varying lowp vec4 vColor;
varying lowp vec2 vTexCoords;
uniform lowp sampler2D uTexture;
void main() {
    gl_FragColor = texture2D(uTexture, vTexCoords) * vColor;
}
Use a "discard" statement in the shader to drop the fragments you don't want to keep.
varying lowp vec4 vColor;
varying lowp vec2 vTexCoords;
uniform lowp sampler2D uTexture;
void main() {
    vec4 color = texture2D(uTexture, vTexCoords) * vColor;
    if (color.a == 0.0) {
        discard;
    }
    gl_FragColor = color;
}
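One caveat, as an aside not in the original answer: with texture filtering, alpha along the edges may come back tiny but not exactly 0.0, so an exact == 0.0 test can let fringe pixels through to the stencil buffer. A small threshold is a common variation:
if (color.a < 0.01) { // treat nearly-transparent fragments as empty
    discard;
}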

Can't get OpenGL code to work properly

I'm very new to OpenGL, GLSL and WebGL. I'm trying to get this sample code to run in a tool like http://shdr.bkcore.com/ but I can't get it to work.
Vertex Shader:
void main()
{
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_TexCoord[0] = gl_MultiTexCoord0;
}
Fragment Shader:
precision highp float;
uniform float time;
uniform vec2 resolution;
varying vec3 fPosition;
varying vec3 fNormal;
uniform sampler2D tex0;
void main()
{
    float border = 0.01;
    float circle_radius = 0.5;
    vec4 circle_color = vec4(1.0, 1.0, 1.0, 1.0);
    vec2 circle_center = vec2(0.5, 0.5);
    vec2 uv = gl_TexCoord[0].xy;
    vec4 bkg_color = texture2D(tex0, uv * vec2(1.0, -1.0));
    // Offset uv with the center of the circle.
    uv -= circle_center;
    float dist = sqrt(dot(uv, uv));
    if ((dist > (circle_radius + border)) || (dist < (circle_radius - border)))
        gl_FragColor = bkg_color;
    else
        gl_FragColor = circle_color;
}
I figured that this code must be from an outdated version of the language, so I changed the vertex shader to:
precision highp float;
attribute vec2 position;
attribute vec3 normal;
varying vec2 TextCoord;
attribute vec2 textCoord;
uniform mat3 normalMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
varying vec3 fNormal;
varying vec3 fPosition;
void main()
{
    gl_Position = vec4(position, 0.0, 1.0);
    TextCoord = vec2(textCoord);
}
That seemed to fix the error messages about undeclared identifiers and not being able to "convert from 'float' to highp 4-component something-or-other", but I have no idea whether, functionally, it does the same thing the original intended.
Also, when I convert to this version of the Vertex Shader I have no idea what I'm supposed to do with this line in the Fragment Shader:
vec2 uv = gl_TexCoord[0].xy;
How do I convert this line to fit in with the converted vertex shader and how can I be sure that the vertex shader is even converted correctly?
gl_TexCoord is from desktop OpenGL, and not part of OpenGL ES. You'll need to create a new user-defined vec2 varying to hold the coordinate value.
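A minimal sketch of that conversion, reusing the question's own textCoord attribute and TextCoord varying (the tex0 sampler is assumed to be bound by the tool):
// Vertex shader: forward the texture coordinate attribute through a varying
attribute vec2 position;
attribute vec2 textCoord;
varying vec2 TextCoord;
void main()
{
    gl_Position = vec4(position, 0.0, 1.0);
    TextCoord = textCoord;
}

// Fragment shader: read the varying where gl_TexCoord[0] used to be
precision highp float;
varying vec2 TextCoord;
uniform sampler2D tex0;
void main()
{
    vec2 uv = TextCoord; // replaces vec2 uv = gl_TexCoord[0].xy;
    gl_FragColor = texture2D(tex0, uv);
}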

How to use a 3x3 homography matrix in OpenGL ES shaders?

I have a 3x3 homography matrix that works correctly with OpenCV's warpPerspective, but I need to do the warping on the GPU for performance reasons. What is the best approach? I tried multiplying in the vertex shader to get the texture coordinates and then rendering a quad, but I get strange distortions. I'm not sure whether it's the interpolation not working the way I expect. Attached is the output for comparison (it involves two different, but close enough, shots).
Absolute difference of warp and other image from GPU:
Composite of warp and other image in OpenCV:
EDIT:
Here are my shaders; the task is image rectification (making epipolar lines become scanlines) plus absolute difference.
// Vertex Shader
static const char* warpVS = STRINGIFY
(
uniform highp mat3 homography1;
uniform highp mat3 homography2;
uniform highp int width;
uniform highp int height;
attribute highp vec2 position;
varying highp vec2 refTexCoords;
varying highp vec2 curTexCoords;
highp vec2 convertToTexture(highp vec3 pixelCoords) {
    pixelCoords /= pixelCoords.z; // need to project
    pixelCoords /= vec3(float(width), float(height), 1.0);
    pixelCoords.y = 1.0 - pixelCoords.y; // origin is in the bottom left corner for textures
    return pixelCoords.xy;
}
void main(void)
{
    gl_Position = vec4(position / vec2(float(width) / 2.0, float(height) / 2.0) - vec2(1.0), 0.0, 1.0);
    gl_Position.y = -gl_Position.y;
    highp vec3 initialCoords = vec3(position, 1.0);
    refTexCoords = convertToTexture(homography1 * initialCoords);
    curTexCoords = convertToTexture(homography2 * initialCoords);
}
);
// Fragment Shader
static const char* warpFS = STRINGIFY
(
varying highp vec2 refTexCoords;
varying highp vec2 curTexCoords;
uniform mediump sampler2D refTex;
uniform mediump sampler2D curTex;
uniform mediump sampler2D maskTex;
void main(void)
{
    if (texture2D(maskTex, refTexCoords).r == 0.0) {
        discard;
    }
    if (any(bvec4(curTexCoords[0] < 0.0, curTexCoords[1] < 0.0, curTexCoords[0] > 1.0, curTexCoords[1] > 1.0))) {
        discard;
    }
    mediump vec4 referenceColor = texture2D(refTex, refTexCoords);
    mediump vec4 currentColor = texture2D(curTex, curTexCoords);
    gl_FragColor = vec4(abs(referenceColor.r - currentColor.r), 1.0, 0.0, 1.0);
}
);
I think you just need to do the projection per pixel. Make refTexCoords and curTexCoords at least vec3, then do the divide by z in the pixel shader before the texture lookup. Even better, use the textureProj GLSL instruction.
You want to do everything that is linear in the vertex shader, but things like projection need to be done in the fragment shader per pixel.
This link might help with some background: http://www.reedbeta.com/blog/2012/05/26/quadrilateral-interpolation-part-1/
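A minimal sketch of that per-pixel projection, reusing the question's homography1 and refTex names (the pixel-to-texture normalization and y-flip from convertToTexture are omitted here for brevity):
// Vertex shader (sketch): only the linear part, no perspective divide here
uniform highp mat3 homography1;
attribute highp vec2 position;
varying highp vec3 refCoords;
void main(void)
{
    gl_Position = vec4(position, 0.0, 1.0); // placeholder clip-space transform
    refCoords = homography1 * vec3(position, 1.0);
}

// Fragment shader (sketch): divide per fragment, or equivalently call
// texture2DProj(refTex, refCoords), which performs the divide internally
uniform mediump sampler2D refTex;
varying highp vec3 refCoords;
void main(void)
{
    highp vec2 uv = refCoords.xy / refCoords.z;
    gl_FragColor = texture2D(refTex, uv);
}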
