Displaying incorrect texture when handling multiple animated 2D sprites in WebGL

I'm working on a fairly simple 2D sprite-based game that uses WebGL. Individual sprites can be translated, scaled, and rotated. I'm using sprite sheets as textures and then modifying texture coordinates on the fly to create animation effects. Just to make things interesting, new sprites are instantiated on the fly. All of this works fine and everything renders properly when I'm only using two different textures, but it breaks down when I try to add a third. I can have multiple instances of sprites using the two textures, but as soon as I try to create an instance of a sprite with the third texture, it all goes wrong. I'm new to WebGL and I can't seem to find a tutorial that covers multiple textures inside an event loop. I figure I was doing it wrong even with two sprites, but managed to get away with it until I added more complexity.
Here are my shaders:
<script id="2d-vertex-shader" type="x-shader/x-vertex">
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
uniform mat3 u_matrix;
varying vec2 v_texCoord;
void main() {
    // Multiply the position by the matrix.
    vec2 position = (u_matrix * vec3(a_position, 1)).xy;
    // convert the position from pixels to 0.0 to 1.0
    vec2 zeroToOne = position / u_resolution;
    // convert from 0->1 to 0->2
    vec2 zeroToTwo = zeroToOne * 2.0;
    // convert from 0->2 to -1->+1 (clipspace)
    vec2 clipSpace = zeroToTwo - 1.0;
    gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
    v_texCoord = a_texCoord;
}
</script>
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our texture
uniform sampler2D u_image0;
uniform sampler2D u_image1;
uniform sampler2D u_image2;
// the texCoords passed in from the vertex shader.
varying vec2 v_texCoord;
void main() {
    // Look up a color from the texture.
    vec4 textureColor = texture2D(u_image0, v_texCoord);
    if (textureColor.a < 0.5)
        discard;
    else
        gl_FragColor = vec4(textureColor.rgb, textureColor.a);
    vec4 textureColor1 = texture2D(u_image1, v_texCoord);
    if (textureColor1.a < 0.5)
        discard;
    else
        gl_FragColor = vec4(textureColor1.rgb, textureColor1.a);
    vec4 textureColor2 = texture2D(u_image2, v_texCoord);
    // if (textureColor2.a < 0.5)
    //     discard;
    // else
    //     gl_FragColor = vec4(textureColor2.rgb, textureColor2.a);
}
</script>
Note how the third conditional block in the fragment shader is commented out. If I include this, it breaks. Well, the code runs, but the textures are all over the place.
This is the code that I run when the sprite is instantiated, after the texture image loads.
image.onload = function() {
    that.buffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, that.buffer);
    var xMin = 0;
    var xMax = that.width;
    var yMin = 0;
    var yMax = that.height;
    // setup a rectangle from 0, that.width to 0, that.height in pixels
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
        xMin, yMax,
        xMax, yMax,
        xMin, yMin,
        xMin, yMin,
        xMax, yMax,
        xMax, yMin]), gl.STATIC_DRAW);
    gl.enableVertexAttribArray(globalGL.positionLocation);
    gl.vertexAttribPointer(globalGL.positionLocation, 2, gl.FLOAT, false, 0, 0);
    // look up where the texture coordinates need to go.
    that.texCoordLocation = gl.getAttribLocation(globalGL.program, "a_texCoord");
    // create a texture map object and attach to that
    that.texMap = new TextureMap({horizontalNum: that.texHorizontalNum, verticalNum: that.texVerticalNum});
    var tex = that.texMap.getTile([0, 0]);
    // provide texture coordinates for the rectangle.
    that.texCoordBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, that.texCoordBuffer);
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
        tex.minX, tex.maxY,
        tex.maxX, tex.maxY,
        tex.minX, tex.minY,
        tex.minX, tex.minY,
        tex.maxX, tex.maxY,
        tex.maxX, tex.minY]), gl.STATIC_DRAW);
    gl.enableVertexAttribArray(that.texCoordLocation);
    gl.vertexAttribPointer(that.texCoordLocation, 2, gl.FLOAT, false, 0, 0);
    // Create a texture.
    that.texture = gl.createTexture();
    that.u_imageLocation = gl.getUniformLocation(globalGL.program, "u_image" + that.textureIndex);
    gl.uniform1i(that.u_imageLocation, that.textureIndex);
    gl.activeTexture(gl.TEXTURE0 + that.textureIndex);
    gl.bindTexture(gl.TEXTURE_2D, that.texture);
    // Set the parameters so we can render any size image.
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
    // Upload the image into the texture.
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image);
    globalObj.agents[that.id] = that;
};
"that" is reference to the sprite object that I'm using. that.texMap is an object that tracks the texture coordinate data for the sprite. that.textureIndex is an integer unique to each type of sprite. I also save reference to the GL texture itself as that.texture.
This is what I run in the event loop for each instance of a sprite:
this.draw = function() {
    var tex, texCoordLocation, texCoordBuffer, i;
    // This pulls up the correct texture coordinates depending on what the sprite is doing.
    if (this.shooting) {
        tex = this.texMap.getTile([0, 1]);
    } else if (this.moving) {
        if (this.moving < 15 / this.speed) {
            this.moving++;
            tex = this.texMap.getTile();
        } else {
            this.moving = 1;
            tex = this.texMap.getTile('next');
        }
    } else {
        tex = this.texMap.getTile([0, 0]);
    }
    // binds the texture associated with the sprite.
    gl.bindTexture(gl.TEXTURE_2D, this.texture);
    // gets a reference to the texCoord attribute
    texCoordLocation = gl.getAttribLocation(globalGL.program, 'a_texCoord');
    // create a buffer for texture coordinates
    texCoordBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
        tex.minX, tex.maxY,
        tex.maxX, tex.maxY,
        tex.minX, tex.minY,
        tex.minX, tex.minY,
        tex.maxX, tex.maxY,
        tex.maxX, tex.minY]), gl.STATIC_DRAW);
    gl.enableVertexAttribArray(texCoordLocation);
    gl.vertexAttribPointer(texCoordLocation, 2, gl.FLOAT, false, 0, 0);
    var matrixLocation = gl.getUniformLocation(globalGL.program, 'u_matrix');
    // sets up arrays needed to rotate and translate the sprite
    var centerTranslation = [-this.width / 2, -this.height / 2];
    var decenterTranslation = [this.width / 2, this.height / 2];
    var translation = [this.x, this.y];
    var angleInRadians = this.rotation;
    var scale = [1, 1];
    // Compute the matrices
    var centerTranslationMatrix = makeTranslation(centerTranslation[0], centerTranslation[1]);
    var decenterTranslationMatrix = makeTranslation(decenterTranslation[0], decenterTranslation[1]);
    var translationMatrix = makeTranslation(translation[0], translation[1]);
    var rotationMatrix = makeRotation(angleInRadians);
    var scaleMatrix = makeScale(scale[0], scale[1]);
    // Multiply the matrices.
    var matrix = matrixMultiply(scaleMatrix, centerTranslationMatrix);
    matrix = matrixMultiply(matrix, rotationMatrix);
    matrix = matrixMultiply(matrix, decenterTranslationMatrix);
    matrix = matrixMultiply(matrix, translationMatrix);
    // Set the matrix.
    gl.uniformMatrix3fv(matrixLocation, false, matrix);
    // draw
    gl.drawArrays(gl.TRIANGLES, 0, 6);
};
Hopefully this hasn't been too verbose. I've been all over the web looking for tutorials or other scenarios that handle this sort of situation and I just can't seem to find anything.
Thanks!
EDIT: So, I know this problem isn't too hard for the community, yet quite a few people have viewed my question and no one has responded. This prompted me to do a little self-reflection and take a good, long, hard look at my sample code.
I've restructured it significantly. I realized I don't need to make a new texture every time a sprite is instantiated. Instead, I load up all the textures I'll need once at the beginning. So the second code block has been completely reworked. It still does a lot of the same stuff, but it only runs once for each texture in a for loop at the beginning. I'd be happy to upload the new code if anyone wants to have a look at it, or if someone could point me in the direction of a tutorial that uses multiple textures on multiple 2D quads (more than one quad per texture) in an event loop, I'd be happy to do the research myself.
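In outline, the reworked loader now does something like this once at startup (a simplified sketch; textureURLs stands in for my actual list of sprite sheet images):
var textures = [];
textureURLs.forEach(function(url, i) {
    var image = new Image();
    image.onload = function() {
        var texture = gl.createTexture();
        // each sprite-sheet texture gets its own texture unit, set up once
        gl.activeTexture(gl.TEXTURE0 + i);
        gl.bindTexture(gl.TEXTURE_2D, texture);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
        gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image);
        gl.uniform1i(gl.getUniformLocation(globalGL.program, "u_image" + i), i);
        textures[i] = texture;
    };
    image.src = url;
});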

The issue is that everything will be the same texture, then a new sprite is instantiated and everything turns into the new texture.
From this, I'd suspect you aren't selecting the correct texture unit before binding (i.e. calling gl.activeTexture() with the correct parameter). It looks fine in the onload function, but you don't use it in your this.draw() function before binding the sprite texture, which may be the problem.
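For example, something like this near the top of the draw function (a sketch, reusing the textureIndex scheme from the onload code):
this.draw = function() {
    // ... texture coordinate selection as before ...
    // activate the unit this sprite's texture was uploaded to, then bind,
    // so the u_image<N> sampler reads from the right texture
    gl.activeTexture(gl.TEXTURE0 + this.textureIndex);
    gl.bindTexture(gl.TEXTURE_2D, this.texture);
    // ... buffer setup, matrices, and drawArrays as before ...
};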

Related

Use 2 meshes + shader materials with each a different fragment shader in 1 scene (three.js)

I have 2 meshes, each with a ShaderMaterial and each with a different fragment shader. When I add both meshes to my scene, only one will show up. Below you can find my 2 fragment shaders (see both images to see what they look like). They're basically the same.
What I want to achieve: use mesh1 as a mask and put the other one, mesh2 (the purple blob), on top of the mask.
Purple blob:
// three.js code
const geometry1 = new THREE.PlaneBufferGeometry(1, 1, 1, 1);
const material1 = new THREE.ShaderMaterial({
    uniforms: this.uniforms,
    vertexShader,
    fragmentShader,
    defines: {
        PR: window.devicePixelRatio.toFixed(1)
    }
});
const mesh1 = new THREE.Mesh(geometry1, material1);
this.scene.add(mesh1);
// fragment shader
void main() {
    vec2 res = u_res * PR;
    vec2 st = gl_FragCoord.xy / res.xy - 0.5;
    st.y *= u_res.y / u_res.x * 0.8;
    vec2 circlePos = st;
    float c = circle(circlePos, 0.2 + 0. * 0.1, 1.) * 2.5;
    float offx = v_uv.x + sin(v_uv.y + u_time * .1);
    float offy = v_uv.y * .1 - u_time * 0.005 - cos(u_time * .001) * .01;
    float n = snoise3(vec3(offx, offy, .9) * 2.5) - 2.1;
    float finalMask = smoothstep(1., 0.99, n + pow(c, 1.5));
    vec4 bg = vec4(0.12, 0.07, 0.28, 1.0);
    vec4 bg2 = vec4(0., 0., 0., 0.);
    gl_FragColor = mix(bg, bg2, finalMask);
}
Blue mask
// three.js code
const geometry2 = new THREE.PlaneBufferGeometry(1, 1, 1, 1);
const material2 = new THREE.ShaderMaterial({
    uniforms,
    vertexShader,
    fragmentShader,
    defines: {
        PR: window.devicePixelRatio.toFixed(1)
    }
});
const mesh2 = new THREE.Mesh(geometry2, material2);
this.scene.add(mesh2);
// fragment shader
void main() {
    vec2 res = u_res * PR;
    vec2 st = gl_FragCoord.xy / res.xy - 0.5;
    st.y *= u_res.y / u_res.x * 0.8;
    vec2 circlePos = st;
    float c = circle(circlePos, 0.2 + 0. * 0.1, 1.) * 2.5;
    float offx = v_uv.x + sin(v_uv.y + u_time * .1);
    float offy = v_uv.y * .1 - u_time * 0.005 - cos(u_time * .001) * .01;
    float n = snoise3(vec3(offx, offy, .9) * 2.5) - 2.1;
    float finalMask = smoothstep(1., 0.99, n + pow(c, 1.5));
    vec4 bg = vec4(0.12, 0.07, 0.28, 1.0);
    vec4 bg2 = vec4(0., 0., 0., 0.);
    gl_FragColor = mix(bg, bg2, finalMask);
}
Render Target code
this.rtWidth = window.innerWidth;
this.rtHeight = window.innerHeight;
this.renderTarget = new THREE.WebGLRenderTarget(this.rtWidth, this.rtHeight);
this.rtCamera = new THREE.PerspectiveCamera(
    this.camera.settings.fov,
    this.camera.settings.aspect,
    this.camera.settings.near,
    this.camera.settings.far
);
this.rtCamera.position.set(0, 0, this.camera.settings.perspective);
this.rtScene = new THREE.Scene();
this.rtScene.add(this.purpleBlob);
const geometry = new THREE.PlaneGeometry(window.innerWidth, window.innerHeight, 1);
const material = new THREE.MeshPhongMaterial({
    map: this.renderTarget.texture,
});
this.mesh = new THREE.Mesh(geometry, material);
this.scene.add(this.mesh);
I'm still new to shaders so please be patient. :-)
There are probably infinite ways to mask in three.js. Here are a few.
Use the stencil buffer
The stencil buffer is similar to the depth buffer in that for every pixel in the canvas or render target there is a corresponding stencil pixel. You need to tell three.js you want a stencil buffer, and then you can tell it, when rendering, what to do with the stencil buffer as you draw things.
You set the stencil settings on the Material.
You tell three.js:
what to do if the pixel you're drawing fails the stencil test
what to do if the pixel you're drawing fails the depth test
what to do if the pixel you're drawing passes the depth test
The things you can tell it to do for each of those conditions are: keep (do nothing), increment, decrement, increment with wraparound, decrement with wraparound, or set to a specific value.
You can also specify what the stencil test is by setting Material.stencilFunc
So, for example, you can clear the stencil buffer to 0 (the default?), set the stencil test so it always passes, and set the conditions so that if the depth test passes the stencil is set to 1. You then draw a bunch of things. Everywhere they are drawn there will now be a 1 in the stencil buffer.
Now you change the stencil test so it only passes if the stencil equals 1 (or 0) and then draw more stuff; things will only be drawn where the stencil equals the value you set.
This example uses the stencil buffer.
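A rough sketch of those Material settings (the property names are three.js's stencil settings; maskGeometry, blobGeometry, and the shader are stand-ins):
// draw the mask first: write 1 into the stencil, nothing into color
const maskMaterial = new THREE.MeshBasicMaterial({ colorWrite: false, depthWrite: false });
maskMaterial.stencilWrite = true;
maskMaterial.stencilRef = 1;
maskMaterial.stencilFunc = THREE.AlwaysStencilFunc;  // always pass the stencil test
maskMaterial.stencilZPass = THREE.ReplaceStencilOp;  // write stencilRef where drawn
const maskMesh = new THREE.Mesh(maskGeometry, maskMaterial);

// then draw the visible mesh only where the stencil equals 1
const blobMaterial = new THREE.ShaderMaterial({ /* your blob shader */ });
blobMaterial.stencilWrite = true;
blobMaterial.stencilRef = 1;
blobMaterial.stencilFunc = THREE.EqualStencilFunc;
const blobMesh = new THREE.Mesh(blobGeometry, blobMaterial);

maskMesh.renderOrder = 1;  // make sure the mask is drawn first
blobMesh.renderOrder = 2;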
Mask with an alpha mask
In this case you need 2 color textures and an alpha texture. How you get those is up to you. For example you could load all 3 from images. Or you could generate all 3 using 3 render targets. Finally you pass all 3 to a shader that mixes them as in
gl_FragColor = mix(colorFromTexture1, colorFromTexture2, valueFromAlphaTexture);
This example uses this alpha mixing method.
Note that if one of your 2 color textures has an alpha channel, you could use just 2 textures. You'd just pass one of the color textures as your mask.
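For example (a sketch; texture1 is assumed to carry the mask in its alpha channel, and v_uv is a stand-in varying):
vec4 color1 = texture2D(texture1, v_uv);  // color1's alpha channel doubles as the mask
vec4 color2 = texture2D(texture2, v_uv);
gl_FragColor = mix(color1, color2, color1.a);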
Or of course you could calculate a mask based on the colors in one image or the other or both. For example
// assume you have function that converts from rgb to hue,saturation,value
vec3 hsv = rgb2hsv(colorFromTexture1.rgb);
float hue = hsv.x;
// pick one or the other if color1 is close to green
float mixAmount = step(abs(hue - 0.33), 0.05);
gl_FragColor = mix(colorFromTexture1, colorFromTexture2, mixAmount);
The point here is not that exact code; it's that you can make any formula you want for the mask, based on whatever you want: color, position, random math, sine waves based on time, some formula that generates a blob, whatever. The most common is some code that just looks up a mixAmount from a texture, which is what the linked example above does.
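That texture lookup looks something like this (a sketch; u_maskTexture and v_uv are stand-in names):
float mixAmount = texture2D(u_maskTexture, v_uv).r;  // read the mask from its own texture
gl_FragColor = mix(colorFromTexture1, colorFromTexture2, mixAmount);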
ShaderToy style
Your code above appears to be a ShaderToy-style shader which is drawing a fullscreen quad. Instead of drawing 2 separate things you can just draw them in the same shader:
vec4 computeBlueBlob() {
    ...
    return blueBlobColor;
}
vec4 computeWhiteBlob() {
    ...
    return whiteBlobColor;
}
void main() {
    vec4 color1 = computeBlueBlob();
    vec4 color2 = computeWhiteBlob();
    float mixAmount = color2.a;  // note: this could be any
                                 // formula to decide which colors
                                 // to draw
    gl_FragColor = mix(color1, color2, mixAmount);
}
Note, just like above, that how you compute mixAmount is up to you. Base it off anything: color1.r, color2.r, some formula, some hue, some other blob-generation function, whatever.

Orbiting a cube in WebGL with glMatrix

https://jsfiddle.net/sepoto/Ln7qvv7w/2/
I have a base set up to display a cube with different colored faces. What I am trying to do is set up a camera and apply a combined X-axis and Y-axis rotation so that the cube spins around both axes concurrently. There seem to be some problems with the matrices I set up, as I can see the blue face doesn't look quite right. There are some examples of how this is done using older versions of glMatrix, however the code in those examples no longer works because of some changes to vec4 in the glMatrix library. Does anyone know how this can be done using the latest version of glMatrix, as I have attached a CDN to the fiddle?
Thank you!
function drawScene() {
    gl.viewport(0, 0, gl.viewportWidth, gl.viewportHeight);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
    mat4.ortho(mOrtho, -5, 5, 5, -5, 2, -200);
    mat4.identity(mMove);
    var rotMatrix = mat4.create();
    mat4.identity(rotMatrix);
    rotMatrix = mat4.fromYRotation(rotMatrix, yRot, rotMatrix);
    rotMatrix = mat4.fromXRotation(rotMatrix, xRot, rotMatrix);
    mat4.multiply(mMove, rotMatrix, mMove);
    setMatrixUniforms();
    gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
    gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, triangleVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
    gl.bindBuffer(gl.ARRAY_BUFFER, triangleColorBuffer);
    gl.vertexAttribPointer(shaderProgram.vertexColorAttribute, triangleColorBuffer.itemSize, gl.FLOAT, false, 0, 0);
    gl.drawArrays(gl.TRIANGLES, 0, triangleVertexPositionBuffer.numItems);
    yRot += 0.01;
    xRot += 0.01;
}
As the name says, fromYRotation() initializes a matrix to a given rotation. Hence, you need two temporary matrices for the partial rotations, which you can then combine:
var rotMatrix = mat4.create();
var rotMatrixX = mat4.create();
var rotMatrixY = mat4.create();
mat4.fromYRotation(rotMatrixY, yRot);
mat4.fromXRotation(rotMatrixX, xRot);
mat4.multiply(rotMatrix, rotMatrixY, rotMatrixX);
And the reason why your blue face was behaving strangely was the missing depth test. Enable it in your initialization method:
gl.enable(gl.DEPTH_TEST);
You don't need to use three matrices:
// you should do allocations outside of the render loop
var rotMat = mat4.create();
// no need to set the matrix to identity, as
// fromYRotation resets rotMat's contents anyway
mat4.fromYRotation(rotMat, yRot);
mat4.rotateX(rotMat, rotMat, xRot);

Mapping texture to THREE.Points

I'm trying to map one texture (the same as on the cube's side) to multiple points so that each point is colored with part of the texture and all the points together make up a complete image.
To do this I have a custom shader that tries to map point position to texture coordinates like this:
var uniform = THREE.TextureShader.uniforms();
uniform.texture.value = texture;
uniform.bbMin.value = new THREE.Vector3(-2.5, -2.5, 0); //THREE.Points Box3 min value
uniform.bbMax.value = new THREE.Vector3(2.5, 2.5, 0); //THREE.Points Box3 max value
//Shader
"vec3 p = (position - bbMin) / (bbMax - bbMin);",
/*This should give me fraction between 0 and 1 to match part of texture but it is not*/
"vColor = texture2D(texture, p.xy).rgb;",
Codepen for testing is here.
Any ideas how to calculate it correctly?
The desired result would be something like this, only there would be space between the tiles.
50000 points or 50000 planes, it's all the same: you need some way to pass in data per point or per plane that lets you compute your texture coordinates. Personally I'd choose planes, because you can rotate, scale, and flip planes, whereas you can't do that with POINTS.
In any case though, there's an infinite number of ways to do that, so it's really up to you to pick one. For points you get 1 "chunk" of data per point, where by "chunk" I mean all the data from all the attributes you set up.
So for example you could set up an attribute with an X,Y position representing which piece of that sprite you want to draw. In your example you've divided it 6x6 so make a vec2 attribute with values 0-5, 0-5 selecting the portion of the sprite.
Pass that into the vertex shader then you can either do some math there or pass it into the fragment shader directly. Let's assume you pass it into the fragment shader directly.
gl_PointCoord holds the texture coordinates for the POINT, which go from 0 to 1, so
varying vec2 v_segment;  // the segment of the sprite, 0-5 x, 0-5 y
vec2 uv = (v_segment + gl_PointCoord) / 6.0;
vec4 color = texture2D(yourTextureUniform, uv);
Seems like it would work.
That one is hardcoded to 6x6. Change it to NxM by passing that in:
varying vec2 v_segment;    // the segment of the sprite
uniform vec2 numSegments;  // number of segments across and down the sprite
vec2 uv = (v_segment + gl_PointCoord) / numSegments;
vec4 color = texture2D(yourTextureUniform, uv);
Example:
"use strict";
var gl = twgl.getWebGLContext(document.getElementById("c"));
var programInfo = twgl.createProgramInfo(gl, ["vs", "fs"]);
// make a rainbow circle texture from a 2d canvas as it's easier than downloading
var ctx = document.createElement("canvas").getContext("2d");
ctx.canvas.width = 128;
ctx.canvas.height = 128;
var gradient = ctx.createRadialGradient(64,64,60,64,64,0);
for (var i = 0; i <= 12; ++i) {
gradient.addColorStop(i / 12,"hsl(" + (i / 12 * 360) + ",100%,50%");
}
ctx.fillStyle = gradient;
ctx.fillRect(0, 0, 128, 128);
// make points and segment data
var numSegmentsAcross = 6;
var numSegmentsDown = 5;
var positions = [];
var segments = [];
for (var y = 0; y < numSegmentsDown; ++y) {
for (var x = 0; x < numSegmentsAcross; ++x) {
positions.push(x / (numSegmentsAcross - 1) * 2 - 1, y / (numSegmentsDown - 1) * 2 - 1);
segments.push(x, y);
}
}
var arrays = {
position: { size: 2, data: positions },
segment: { size: 2, data: segments },
};
var bufferInfo = twgl.createBufferInfoFromArrays(gl, arrays);
var tex = twgl.createTexture(gl, { src: ctx.canvas });
var uniforms = {
u_numSegments: [numSegmentsAcross, numSegmentsDown],
u_texture: tex,
};
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
twgl.drawBufferInfo(gl, gl.POINTS, bufferInfo);
canvas { border: 1px solid black; }
<script id="vs" type="notjs">
attribute vec4 position;
attribute vec2 segment;
varying vec2 v_segment;
void main() {
gl_Position = position;
v_segment = segment;
gl_PointSize = 20.0;
}
</script>
<script id="fs" type="notjs">
precision mediump float;
varying vec2 v_segment;
uniform vec2 u_numSegments;
uniform sampler2D u_texture;
void main() {
vec2 uv = (v_segment + vec2(gl_PointCoord.x, 1.0 - gl_PointCoord.y)) / u_numSegments;
gl_FragColor = texture2D(u_texture, uv);
}
</script>
<script src="https://twgljs.org/dist/twgl.min.js"></script>
<canvas id="c"></canvas>

OpenGL Orthographic Projection and Translate

The code below draws a rectangle in 2D screen space using OpenGL ES2. How do I move the drawing of the rectangle by 1 pixel to the right without modifying its vertices?
Specifically, what I am trying to do is move the coordinates 0.5 pixels to the right. I had to do this previously with GLES1.x and the reason for this is that I had problems drawing lines in the correct place unless I did a glTranslate() with 0.5f.
I'm confused about the use of glm::translate() in the code below.
If I attempt a translate of 0.5f, the whole rectangle moves from the left of the screen to the middle - a jump of about 200 pixels.
I get the same result whether I do a glm::translate on the Model or the View matrix.
Is the order of the matrix multiplication wrong and what should it be?
short g_RectFromTriIndices[] =
{
    0, 1, 2,
    0, 2, 3
}; // The order of vertex rendering.
GLfloat g_AspectRatio = 1.0f;
//--------------------------------------------------------------------------------------------
// LoadTwoTriangleVerticesForRect()
//--------------------------------------------------------------------------------------------
void LoadTwoTriangleVerticesForRect( GLfloat *pfRectVerts, float fLeft, float fTop, float fWidth, float fHeight )
{
    pfRectVerts[ 0 ] = fLeft;
    pfRectVerts[ 1 ] = fTop;
    pfRectVerts[ 2 ] = 0.0;
    pfRectVerts[ 3 ] = fLeft + fWidth;
    pfRectVerts[ 4 ] = fTop;
    pfRectVerts[ 5 ] = 0.0;
    pfRectVerts[ 6 ] = fLeft + fWidth;
    pfRectVerts[ 7 ] = fTop + fHeight;
    pfRectVerts[ 8 ] = 0.0;
    pfRectVerts[ 9 ] = fLeft;
    pfRectVerts[ 10 ] = fTop + fHeight;
    pfRectVerts[ 11 ] = 0.0;
}
//--------------------------------------------------------------------------------------------
// Draw()
//--------------------------------------------------------------------------------------------
void Draw( void )
{
    GLfloat afRectVerts[ 12 ];
    //LoadTwoTriangleVerticesForRect( afRectVerts, 0, 0, g_ScreenWidth, g_ScreenHeight );
    LoadTwoTriangleVerticesForRect( afRectVerts, 50, 50, 100, 100 );
    // Correct for aspect ratio so squares ARE squares and not rectangular stretchings..
    g_AspectRatio = (GLfloat) g_ScreenWidth / (GLfloat) g_ScreenHeight;
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    GLuint hPosition = glGetAttribLocation( g_SolidProgram, "vPosition" );
    // PROJECTION
    glm::mat4 Projection = glm::mat4(1.0);
    // Projection = glm::perspective( 45.0f, g_AspectRatio, 0.1f, 100.0f );
    // VIEW
    glm::mat4 View = glm::mat4(1.0);
    static GLfloat transValY = 0.5f;
    static GLfloat transValX = 0.5f;
    //View = glm::translate( View, glm::vec3( transValX, transValY, 0.0f ) );
    // MODEL
    glm::mat4 Model = glm::mat4(1.0);
    // static GLfloat rot = 0.0f;
    // rot += 0.001f;
    // Model = glm::rotate( Model, rot, glm::vec3( 0.0f, 0.0f, 1.0f ) ); // where x, y, z is axis of rotation (e.g. 0 1 0)
    glm::mat4 Ortho = glm::ortho( 0.0f, (GLfloat) g_ScreenWidth, (GLfloat) g_ScreenHeight, 0.0f, 0.0f, 1000.0f );
    glm::mat4 MVP;
    MVP = Projection * View * Model * Ortho;
    GLuint hMVP;
    hMVP = glGetUniformLocation( g_SolidProgram, "MVP" );
    glUniformMatrix4fv( hMVP, 1, GL_FALSE, glm::value_ptr( MVP ) );
    glEnableVertexAttribArray( hPosition );
    // Prepare the triangle coordinate data
    glVertexAttribPointer( hPosition, 3, GL_FLOAT, FALSE, 0, afRectVerts );
    // Draw the rectangle using triangles
    glDrawElements( GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, g_RectFromTriIndices );
    glDisableVertexAttribArray( hPosition );
}
Here is the vertex shader source:
attribute vec4 vPosition;
uniform mat4 MVP;
void main()
{
    gl_Position = MVP * vPosition;
}
UPDATE: I'm finding the below matrix multiplication is giving me better results. I don't know if this is "correct" or not though:
MVP = Ortho * Model * View * Projection;
That MVP seems really weird to me; you shouldn't need four matrices in there to get your MVP. Your Projection matrix should just be the orthographic one, so in this case:
MVP = Ortho * View * Model;
But I can also see that your perspective Projection matrix has been commented out, so I don't think it's doing much right now.
By the sounds of it, since you want the model coordinates to stay the same while moving, you want to move your camera, right? So (by the looks of it your vertices use a 1-unit-per-pixel coordinate range) doing a translate of 0.5f on your View shifts by half of whatever your projection space is. Instead, you want to have something like a Camera class that you get your View from using the camera's X and Y positions.
Then you can get your View matrix using the camera's position, which can share the world-unit system you're using, which is 1 unit per pixel.
glm::mat4 view;
view = glm::lookAt(glm::vec3(camX, camY, 0.0), glm::vec3(0.0, 0.0, 0.0),glm::vec3(0.0, 1.0, 0.0));
I ripped that line straight (minus changing camZ for camY) from a really good 3D tutorial on cameras here, but the exact same concept can be applied to an orthographic camera instead.
I know it's a bit more overhead, but having a camera class that you can control this way is nicer practice than manually using glm::translate, rotate & scale to control your viewport (and it lets you ensure that you're working with a more obvious coordinate system between your camera's and your models' coordinate points).
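Putting that together, a minimal sketch (assuming the orthographic matrix takes the Projection slot and the half-pixel camera shift lives in the View, in the same 1-unit-per-pixel space):
// Projection: the orthographic matrix from the question
glm::mat4 Projection = glm::ortho( 0.0f, (GLfloat) g_ScreenWidth, (GLfloat) g_ScreenHeight, 0.0f, 0.0f, 1000.0f );
// View: the camera shifted half a pixel, now in pixel units rather than clip space
glm::mat4 View = glm::translate( glm::mat4( 1.0f ), glm::vec3( 0.5f, 0.5f, 0.0f ) );
// Model: identity, since the vertices are already in screen pixels
glm::mat4 Model = glm::mat4( 1.0f );
glm::mat4 MVP = Projection * View * Model;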

Objects look weird with first-person camera in DirectX

I'm having problems creating a 3D first-person camera in DirectX 11.
I have a camera at (0, 0, -2) looking at (0, 0, 100). There is a box at (0, 0, 0) and the box is rendered correctly. See this image below:
When the position of the box (not the camera) changes, it is rendered correctly. For example, the next image shows the box at (1, 0, 0) and the camera still at (0, 0, -2):
However, as soon as the camera moves left or right, the box should go to the opposite direction, but it looks twisted instead. Here is an example when the camera is at (1, 0, -2) and looking at (1, 0, 100). The box is still at (0, 0, 0):
Here is how I set my camera:
// Set the world transformation matrix.
D3DXMATRIX rotationMatrix; // A matrix to store the rotation information
D3DXMATRIX scalingMatrix; // A matrix to store the scaling information
D3DXMATRIX translationMatrix; // A matrix to store the translation information
D3DXMatrixIdentity(&translationMatrix);
// Make the scene centered on the camera position.
D3DXMatrixTranslation(&translationMatrix, -camera.GetX(), -camera.GetY(), -camera.GetZ());
m_worldTransformationMatrix = translationMatrix;
// Set the view transformation matrix.
D3DXMatrixIdentity(&m_viewTransformationMatrix);
D3DXVECTOR3 cameraPosition(camera.GetX(), camera.GetY(), camera.GetZ());
// ------------------------
// Compute the lookAt position
// ------------------------
const FLOAT lookAtDistance = 100;
FLOAT lookAtXPosition = camera.GetX() + lookAtDistance * cos((FLOAT)D3DXToRadian(camera.GetXZAngle()));
FLOAT lookAtYPosition = camera.GetY() + lookAtDistance * sin((FLOAT)D3DXToRadian(camera.GetYZAngle()));
FLOAT lookAtZPosition = camera.GetZ() + lookAtDistance * (sin((FLOAT)D3DXToRadian(camera.GetXZAngle())) * cos((FLOAT)D3DXToRadian(camera.GetYZAngle())));
D3DXVECTOR3 lookAtPosition(lookAtXPosition, lookAtYPosition, lookAtZPosition);
D3DXVECTOR3 upDirection(0, 1, 0);
D3DXMatrixLookAtLH(&m_viewTransformationMatrix,
&cameraPosition,
&lookAtPosition,
&upDirection);
RECT windowDimensions = GetWindowDimensions();
FLOAT width = (FLOAT)(windowDimensions.right - windowDimensions.left);
FLOAT height = (FLOAT)(windowDimensions.bottom - windowDimensions.top);
// Set the projection matrix.
D3DXMatrixIdentity(&m_projectionMatrix);
D3DXMatrixPerspectiveFovLH(&m_projectionMatrix,
(FLOAT)(D3DXToRadian(45)), // Horizontal field of view
width / height, // Aspect ratio
1.0f, // Near view-plane
100.0f); // Far view-plane
Here is how the final matrix is set:
D3DXMATRIX finalMatrix = m_worldTransformationMatrix * m_viewTransformationMatrix * m_projectionMatrix;
// Set the new values for the constant buffer
mp_deviceContext->UpdateSubresource(mp_constantBuffer, 0, 0, &finalMatrix, 0, 0);
And finally, here is the vertex shader that uses the constant buffer:
VOut VShader(float4 position : POSITION, float4 color : COLOR, float2 texcoord : TEXCOORD)
{
    VOut output;
    output.color = color;
    output.texcoord = texcoord;
    output.position = mul(position, finalMatrix); // Transform the vertex from 3D to 2D
    return output;
}
Do you see what I'm doing wrong? If you need more information on my code, feel free to ask: I really want this to work.
Thanks!
The problem is you are setting finalMatrix with a row-major matrix, but HLSL expects a column-major matrix by default. The solution is to use D3DXMatrixTranspose before updating the constants, or to declare the matrix row_major in the HLSL file like this:
cbuffer ConstantBuffer
{
    row_major float4x4 finalMatrix;
}
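If you go the transpose route instead, a minimal sketch (variable names taken from the code above):
D3DXMATRIX finalMatrix = m_worldTransformationMatrix * m_viewTransformationMatrix * m_projectionMatrix;
// Transpose from row-major to the column-major layout HLSL expects by default.
D3DXMATRIX transposedMatrix;
D3DXMatrixTranspose(&transposedMatrix, &finalMatrix);
mp_deviceContext->UpdateSubresource(mp_constantBuffer, 0, 0, &transposedMatrix, 0, 0);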
