I can't get the following to draw the scene into the shape created as a stencil mask. Instead the code just seems to render the stencil itself as a black object.
http://signaturefloors.dev.flooradvisor.com.au/productapp/floor_align.php
My render function is:
// Grab the raw WebGL context from the canvas three.js renders through.
var gl = floor_align.renderer.domElement.getContext('webgl') || floor_align.renderer.domElement.getContext('experimental-webgl');
gl.clearStencil(0);
gl.clear(gl.STENCIL_BUFFER_BIT);
gl.enable(gl.STENCIL_TEST);
// Every mask fragment passes the test and writes 1 into the stencil buffer.
gl.stencilFunc(gl.ALWAYS, 1, 1);
gl.stencilOp(gl.KEEP, gl.REPLACE, gl.REPLACE);
gl.colorMask(0, 0, 0, 0);
// Floor Mask (Create a stencil that we render the next pass into)
floor_align.renderer.render(floor_align.maskScene, floor_align.maskCamera);
gl.colorMask(1, 1, 1, 1);
// NOTE(review): NOTEQUAL draws only OUTSIDE the mask, and REPLACE keeps
// mutating the stencil during this pass — the working version further down
// uses EQUAL with KEEP/KEEP/KEEP instead.
gl.stencilFunc(gl.NOTEQUAL, 1, 1);
gl.stencilOp(gl.KEEP, gl.REPLACE, gl.REPLACE);
// Render a floor pass
floor_align.renderer.render(floor_align.scene, floor_align.camera);
gl.disable(gl.STENCIL_TEST);
The renderer has autoClear = false;
The accepted answer doesn't work on the latest three.js. If anyone is interested, here's an up-to-date version that works (using r111):
gl.enable(gl.STENCIL_TEST);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.REPLACE); // write stencil only when depth+stencil both pass
//renderer.clear(); <-- works without this too
gl.stencilFunc(gl.ALWAYS, 1, 0xFF); // every mask fragment writes reference value 1
gl.stencilMask(0xFF); // allow writes to all stencil bits
renderer.render(maskScene, camera);
gl.stencilFunc(gl.EQUAL, 1, 0xFF); // subsequent pass draws only where stencil == 1
gl.stencilMask(0x00); // lock the stencil buffer while drawing the scene
renderer.render(scene, camera);
gl.stencilMask(0xFF); // restore the write mask so later stencil clears work
gl.disable(gl.STENCIL_TEST);
Also in my case I didn't disable auto-clear and it still works.
Through trial and error I updated my code to this and it now works. Clearing the depth buffer seems particularly important so I guess my mask must have been hiding the more distant floor fragments.
// Render the scene: first write the floor mask into the stencil buffer,
// then render the floor pass only where the stencil was set to 1.
function fla_render() {
floor_align.renderer.clear();
// Background
//floor_align.renderer.render(floor_align.scene, floor_align.camera);
floor_align.renderer.clearDepth();
// Grab the raw WebGL context from the canvas three.js renders through.
var gl = floor_align.renderer.domElement.getContext('webgl') || floor_align.renderer.domElement.getContext('experimental-webgl');
// Clear the stencil buffer
gl.clearStencil(0);
gl.clear(gl.STENCIL_BUFFER_BIT);
// Replacing the values at the stencil buffer to 1 on every pixel we draw
gl.stencilFunc(gl.ALWAYS, 1, 1);
gl.stencilOp(gl.KEEP, gl.REPLACE, gl.REPLACE);
// Disable color (u can also disable here the depth buffers)
gl.colorMask(false, false, false, false);
// Write to stencil
gl.enable(gl.STENCIL_TEST);
// Floor Mask (Create a stencil that we render the next pass into)
floor_align.renderer.render(floor_align.maskScene, floor_align.maskCamera);
// Telling the stencil now to draw/keep only pixels that equals 1 - which we set earlier
gl.stencilFunc(gl.EQUAL, 1, 1);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP);
// Clear depth buffer (seems important): the mask pass wrote depth values that
// would otherwise occlude the more distant floor fragments.
floor_align.renderer.clearDepth();
// Enable color
gl.colorMask(true, true, true, true);
// Render a floor pass
floor_align.renderer.render(floor_align.scene, floor_align.camera);
// Disable stencil test;
gl.disable(gl.STENCIL_TEST);
}
Related
I want to implement an animation.
The animation should be a line move to another line. There will be some deformation in the process of the line moving
There is a correspondence between the points of the two lines.
How to implements it with three.js?
I tried to use tween.js. It works.
const material = new THREE.LineBasicMaterial({ color: 0x0000ff })
// Start shape: three points forming the initial line (x, y, z triplets).
const geometry = new THREE.BufferGeometry().setAttribute('position',
new THREE.Float32BufferAttribute([-2, 0, 0, -0.5, 0, -0.5, 0, 0, -2], 3))
const line = new THREE.Line(geometry, material)
// Target shape: the point-for-point destination the tween morphs toward.
var position2 = new Float32Array([5, 0, 0, 1, 1, 1, 0, 0, 5])
setupObjectPositionTween(line, geometry.attributes.position.array, position2,
10000, 0, TWEEN.Easing.Linear.None) // duration, delay, easing
scene.add(line)
// Tween the values of `source` (the object's position attribute array) toward
// `target`, flagging the geometry's position attribute for re-upload to the
// GPU on every animation frame. `duration`/`delay` are in ms; `easing` is a
// TWEEN easing function.
function setupObjectPositionTween(object, source, target, duration, delay, easing) {
const positionTween = new TWEEN.Tween(source);
positionTween.to(target, duration);
positionTween.delay(delay);
positionTween.easing(easing);
positionTween.onUpdate(function () {
// TWEEN mutates `source` in place; point the attribute at the
// interpolated data and mark it dirty so three.js uploads it.
object.geometry.attributes.position.array = source;
object.geometry.attributes.position.needsUpdate = true;
});
positionTween.start();
}
and in the render function
TWEEN.update()
I suggest you implement the animation of the green line with morph targets. Meaning the green line represents your default geometry whereas the blue line represents a morph target (also known as blend shape). You can then animate the transformation from green to blue by modulating the morphTargetInfluences parameter from 0 to 1.
Morph targets are part of the geometry data and defined in the BufferGeometry.morphAttributes property. Since multiple meshes can share the same geometry, the morphTargetInfluences property belongs to 3D objects like a mesh.
I suggest you study how the official example webgl_morphtargets is implemented. You can apply the same principles on lines.
I'm having trouble with an effect I'm trying to achieve in Three.js. I want to get outlines in my game, but I also want them to be affected by depth. I am using 3 scenes (one which has the main model, one which has the mask, and one which has coloured models for the outlines).
My mask looks like this:
As you can see, it consists of black, white, and alpha.
Now, what I want is for every instance of alpha and black in the mask to cut out my image, while every instance of white to be shown.
Unfortunately, with my current stencil script, only the alpha gets cut out:
var gl = this.renderer.domElement.getContext('webgl') || this.renderer.domElement.getContext('experimental-webgl');
// Make the entire stencil solid first.
gl.clearStencil(1);
gl.clear(gl.STENCIL_BUFFER_BIT);
// Whenever the value of a pixel is 1 in the mask, make that pixel = 0 in the stencil.
// 0xFF is the identifier I think.
// (0xFF is actually the stencil comparison/write bitmask, not an identifier.)
gl.stencilFunc(gl.ALWAYS, 0, 0xFF);
// NOTE(review): `gl.GL_REPLACE` is not a WebGL constant — it evaluates to
// undefined; the WebGL name is `gl.REPLACE`, as used in the first argument.
gl.stencilOp(gl.REPLACE, gl.GL_REPLACE, gl.GL_REPLACE);
// Disable color (u can also disable here the depth buffers)
gl.colorMask(false, false, false, false);
// Write to stencil
gl.enable(gl.STENCIL_TEST);
gl.stencilMask(0xFF);
// Mask (Create a stencil that we render the next pass into)
this.renderer.render(this.sceneMask, this.camera);
// Wherever the stencil is = 1, show the model.
gl.stencilFunc(gl.EQUAL, 1, 0xFF);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP);
// Enable color
gl.colorMask(true, true, true, true);
// Clear depth buffer (seems important)
this.renderer.clearDepth();
this.renderer.render(this.sceneOutline, this.camera);
// Disable stencil test;
gl.disable(gl.STENCIL_TEST);
What do I need to do to have both cut out? I don't really understand everything about the stencil script I used, but it only seems to react to alpha/non alpha.
Thanks in advance!
https://jsfiddle.net/sepoto/Ln7qvv7w/2/
I have a base set up to display a cube with different colored faces. What I am trying to do is set up a camera and apply a combined X-axis and Y-axis rotation so that the cube spins around both axes concurrently. There seem to be some problems with the matrices I set up, as I can see the blue face doesn't look quite right. There are some examples of how this is done using older versions of glMatrix; however, the code in those examples no longer works because of some changes to vec4 in the glMatrix library. Does anyone know how this can be done using the latest version of glMatrix? I have attached a CDN link to the fiddle.
Thank you!
function drawScene() {
gl.viewport(0,0,gl.viewportWidth, gl.viewportHeight);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
mat4.ortho( mOrtho, -5, 5, 5, -5, 2, -200);
mat4.identity(mMove);
var rotMatrix = mat4.create();
mat4.identity(rotMatrix);
// NOTE(review): fromYRotation/fromXRotation each RESET the matrix to a pure
// single-axis rotation, so the second call discards the first — the X and Y
// rotations are never combined (the answer below shows the two-matrix fix).
rotMatrix = mat4.fromYRotation(rotMatrix, yRot,rotMatrix);
rotMatrix = mat4.fromXRotation(rotMatrix, xRot,rotMatrix);
mat4.multiply(mMove, rotMatrix, mMove);
setMatrixUniforms();
gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, triangleVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, triangleColorBuffer);
gl.vertexAttribPointer(shaderProgram.vertexColorAttribute, triangleColorBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.drawArrays(gl.TRIANGLES, 0, triangleVertexPositionBuffer.numItems);
// Advance both rotation angles for the next frame.
yRot += 0.01;
xRot += 0.01;
}
As the name says, fromYRotation() initializes a matrix to a given rotation. Hence, you need two temporary matrices for the partial rotations, which you can then combine:
var rotMatrix = mat4.create();
var rotMatrixX = mat4.create();
var rotMatrixY = mat4.create();
// Build each axis rotation in its own matrix, then combine them.
mat4.fromYRotation(rotMatrixY, yRot);
mat4.fromXRotation(rotMatrixX, xRot);
mat4.multiply(rotMatrix, rotMatrixY, rotMatrixX); // rotMatrix = Y * X
And the reason why your blue face was behaving strangely, was the missing depth test. Enable it in your initialization method:
gl.enable(gl.DEPTH_TEST);
You dont need to use three matrices:
// you should do allocations outside of the renderloop
var rotMat = mat4.create();
// no need to set the matrix to identity as
// fromYRotation resets rotMat's contents anyway
mat4.fromYRotation(rotMat, yRot);
// Apply the X rotation on top of the Y rotation, in place.
// glMatrix's mat4.rotateX signature is (out, a, rad) — three arguments;
// the two-argument call would silently treat the angle as the source matrix.
mat4.rotateX(rotMat, rotMat, xRot);
I'm having an issue while rendering a square in WebGL. When I run the program in Chrome, I'm getting the error:
GL ERROR :GL_INVALID_OPERATION : glDrawArrays: attempt to access out of range vertices in attribute 0
I've assumed this is because, at some point, the buffers are looking at the wrong arrays when trying to get data. I've pinpointed the issue to the
gl.vertexAttribPointer(pColorIndex, 4, gl.FLOAT, false, 0, 0);
line in the code below. i.e. if I change the 4 to a 2, the code will run, but not properly (as I'm looking at a vec4 for color data here). Is there an issue with the way my arrays are bound?
bindColorDisplayShaders();
// clear the framebuffer
gl.clear(gl.COLOR_BUFFER_BIT);
// bind the shader
gl.useProgram(shader);
// set the value of the uniform variable in the shader
var shift_loc = gl.getUniformLocation(shader, "shift");
gl.uniform1f(shift_loc, .5);
// bind the buffer
gl.bindBuffer(gl.ARRAY_BUFFER, vertexbuffer);
// get the index for the a_Position attribute defined in the vertex shader
var positionIndex = gl.getAttribLocation(shader, 'a_Position');
if (positionIndex < 0) {
console.log('Failed to get the storage location of a_Position');
return;
}
// "enable" the a_position attribute
gl.enableVertexAttribArray(positionIndex);
// associate the data in the currently bound buffer with the a_position attribute
// (The '2' specifies there are 2 floats per vertex in the buffer. Don't worry about
// the last three args just yet.)
gl.vertexAttribPointer(positionIndex, 2, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// bind the buffer with the color data
gl.bindBuffer(gl.ARRAY_BUFFER, chosencolorbuffer);
// NOTE(review): a_ChosenColor is fetched with getUniformLocation, which
// returns a WebGLUniformLocation object (or null) — not the integer attribute
// index that enableVertexAttribArray/vertexAttribPointer expect. If it is an
// attribute, use gl.getAttribLocation; if it is a uniform, set it with
// gl.uniform4fv instead of a buffer.
var pColorIndex = gl.getUniformLocation(shader, 'a_ChosenColor');
if(pColorIndex < 0){
console.log('Failed to get the storage location of a_ChosenColor');
}
gl.enableVertexAttribArray(pColorIndex);
gl.vertexAttribPointer(pColorIndex, 4, gl.FLOAT, false, 0, 0);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// draw, specifying the type of primitive to assemble from the vertices
gl.drawArrays(gl.TRIANGLES, 0, numPoints);
You can only use either a uniform or a vertex attribute;
these are two different things.
When using a vertex attribute you have to match the amount of vertices in your position buffer, and get the location using gl.getAttribLocation.
When using a uniform you're not supplying its data via array buffers but using the gl.uniform* methods to set their value.
In your example gl.uniform4fv(pColorIndex, yourColorVector).
I'm playing with 2D graphics in OpenGL - fractals and other fun stuff ^_^. My basic setup is rendering a couple triangles to fill the screen and using a fragment shader to draw cool stuff on them. I'd like to smooth things out a bit, so I started looking into supersampling. It's not obvious to me how to go about this. Here's what I've tried so far...
First, I looked at the Apple docs on anti-aliasing. I updated my pixel format initialization:
NSOpenGLPixelFormatAttribute attrs[] =
{
NSOpenGLPFADoubleBuffer,
NSOpenGLPFADepthSize, 24,
NSOpenGLPFAOpenGLProfile, NSOpenGLProfileVersion4_1Core,
NSOpenGLPFASupersample, // request supersample anti-aliasing
NSOpenGLPFASampleBuffers, 1, // one multisample buffer...
NSOpenGLPFASamples, 4, // ...with 4 samples per pixel
0 // attribute list is zero-terminated
};
I also added the glEnable(GL_MULTISAMPLE); line. GL_MULTISAMPLE_FILTER_HINT_NV doesn't seem to be defined (docs appear to be out of date), so I wasn't sure what to do there.
That made my renders slower but doesn't seem to be doing anti-aliasing, so I tried the "Render-to-FBO" approach described on the OpenGL Wiki on Multisampling. I've tried a bunch of variations, with a variety of outcomes: successful rendering (which don't appear to be anti-aliased), rendering garbage to the screen (fun!), crashes (app evaporates and I get a system dialog about graphics issues), and making my laptop unresponsive aside from the cursor (got the system dialog about graphics issues after hard reboot).
I am checking my framebuffer's status before drawing, so I know that's not the issue. And I'm sure I'm rendering with hardware, not software - saw that suggestion on other posts.
I've spent a fair amount of time on it and still don't quite understand how to approach this. One thing I'd love some help on is how to query GL to see if supersampling is enabled properly, or how to tell how many times my fragment shader is called, etc. I'm also a bit confused about where some of the calls go - most examples I find just say which methods to call, but don't specify which ones need to go in the draw callback. Anybody have a simple example of SSAA with OpenGL 3 or 4 and OSX... or other things to try?
Edit: drawing code - super broken (don't judge me), but for reference:
// Per-frame draw. NOTE(review): as written this never renders into the FBO —
// see the binding notes inline. Also, the texture and framebuffer are created
// and destroyed every frame; creation belongs in init, deletion in teardown.
- (void)draw
{
    glBindVertexArray(_vao); // todo: is this necessary? (also in init)
    glBufferData(GL_ARRAY_BUFFER, 12 * sizeof(GLfloat), points, GL_STATIC_DRAW);
    // 4x multisample texture at 2x the window size (multisampling AND supersampling).
    glGenTextures( 1, &_tex );
    glBindTexture( GL_TEXTURE_2D_MULTISAMPLE, _tex );
    glTexImage2DMultisample( GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA8, _width * 2, _height * 2, false );
    glGenFramebuffers( 1, &_fbo );
    glBindFramebuffer( GL_FRAMEBUFFER, _fbo );
    glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, _tex, 0 );
    GLint status;
    status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
    if (status != GL_FRAMEBUFFER_COMPLETE) {
        // fixed: the string literal was garbled as '#"' — Objective-C uses '@"'.
        NSLog(@"incomplete buffer 0x%x", status);
        return;
    }
    // NOTE(review): binding the DRAW framebuffer to 0 means the clear and draw
    // below target the default framebuffer, not the FBO — so the blit then
    // copies uninitialized FBO contents over the rendered image.
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
    glDrawBuffer(GL_BACK);
    glClear(GL_COLOR_BUFFER_BIT);
    glDrawArrays(GL_TRIANGLES, 0, 6);
    glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height, GL_COLOR_BUFFER_BIT, GL_LINEAR);
    glDeleteTextures(1, &_tex);
    glDeleteFramebuffers(1, &_fbo);
    glBindFramebuffer( GL_FRAMEBUFFER, 0 );
}
Update:
I changed my code per Reto's suggestion below:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo); // render into the FBO
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo); // then copy FBO -> default framebuffer
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
This caused the program to render garbage to the screen. I then got rid of the * 2 multiplier, and it still drew garbage to the screen. I then turned off the NSOpenGLPFA options related to multi/super-sampling, and it rendered normally, with no anti-aliasing.
I also tried using a non-multisample texture, but kept getting incomplete attachment errors. I'm not sure if this is due to the NVidia issue mentioned on the OpenGL wiki (will post in a comment since I don't have enough rep to post more than 2 links) or something else. If someone could suggest a way to find out why the attachment is incomplete, that would be very, very helpful.
Finally, I tried using a renderbuffer instead of a texture, and found that specifying width and height greater than the viewport size in glRenderbufferStorage doesn't seem to work as expected.
GLuint rb;
glGenRenderbuffers(1, &rb);
glBindRenderbuffer(GL_RENDERBUFFER, rb);
// Allocate the renderbuffer at twice the viewport size for supersampling.
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, _width * 2, _height * 2);
// ...
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Downsample the 2x render into the window-sized default framebuffer.
glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
... renders in the bottom left hand 1/4 of the screen. It doesn't appear to be smoother though...
Update 2: doubled the viewport size; it's no smoother. Turning on NSOpenGLPFASupersample still causes it to draw garbage to the screen. >.<
Update 3: I'm an idiot, it's totally smoother. It just doesn't look good because I'm using an ugly color scheme. And I have to double all my coordinates because the viewport is 2x. Oh well. I'd still love some help understanding why NSOpenGLPFASupersample is causing such crazy behavior...
Your sequence of calls here looks like it wouldn't do what you intended:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
glDrawBuffer(GL_BACK);
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height, GL_COLOR_BUFFER_BIT, GL_LINEAR);
When you call glClear() and glDrawArrays(), your current draw framebuffer, which is determined by the last call to glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ...), is the default framebuffer. So you never render to the FBO. Let me annotate the above:
// Set draw framebuffer to default (0) framebuffer. This is where the rendering will go.
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Set read framebuffer to the FBO.
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
// This is redundant, GL_BACK is the default draw buffer for the default framebuffer.
glDrawBuffer(GL_BACK);
// Clear the current draw framebuffer, which is the default framebuffer.
glClear(GL_COLOR_BUFFER_BIT);
// Draw to the current draw framebuffer, which is the default framebuffer.
glDrawArrays(GL_TRIANGLES, 0, 6);
// Copy from read framebuffer (which is the FBO) to the draw framebuffer (which is the
// default framebuffer). Since no rendering was done to the FBO, this will copy garbage
// into the default framebuffer, wiping out what was previously rendered.
glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
To get this working, you need to set the draw framebuffer to the FBO while rendering, and then set the read framebuffer to the FBO and the draw framebuffer to the default for the copy:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, _fbo);
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, _width * 2, _height * 2, 0, 0, _width, _height,
GL_COLOR_BUFFER_BIT, GL_LINEAR);
Recap:
Draw commands write to GL_DRAW_FRAMEBUFFER.
glBlitFramebuffer() copies from GL_READ_FRAMEBUFFER to GL_DRAW_FRAMEBUFFER.
A couple more remarks on the code:
Since you're creating a multisample texture of twice the size, you're using both multisampling and supersampling at the same time:
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, _tex);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, 4, GL_RGBA8,
_width * 2, _height * 2, false);
Which is entirely legal. But it's... a lot of sampling. If you just want supersampling, you can use a regular texture.
You could use a renderbuffer instead of a texture for the FBO color target. No huge advantage, but it's simpler, and potentially more efficient. You only need to use textures as attachments if you want to sample the result later, which is not the case here.