How to set up the fragment shader for multiple textures on multiple objects in OpenGLES? - opengl-es

I am trying to bind one texture to one object and another texture to the next object. I asked almost the same question here on Stack Overflow, but the difference that time was that I had just one texture and multiple objects.
How to bind texture just to one object in OpenGLES?
It worked for one texture and multiple objects, but now I have multiple textures and multiple objects. I followed the advice in the answers, but I strongly believe that this time the problem is about the fragment shader.
// Renders one frame: draws the media-player quad sampling texture unit 1,
// then the camera quad sampling texture unit 0.
// NOTE(review): `#Override` should be `@Override`, and the method's closing
// brace is missing from this excerpt.
#Override
public void onDrawFrame(GL10 gl) {
GLES20.glClear(GL10.GL_COLOR_BUFFER_BIT);
// Latch the latest media-player frame into its external texture.
synchronized (mediaPlayerObject) {
surfaceTextureMediaPlayer.updateTexImage();
mediaPlayeUpdate = false;
}
// First quad: interleaved buffer — positions first, then texture coords.
vertexBuffer.position(0);
GLES20.glVertexAttribPointer(aPositionLocation, POSITION_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer);
vertexBuffer.position(TEXTURE_COMPONENT_COUNT);
GLES20.glVertexAttribPointer(aTextureCoordinatesLocation, TEXTURE_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer);
GLES20.glEnableVertexAttribArray(aPositionLocation);
GLES20.glEnableVertexAttribArray(aTextureCoordinatesLocation);
// Point the sampler at unit 1 and bind the media-player texture there.
GLES20.glUniform1i(textureUnitLocation2, 1);
GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[1]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
GLES20.glDisableVertexAttribArray(aPositionLocation);
GLES20.glDisableVertexAttribArray(aTextureCoordinatesLocation);
// Latch the latest camera frame into its external texture.
synchronized (camerObject) {
surfaceTextureCamera.updateTexImage();
cameraUpdate = false;
}
// Second quad: same attribute layout, different vertex buffer.
vertexBuffer2.position(0);
GLES20.glVertexAttribPointer(aPositionLocation, POSITION_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer2);
vertexBuffer2.position(TEXTURE_COMPONENT_COUNT);
GLES20.glVertexAttribPointer(aTextureCoordinatesLocation, TEXTURE_COMPONENT_COUNT, GLES20.GL_FLOAT, false, STRIDETEXTURE, vertexBuffer2);
GLES20.glEnableVertexAttribArray(aPositionLocation);
GLES20.glEnableVertexAttribArray(aTextureCoordinatesLocation);
// Point the sampler at unit 0 and bind the camera texture there.
GLES20.glUniform1i(textureUnitLocation, 0);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[0]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
GLES20.glDisableVertexAttribArray(aPositionLocation);
GLES20.glDisableVertexAttribArray(aTextureCoordinatesLocation);
Here the fragment shader:
// Question's fragment shader: it multiplies samples from TWO external-OES
// textures, so every draw needs BOTH samplers bound to valid textures —
// a single-texture draw cannot work with this shader as written.
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES u_Texture;
uniform samplerExternalOES b_Texture;
varying vec2 v_TextureCoordinates;
void main() {
// Component-wise product of both textures at the same coordinates.
gl_FragColor = texture2D(u_Texture, v_TextureCoordinates) * texture2D(b_Texture, v_TextureCoordinates);
}

There is no need for a second texture sampler in the fragment shader, so delete b_Texture. It is sufficient to bind the texture object to the texture unit and to set the index of the texture unit to the texture sampler uniform:
// Corrected shader: a single sampler. The draw code chooses which texture is
// shown by binding it to the unit assigned to u_Texture before each draw.
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES u_Texture;
varying vec2 v_TextureCoordinates;
void main() {
gl_FragColor = texture2D(u_Texture, v_TextureCoordinates);
}
Draw 1st object:
GLES20.glUniform1i(textureUnitLocation, 1);
GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[1]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
Draw 2nd object:
GLES20.glUniform1i(textureUnitLocation, 0);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures[0]);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, 6);
Note, it is not necessary to use 2 different texture units. You can use GL_TEXTURE0 in both cases.

Related

Black screen when trying to use multiple textures in Expo

I'm trying to use multiple textures in expo, but for some reason whenever I try to bind more than one texture, even if the texture is unused in the shader, nothing renders to the screen.
Code that binds textures:
// Binds program and geometry, assigns each sampler uniform its own texture
// unit, binds both textures, and draws to the default framebuffer.
gl.useProgram(program);
gl.bindVertexArray(vao);
// Set texture sampler uniform
const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.viewport(0, 0, dims.width, dims.height);
// Two floats per vertex, hence vertices.length / 2 points.
gl.drawArrays(gl.TRIANGLES, 0, vertices.length / 2);
//console.log("draws")
gl.bindVertexArray(null);
// NOTE(review): this only unbinds the texture on the currently active unit
// (TEXTURE1); unit 0 keeps its binding.
gl.bindTexture(gl.TEXTURE_2D, null);
gl.useProgram(null);
Fragment shader:
#version 300 es
// Only cameraTexture is actually sampled; segmentationTexture is declared
// but unused here, so drivers may optimize its uniform away (its location
// can come back null).
precision highp float;
uniform sampler2D cameraTexture;
uniform sampler2D segmentationTexture;
in vec2 uv;
out vec4 fragColor;
void main() {
fragColor = texture(cameraTexture, uv); //segmentationTexture.r *
}
Vertex Shader:
const horizontalScale = flipHorizontal ? -1 : 1;
return `#version 300 es
precision highp float;
in vec2 position;
in vec2 texCoords;
out vec2 uv;
void main() {
// Invert geometry to match the image orientation from the camera.
gl_Position = vec4(position * vec2(${horizontalScale}., -1.), 0, 1);
uv = texCoords;
}`
I have checked both textures by commenting out the code that binds the other texture, e.g.
const TEXTURE_UNIT = 0;
//const TEXTURE_UNIT2 = 1;
gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
//gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
//gl.activeTexture(gl.TEXTURE1);
//gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
or
//const TEXTURE_UNIT = 0;
const TEXTURE_UNIT2 = 1;
//gl.uniform1i(uniformLocations.get('cameraTexture'), TEXTURE_UNIT);
gl.uniform1i(uniformLocations.get('segmentationTexture'), TEXTURE_UNIT2);
//gl.activeTexture(gl.TEXTURE0);
//gl.bindTexture(gl.TEXTURE_2D, cameraTexture);
gl.activeTexture(gl.TEXTURE1);
gl.bindTexture(gl.TEXTURE_2D, segmentationTexture);
and both render fine individually.
However, the act of trying to bind multiple textures, even though only one is used in frag shader, leaves me with only a black screen.

Opengl black/blank cubemap texture

I am trying to use a single cubemap texture for a skybox and reflections, but I just end up with a black texture for both. I am using a temporary array of pixels for the cubemap to make sure the image wasn't the problem. So just ignore the SDL stuff in this function; I commented some of it out. Also the texture functions and shaders are used for both 2d and cubemap textures so I commented that out as well.
Texture loading: zTexture* vars are class vars
// Loads all six cubemap faces (GL_TEXTURE_CUBE_MAP_POSITIVE_X + j) from the
// given image paths and sets nearest/clamp sampling parameters.
// NOTE(review): loadFromPixels() calls glGenTextures() on every invocation,
// so this loop produces six separate one-face (incomplete) textures instead
// of one cubemap — generate and bind the texture once before the loop, and
// only upload faces inside it.
bool loadCubeMap(std::vector<const char*> path)
{
//Texture loading success
bool textureLoaded = false;
glEnable(GL_TEXTURE_CUBE_MAP); //probably not needed
// One face per iteration; SDL image loading is commented out for testing.
for(int j=0; j<path.size(); j++)
{
//SDL_Surface* cubFacSurf = IMG_Load(path[j]);
if(cubFacSurf != NULL)
{
//SDL_LockSurface(cubFacSurf);
zTextureW = cubFacSurf->w;
zTextureH = cubFacSurf->h;
textureLoaded = loadFromPixels((GLuint*)cubFacSurf->pixels, zTextureW, zTextureH, GL_TEXTURE_CUBE_MAP, GL_RGB, GL_TEXTURE_CUBE_MAP_POSITIVE_X+j);
//SDL_UnlockSurface(cubFacSurf);
//SDL_FreeSurface(cubFacSurf);
}
if(textureLoaded == false)
{
SDL_Log("Unable to load %s\n", path[j]);
}
}
// These apply to whichever cubemap texture is bound after the loop.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
return textureLoaded;
}
main pixel loader
// Uploads texture data for one target/face. Currently uploads a hard-coded
// 2x2 float checkerboard for cubemap targets; the real pixel upload is
// commented out for debugging.
// NOTE(review): glGenTextures() runs on EVERY call, so cubemap loading
// (called once per face) creates six separate one-face textures rather than
// one complete cubemap — move generation/binding to the caller.
bool loadFromPixels(void* pixels, GLuint width, GLuint height, const GLenum tType = GL_TEXTURE_2D, const GLenum glPix = GL_RGB, const GLenum tFace = GL_TEXTURE_2D)
{
glGenTextures(1, &zTextureID);
glBindTexture(tType, zTextureID);
// Four RGB texels (2x2): white, black, black, magenta.
GLfloat checkerboard[] = {1.f,1.f,1.f, 0.f,0.f,0.f, 0.f,0.f,0.f, 1.f,0.f,1.f};
if(tType == GL_TEXTURE_CUBE_MAP)
glTexImage2D(tFace, 0, glPix, 2, 2, 0, glPix, GL_FLOAT, &checkerboard);
//else
//glTexImage2D(tFace, 0, glPix, width, height, 0, glPix, GL_UNSIGNED_BYTE, pixels);
//Check for error
GLenum error = glGetError();
if( error != GL_NO_ERROR )
{
SDL_Log( "Error loading texture from %p pixels! %d\n", pixels, error);
return false;
}
return true;
}
Texture Binding:
// Binds the object's texture for drawing and routes the matching sampler
// uniform: 2D textures go to unit 0 (Textr), cubemaps to unit 1 (TextCub).
// NOTE(review): glUniform1i only affects the currently bound program —
// assumes the shader program is already in use; confirm at call sites.
void apply(const GLenum typ = GL_TEXTURE_2D)
{
glEnable(typ); //Probably not necessary doesnt change anything if left out
if(typ == GL_TEXTURE_2D)
{
glDisable(GL_TEXTURE_CUBE_MAP); //same here
glUniform1i(mainTxtrID, 0); //mainTxtrID = Textr in frag
glActiveTexture(GL_TEXTURE0);
}
else
{
glDisable(GL_TEXTURE_2D); //and here
glUniform1i(cubeID, 1); //cubeID = TextCub in frag
glActiveTexture(GL_TEXTURE1);
}
glBindTexture(typ, zTextureID);
}
"Uber" Shaders:
vertex:
#version 100
// "Uber" vertex shader: one shader for both world-space (perspective) and
// UI (orthographic) geometry, selected by the `world` flag.
precision mediump float;
uniform mat4 ModelMat;
uniform mat4 ViewMat;
uniform mat4 ProjMat;
uniform mat4 OrthMat;
uniform bool world;
attribute vec4 vPosition;
attribute vec2 UVCoordAt;
attribute vec3 nPosition;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
UVCoord = UVCoordAt;
vPos = vec3(vPosition); //skybox coords
// Transform the normal by the model matrix (w=0: direction, not point).
vNor = normalize(vec3(ModelMat * vec4(nPosition,0.0)));
vRefl = reflect(normalize(vPos - vec3(ViewMat[3][0], ViewMat[3][1], ViewMat[3][2])), vNor); //reflection direction vector
if(world)
gl_Position = ProjMat * ViewMat * ModelMat * vPosition;
else gl_Position = OrthMat * ModelMat * vPosition;
}
fragment:
#version 100
// "Uber" fragment shader: `dimen` selects skybox cubemap sampling, `isRefl`
// mixes a cubemap reflection into the 2D texture, and `lighting` applies a
// simple diffuse term.
precision mediump float;
uniform samplerCube TextCub;
uniform sampler2D Textr;
uniform vec3 LiPos;
uniform vec4 fragCol;
uniform bool lighting;
uniform bool dimen;
uniform bool isRefl;
varying vec2 UVCoord;
varying vec3 vPos;
varying vec3 vNor;
varying vec3 vRefl;
void main()
{
vec4 textVect = texture2D(Textr, UVCoord); //default texturing
if(dimen){ textVect = textureCub(TextCub, vPos); } //skybox
else if(isRefl){ textVect = mix(textVect, textureCube(TextCub, vRefl), 0.7); } //reflections mixed with default textr
if(lighting){
// NOTE(review): LiPos is dotted with the normal directly, i.e. it is
// treated as a light direction rather than a position.
float diffuse = clamp(dot(vNor, LiPos), 0.0, 1.0);
gl_FragColor = clamp(diffuse*2.0, 0.6, 1.1) * fragCol * textVect;
}
else{ gl_FragColor = fragCol * textVect; }
}
I am using GL_DEPTH_TEST, I doubt this affects anything. I am guessing the problem is in the apply() function or something else I left out. There are extensions for cubemaps but I assume the default opengles 2 cubemaps work without them.
You're creating a new texture for each cubemap face. In the loadFromPixels() function, which you call for each face:
glGenTextures(1, &zTextureID);
glBindTexture(tType, zTextureID);
...
glTexImage2D(...);
This means that you will end up with 6 textures that each have only data for one face specified, which makes them incomplete.
You need to create one cubemap texture, and then specify data for all 6 sides of that cubemap.

Convert IOSurface-backed texture to GL_TEXTURE_2D

I want to render an IOSurface texture with mipmapping.
I am generating an OpenGL texture from an IOSurface like so:
let error = CGLTexImageIOSurface2D(context.cglContextObj!,
GLenum(GL_TEXTURE_RECTANGLE_ARB),GLenum(GL_RGBA), GLsizei(width), GLsizei(height),
GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), surface!, 0)
And rendering it with a simple fragment shader:
#version 120
// GL_TEXTURE_RECTANGLE sampling: rectangle textures use unnormalized texel
// coordinates, hence texCoord (0..1) is scaled by `size` in pixels.
uniform sampler2DRect texture;
varying vec2 texCoord;
uniform vec2 size;
void main(){
vec4 tex = texture2DRect(texture, texCoord*size);
gl_FragColor = tex;
}
This works fine. But unfortunately, the texture is not mipmapped and is very grainy/pixellated when viewed at a smaller size. Upon further research, it appears that not only are IOSurface backed textures limited to a GL_TEXTURE_RECTANGLE_ARB target, they also do not support mipmapping.
Allright. So my best alternative here seems to be to:
Copy the GL_TEXTURE_RECTANGLE_ARB texture to a GL_TEXTURE_2D texture
Generate mipmaps for this new texture
Do note that I am developing on Mac, which does not support OpenGL 4.3, so I cannot use the convenient glCopyImageSubData here. Instead I am basing my approach for Step 1 here on this answer: How to copy texture1 to texture2 efficiently?
My full code now looks like this:
// One-time GL object creation, then: attach the IOSurface to a rectangle
// texture and blit it into a regular GL_TEXTURE_2D through an FBO.
if !hasCreatedTexture {
glGenTextures(1, &texture);
glGenTextures(1, &arbTexture);
glGenFramebuffers(1, &fbo)
hasCreatedTexture = true
}
glEnable(GLenum(GL_TEXTURE_RECTANGLE_ARB))
glBindTexture(GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture)
// Bind the IOSurface contents to the rectangle texture (zero-copy).
let error = CGLTexImageIOSurface2D(context.cglContextObj!, GLenum(GL_TEXTURE_RECTANGLE_ARB), GLenum(GL_RGBA), GLsizei(width), GLsizei(height), GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), surface!, 0)
glBindFramebuffer(GLenum(GL_FRAMEBUFFER), fbo);
// Read from the rect texture (ATTACHMENT0), draw into the 2D texture
// (ATTACHMENT1).
glFramebufferTexture2D(GLenum(GL_READ_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0),
GLenum(GL_TEXTURE_RECTANGLE_ARB), arbTexture, 0);
// NOTE(review): `texture` never has storage allocated (no glTexImage2D
// before this attachment), which would leave the FBO incomplete and the
// blit a no-op — check glCheckFramebufferStatus here.
glFramebufferTexture2D(GLenum(GL_DRAW_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT1),
GLenum(GL_TEXTURE_2D), texture, 0);
glDrawBuffer(GLenum(GL_COLOR_ATTACHMENT1));
glBlitFramebuffer(0, 0, GLint(width), GLint(height), 0, 0, GLint(width), GLint(height), GLbitfield(GL_COLOR_BUFFER_BIT), GLenum(GL_NEAREST));
glBindTexture(GLenum(GL_TEXTURE_2D), texture)
And I changed my shader to support GL_TEXTURE_2D:
#version 120
// Standard 2D sampling with normalized coordinates (no size scaling needed,
// unlike the earlier sampler2DRect version).
uniform sampler2D texture;
varying vec2 texCoord;
void main(){
vec4 tex = texture2D(texture, texCoord);
gl_FragColor = tex;
}
And here is where I'm stuck. The image is all black and I have no clue why it's not working.
I tried looking at the pixel data like so:
var pixelData = UnsafeMutablePointer<GLubyte>(allocatingCapacity: Int(width * height * 4))
glGetTexImage(GLenum(GL_TEXTURE_2D), 0, GLenum(GL_BGRA),
GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), pixelData)
print(NSData(bytes: pixelData, length: 50000).description)
And it's all zeroes. Is it something wrong with my approach or could the issue be specifically related to dealing with IOSurface backed textures?
Finally got this working using the following:
// Working approach: copy the IOSurface's pixels into a plain GL_TEXTURE_2D
// with glTexImage2D, then build a mipmap chain for smooth minification.
func createMipTexture() {
guard let surface = self.surface else { return }
glBindTexture(GLenum(GL_TEXTURE_2D), texture);
glEnable(GLenum(GL_TEXTURE_2D))
// NOTE(review): the base address is fetched before IOSurfaceLock; locking
// first is the documented order — confirm.
guard let address = IOSurfaceGetBaseAddress(surface) as UnsafeMutableRawPointer? else { return }
IOSurfaceLock(surface, .readOnly, nil)
// CPU copy of the surface contents into the texture (unlike
// CGLTexImageIOSurface2D, a regular 2D texture supports mipmapping).
glTexImage2D(GLenum(GL_TEXTURE_2D), 0, GL_RGBA, GLsizei(width), GLsizei(height), 0, GLenum(GL_BGRA), GLenum(GL_UNSIGNED_INT_8_8_8_8_REV), address)
IOSurfaceUnlock(surface, .readOnly, nil);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAX_LEVEL), 100)
glHint(GLenum(GL_GENERATE_MIPMAP_HINT), GLenum(GL_NICEST));
glGenerateMipmap(GLenum(GL_TEXTURE_2D))
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_S), GL_REPEAT);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_WRAP_T), GL_REPEAT);
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MAG_FILTER), GL_LINEAR);
// Trilinear minification uses the generated mip chain.
glTexParameteri(GLenum(GL_TEXTURE_2D), GLenum(GL_TEXTURE_MIN_FILTER), GL_LINEAR_MIPMAP_LINEAR);
}

glsl bind computed fragment shader to cube face

I have a cube, and I want to bind a computed fragment shader to one cube face. Any help?
my vertex shader is :
// Vertex shader: passes the model-space position, transformed normal, and
// eye-space position to the fragment stage.
precision highp float;
attribute vec3 position;
attribute vec3 normal;
uniform mat3 normalMatrix;
uniform mat4 modelViewMatrix;
uniform mat4 projectionMatrix;
varying vec3 fNormal;
varying vec3 fPosition;
void main()
{
fNormal = normalize(normalMatrix * normal);
vec4 pos = modelViewMatrix * vec4(position, 1.0);
fPosition = pos.xyz;
gl_Position = projectionMatrix * pos;
}
and my fragment shader:
// Fragment shader: draws an animated red disc centered on the SCREEN.
// Because it keys off gl_FragCoord (window coordinates), the disc spans the
// whole image rather than repeating on each cube face — which is exactly
// the behavior the question is asking about.
precision highp float;
uniform float time;
uniform vec2 resolution;
varying vec3 fPosition;
varying vec3 fNormal;
void main()
{
// Normalize by the smaller screen dimension and recenter at the origin.
float minDimension = min(resolution.x, resolution.y);
vec2 bounds = vec2(resolution.x / minDimension, resolution.y / minDimension);
vec2 uv = gl_FragCoord.xy / minDimension;
vec2 midUV = vec2(bounds.x * 0.5, bounds.y * 0.5);
uv.xy-=midUV;
float dist = sqrt(dot(uv, uv));
// Disc radius pulses over time via the cos(time) term.
float t = smoothstep(0.2+0.1*cos(time), 0.1-0.01, dist);
vec4 disc_color=vec4(1.0,0.,0.,1.);
vec4 bkg_color=vec4(0.0, 0.0, 0.0, 1.0);
vec4 resultColor;
resultColor = mix(bkg_color, disc_color, t);
gl_FragColor = resultColor;
}
I get a cube with a single dot over the whole image.
What I actually want is to have the dot on each face.
You can test the code directly at http://shdr.bkcore.com/
The standard approach to do this in OpenGL is to first render your content to a FBO (Frame Buffer Object). This allows you to render to a texture. You can then use this texture for one of the sides when rendering the cube.
The following is not complete code, but just an outline to give you a starting point:
// create a texture, and bind it
GLuint texId = 0;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
// set texture parameters (filters, wrap mode, etc)
glTexParameteri(GL_TEXTURE_2D, ...);
// allocate texture data (no data provided)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_FLOAT, 0);
glBindTexture(GL_TEXTURE_2D, texId);
// create and set up FBO with texture as attachment
GLuint fboId = 0;
glGenFramebuffers(1, &fboId);
glBindFramebuffer(GL_FRAMEBUFFER, fboId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texId, 0);
// create a depth renderbuffer and attach it if depth testing is needed
// start rendering
glViewport(0, 0, width, height);
glClear(GL_COLOR_BUFFER_BIT);
glDraw...(...);
// back to default framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glViewport(0, 0, windowWidth, windowHeight);
glClear(GL_COLOR_BUFFER_BIT);
// render cube using texture we rendered to
glBindTexture(GL_TEXTURE_2D, texId);
// rest of state setup and rendering
Thanks for the answer — this is correct, but I found a better way than rendering to a texture: I passed the texture coordinates to the fragment shader and used them instead of gl_FragCoord.

OpenGL Point Functionality in WebGL

In OpenGL you can draw define points like this:
// Legacy immediate-mode OpenGL: emits a spiral of colored points, one
// glVertex2i per point (glBegin/glEnd do not exist in WebGL / OpenGL ES).
glBegin(GL_POINTS);
for(float theta=0, radius=60.0; radius>1.0; theta+=0.1, radius-=0.3){
// Color fades from pink at the rim (radius 60) to blue at the center.
glColor3f(radius/60.0,0.3,1-(radius/60.0));
glVertex2i(200+radius*cos(theta),200+radius*sin(theta));
}
glEnd();
How do you accomplish this same functionality in WebGL?
The code you wrote really doesn't do much except define some points. To do that in WebGL you could do it like this
// Build the same spiral on the CPU: 3 floats per color, 2 per vertex.
var colors = [];
var verts = [];
var theta=0
for(var radius=60.0; radius>1.0; radius-=0.3) {
colors.push(radius/60.0, 0.3, 1-(radius/60.0));
verts.push(200+radius*Math.cos(theta),200+radius*Math.sin(theta));
theta+=0.1;
}
// One point per color triple.
var numPoints = colors.length / 3;
That would make 2 JavaScript arrays. You'd then need to put them to WebGLBuffers
// Upload both JavaScript arrays into GPU buffers as 32-bit floats.
var colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
var vertBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(verts), gl.STATIC_DRAW);
After that though you need to write a shader and set it up. Shaders are a huge topic. For your particular data I'm guessing these shader would do
A vertex shader
// Point vertex shader: fixed 3px point size, position transformed by a
// pixel-space matrix, per-vertex color forwarded to the fragment stage.
uniform mat4 u_matrix;
attribute vec4 a_vertex;
attribute vec4 a_color;
varying vec4 v_color;
void main() {
// Set the size of the point
gl_PointSize = 3.0;
// multiply each vertex by a matrix.
gl_Position = u_matrix * a_vertex;
// pass the color to the fragment shader
v_color = a_color;
}
A fragment shader
// Point fragment shader: outputs the interpolated per-vertex color.
precision mediump float;
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
Next you need to initialize the shaders and parameters. I'm going to assume I put the shaders in script tags with ids "vshader" and "fshader" and use this boilerplate code to load them.
// Compile/link the shaders, look up their inputs, upload a pixels-to-clip
// matrix, and point both attributes at their buffers.
var program = createProgramFromScriptTags(gl, "vshader", "fshader");
gl.useProgram(program);
// look up the locations for the inputs to our shaders.
var u_matLoc = gl.getUniformLocation(program, "u_matrix");
var colorLoc = gl.getAttribLocation(program, "a_color");
var vertLoc = gl.getAttribLocation(program, "a_vertex");
// Set the matrix to some that makes 1 unit 1 pixel.
gl.uniformMatrix4fv(u_matLoc, false, [
2 / width, 0, 0, 0,
0, 2 / height, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
]);
// Tell the shader how to get data out of the buffers.
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.vertexAttribPointer(colorLoc, 3, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(colorLoc);
gl.bindBuffer(gl.ARRAY_BUFFER, vertBuffer);
gl.vertexAttribPointer(vertLoc, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(vertLoc);
and finally draw the points
gl.drawArrays(gl.POINTS, 0, numPoints);
Here's a snippet
// Complete WebGL points demo: builds a spiral of points on the CPU, uploads
// them to buffers, and redraws every animation frame.
var canvas = document.getElementById("c");
var gl = canvas.getContext("webgl");
if (!gl) {
alert("no WebGL");
//return;
}
// Build spiral point/color arrays: 3 floats per color, 2 per vertex.
var colors = [];
var verts = [];
var theta=0
for(var radius=160.0; radius>1.0; radius-=0.3) {
colors.push(radius/160.0, 0.3, 1-(radius/160.0));
verts.push(radius*Math.cos(theta),radius*Math.sin(theta));
theta+=0.1;
}
var numPoints = colors.length / 3;
// Upload both arrays into GPU buffers.
var colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
var vertBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vertBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(verts), gl.STATIC_DRAW);
var program = twgl.createProgramFromScripts(gl, ["vshader", "fshader"]);
gl.useProgram(program);
// look up the locations for the inputs to our shaders.
var u_matLoc = gl.getUniformLocation(program, "u_matrix");
var colorLoc = gl.getAttribLocation(program, "a_color");
var vertLoc = gl.getAttribLocation(program, "a_vertex");
function draw() {
// BUG FIX: set the clear color BEFORE clearing. The original called
// gl.clear first, so the very first frame cleared with the default
// transparent black instead of the intended white background.
gl.clearColor(1.0, 1.0, 1.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
// Set the matrix to some that makes 1 unit 1 pixel (y flipped so +y is down).
gl.uniformMatrix4fv(u_matLoc, false, [
2 / canvas.width, 0, 0, 0,
0, -2 / canvas.height, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
]);
// Wire both attributes to their buffers each frame.
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer);
gl.vertexAttribPointer(colorLoc, 3, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(colorLoc);
gl.bindBuffer(gl.ARRAY_BUFFER, vertBuffer);
gl.vertexAttribPointer(vertLoc, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(vertLoc);
gl.drawArrays(gl.POINTS, 0, numPoints);
requestAnimationFrame(draw, canvas);
}
draw();
canvas { border: 1px solid black; }
<script src="https://twgljs.org/dist/3.x/twgl.min.js"></script>
<script id="vshader" type="whatever">
uniform mat4 u_matrix;
attribute vec4 a_vertex;
attribute vec4 a_color;
varying vec4 v_color;
void main() {
// Set the size of the point
gl_PointSize = length(a_vertex) * 0.1;
// multiply each vertex by a matrix.
gl_Position = u_matrix * a_vertex;
// pass the color to the fragment shader
v_color = a_color;
}
</script>
<script id="fshader" type="whatever">
precision mediump float;
varying vec4 v_color;
void main() {
gl_FragColor = v_color;
}
</script>
<canvas id="c" width="400" height="400"></canvas>
you might find these WebGL tutorials helpful.
WebGL is based on OpenGL ES 2.0 (see here), which dropped immediate-mode support.
This specification describes an additional rendering context and support objects for the HTML 5 canvas element [CANVAS]. This context allows rendering using an API that conforms closely to the OpenGL ES 2.0 API.
You'll need to use vertex buffers to store vertex data. See here1 for a good explanation of how things work in retained mode. And there for a nice small example to get you started.
1: Kudos to whoever posted this here.

Resources