DirectX pixel shows only at (0.0f, 0.0f) - winapi

I'm learning DirectX 11 and the WinAPI by following a YouTube tutorial. I'm trying to show a pixel on the display at (0.0f, 0.0f) and that works, but if I change the coordinate (for example to (-0.5f, 0.0f)) it doesn't. And when I try to show more than one pixel, only the pixel at (0.0f, 0.0f) appears.
Here's the render method:
void Graphics::RenderFrame()
{
    float bgcolor[] = { 0.0f, 0.0f, 0.0f, 1.0f };
    this->deviceContext->ClearRenderTargetView(this->renderTargetView.Get(), bgcolor);
    this->deviceContext->IASetInputLayout(this->vertexshader.GetInputLayout());
    this->deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D10_PRIMITIVE_TOPOLOGY_POINTLIST);
    this->deviceContext->VSSetShader(vertexshader.GetShader(), NULL, 0);
    this->deviceContext->PSSetShader(pixelshader.GetShader(), NULL, 0);
    UINT stride = sizeof(Vertex);
    UINT offset = 0;
    this->deviceContext->IASetVertexBuffers(0, 1, vertexBuffer.GetAddressOf(), &stride, &offset);
    this->deviceContext->Draw(2, 0);
    this->swapchain->Present(1, NULL);
}
Vertex structure:
Vertex v[] =
{
    Vertex(0.0f, 0.0f),
    Vertex(-0.5f, 0.0f),
};
Vertex header:
struct Vertex
{
    Vertex()
    {
    }
    Vertex(float x, float y)
        : pos(x, y)
    {
    }

    DirectX::XMFLOAT2 pos;
};
Vertex shader:
float4 main(float2 inPos : POSITION) : SV_POSITION
{
    return float4(inPos, 0, 1);
}
Pixel shader:
float4 main() : SV_TARGET
{
    return float4(1.0f, 1.0f, 1.0f, 1.0f);
}

In my own test I couldn't find any problem with the code you provided. At first I used the same data as you, but the pixels were so small that I couldn't spot them, so I moved the points a little closer together and could then clearly see that they are drawn successfully in the window.
I suggest placing your coordinates closer together, and it is better to use D3D11_PRIMITIVE_TOPOLOGY_POINTLIST. Here is my test picture; you can see the three pixels of the output, drawn at these coordinates:
{ 0.0f, -0.0f },
{ 0.0f, -0.1f },
{ 0.0f, -0.2f },
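Applied to the code in the question, the suggestion amounts to roughly the following. This is only a sketch, assuming the vertex-buffer creation and the rest of RenderFrame stay as posted:
// The vertex data with the closer-spaced points suggested above.
Vertex v[] =
{
    Vertex(0.0f, -0.0f),
    Vertex(0.0f, -0.1f),
    Vertex(0.0f, -0.2f),
};
// The two lines of RenderFrame that change: use the D3D11 topology value
// instead of the D3D10 one, and draw all three points.
this->deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
this->deviceContext->Draw(_countof(v), 0);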
The following is my test code, hope it will help you.
https://gist.github.com/msmshazan/dfd5362004be37ff5e016b6a42be5083
static void RenderFrame()
{
if (!render_occluded)
{
if (render_frame_latency_wait)
{
WaitForSingleObjectEx(render_frame_latency_wait, INFINITE, TRUE);
}
#if WINDOW_DEPTH || WINDOW_STENCIL
ID3D11DeviceContext_OMSetRenderTargets(render_context, 1, &render_window_rtview, render_window_dpview);
ID3D11DeviceContext_OMSetDepthStencilState(render_context, render_depthstencil_state, 0);
ID3D11DeviceContext_ClearDepthStencilView(render_context, render_window_dpview, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.f, 0);
#else
ID3D11DeviceContext_OMSetRenderTargets(render_context, 1, &render_window_rtview, NULL);
#endif
// clear background
FLOAT clear_color[] = { 100.f / 255.f, 149.f / 255.f, 237.f / 255.f, 1.f };
ID3D11DeviceContext_ClearRenderTargetView(render_context, render_window_rtview, clear_color);
// draw a triangle
const UINT stride = sizeof(struct Vertex);
const UINT offset = 0;
ID3D11DeviceContext_IASetInputLayout(render_context, render_input_layout);
ID3D11DeviceContext_IASetVertexBuffers(render_context, 0, 1, &render_vertex_buffer, &stride, &offset);
ID3D11DeviceContext_IASetPrimitiveTopology(render_context, D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
ID3D11DeviceContext_VSSetShader(render_context, render_vertex_shader, NULL, 0);
ID3D11DeviceContext_PSSetShader(render_context, render_pixel_shader, NULL, 0);
ID3D11DeviceContext_RSSetState(render_context, render_raster_state);
ID3D11DeviceContext_OMSetBlendState(render_context, render_blend_state, NULL, ~0U);
ID3D11DeviceContext_Draw(render_context, _countof(vertices), 0);
}
}
struct Vertex
{
float x, y;
float r, g, b;
};
static const struct Vertex vertices[] =
{
{ 0.0f, -0.0f, 1.f, 0.f, 0.f },
{ 0.0f, -0.1f, 1.f, 0.f, 0.f },
{ 0.0f, -0.3f, 0.f, 1.f, 1.f },
};
// You can compile shader to bytecode at build time, this will increase startup
// performance and also will avoid dependency on d3dcompiler dll file.
// To do so, put the shader source in shader.hlsl file and run these two commands:
// fxc.exe /nologo /T vs_4_0_level_9_0 /E vs /O3 /WX /Zpc /Ges /Fh d3d11_vshader.h /Vn d3d11_vshader /Qstrip_reflect /Qstrip_debug /Qstrip_priv shader.hlsl
// fxc.exe /nologo /T ps_4_0_level_9_0 /E ps /O3 /WX /Zpc /Ges /Fh d3d11_pshader.h /Vn d3d11_pshader /Qstrip_reflect /Qstrip_debug /Qstrip_priv shader.hlsl
// then set next setting to 1
#define USE_PRECOMPILED_SHADERS 0
#if USE_PRECOMPILED_SHADERS
#include "d3d11_vshader.h"
#include "d3d11_pshader.h"
#else
#pragma comment (lib, "d3dcompiler.lib")
static const char d3d11_shader[] =
"struct VS_INPUT \n"
"{ \n"
" float2 pos : POSITION; \n"
" float3 col : COLOR0; \n"
"}; \n"
" \n"
"struct PS_INPUT \n"
"{ \n"
" float4 pos : SV_POSITION; \n"
" float3 col : COLOR0; \n"
"}; \n"
" \n"
"PS_INPUT vs(VS_INPUT input) \n"
"{ \n"
" PS_INPUT output; \n"
" output.pos = float4(input.pos.xy, 0.f, 1.f); \n"
" output.col = input.col; \n"
" return output; \n"
"} \n"
" \n"
"float4 ps(PS_INPUT input) : SV_Target \n"
"{ \n"
" return float4(input.col, 1.f); \n"
"} \n";
#endif

Related

Why do I get "unsupported shader version" using "#version 300 es" with Emscripten?

I don't know why I get an "unsupported shader version" error message when using #version 300 es in my vertex shader with the latest Emscripten 1.39. With #version 100 it works fine.
const GLchar* vertex_shader_code[] = {
"#version 300 es\n"
"precision mediump float; \n"
"void main() { \n"
"gl_Position = vec4(0.0, 0.0, 0.0, 1.0); \n"
"} \n"
};
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
SDL_version compiled;
SDL_version linked;
SDL_VERSION(&compiled);
SDL_GetVersion(&linked);
printf("Compiled SDL version: %d.%d.%d\n", compiled.major, compiled.minor, compiled.patch);
printf("Linked SDL version: %d.%d.%d\n", linked.major, linked.minor, linked.patch);
SDL_Init(SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(CANVAS_WIDTH, CANVAS_HEIGHT, 0, &window, &renderer);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
SDL_RenderClear(renderer);
GLuint vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, vertex_shader_code, 0);
glCompileShader(vertex_shader);
auto compile_success = 0;
auto compile_info_lenght = 0;
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &compile_success);
if(compile_success == GL_FALSE) {
glGetShaderiv(vertex_shader, GL_INFO_LOG_LENGTH, &compile_info_lenght);
std::string vertex_shader_log(compile_info_lenght, ' ');
glGetShaderInfoLog(vertex_shader, compile_info_lenght, NULL, &vertex_shader_log[0]);
int n = vertex_shader_log.length();
char char_array[n + 1];
strcpy(char_array, vertex_shader_log.c_str());
printf("%s\n", char_array);
glDeleteShader(vertex_shader);
return 0;
}
For the build I use emcc -s main.cpp -o index.html --shell-file shell.html -s USE_SDL=2 -s FULL_ES3=1
Message:
Compiled SDL version: 2.0.9
Linked SDL version: 2.0.9
ERROR: unsupported shader version
What am I doing wrong?
Without your full program, it is difficult to see everything that is wrong. You need to build with the correct options, call your WASM module correctly from JavaScript, and set up everything correctly in SDL and OpenGL.
Here is a working example:
// main.cpp
#include <stdio.h>
#include <stdarg.h>
#include <emscripten.h>
#include <SDL.h>
#include <SDL_opengles2.h>
SDL_Window *sdlWindow;
static const char *vertexShaderSource[] = {
"#version 300 es\n"
"in vec4 position;\n"
"void main(void) {\n"
" gl_Position = vec4(position.xyz, 1.0);\n"
"}\n"
};
static const char *fragmentShaderSource[] = {
"#version 300 es\n"
"precision mediump float;\n"
"out vec4 fragColor;\n"
"void main(void) {\n"
" fragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n"
};
static void
fatal(const char *const fmt, ...)
{
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
printf("\n");
va_end(args);
for (;;) {
SDL_Delay(1000);
}
}
static GLuint
compileShader(GLenum shaderType, const char **shaderSource)
{
GLuint shdr = glCreateShader(shaderType);
glShaderSource(shdr, 1, shaderSource, nullptr);
glCompileShader(shdr);
GLint isCompiled = 0;
glGetShaderiv(shdr, GL_COMPILE_STATUS, &isCompiled);
if (isCompiled == GL_FALSE) {
GLint maxLength = 0;
glGetShaderiv(shdr, GL_INFO_LOG_LENGTH, &maxLength);
char *errorString = (char *) malloc(maxLength + 1);
glGetShaderInfoLog(shdr, maxLength, &maxLength, errorString);
fatal("Compile failed: %s", errorString);
}
return shdr;
}
static void
_set_SDL_Attribute(SDL_GLattr attr, int value, const char *attrName)
#define set_SDL_Attribute(x, v) _set_SDL_Attribute(x, v, #x)
{
if (SDL_GL_SetAttribute(attr, value) != 0) {
fatal("SDL set attrib failed: %s, %s", attrName, SDL_GetError());
}
}
static void
setupSDL(void)
{
if (SDL_Init(SDL_INIT_VIDEO) != 0) {
fatal("Unable to init SDL: %s", SDL_GetError());
}
SDL_version compiled;
SDL_version linked;
SDL_VERSION(&compiled);
SDL_GetVersion(&linked);
printf("Compiled SDL version: %d.%d.%d\n",
compiled.major, compiled.minor, compiled.patch);
printf("Linked SDL version: %d.%d.%d\n",
linked.major, linked.minor, linked.patch);
set_SDL_Attribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
set_SDL_Attribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
set_SDL_Attribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
set_SDL_Attribute(SDL_GL_DOUBLEBUFFER, 1);
set_SDL_Attribute(SDL_GL_DEPTH_SIZE, 24);
SDL_Renderer *renderer;
if (SDL_CreateWindowAndRenderer(400, 400, SDL_WINDOW_OPENGL,
&sdlWindow, &renderer) < 0) {
fatal("Unable to create windown: %s", SDL_GetError());
}
SDL_GLContext glContext = SDL_GL_CreateContext(sdlWindow);
if (glContext == NULL) {
fatal("Unable to create context: %s", SDL_GetError());
}
printf("GL Version={%s}\n", glGetString(GL_VERSION));
printf("GLSL Version={%s}\n", glGetString(GL_SHADING_LANGUAGE_VERSION));
}
static void
setupOpenGL(void)
{
GLuint shaderProgram = glCreateProgram();
GLuint vertexShader = compileShader(GL_VERTEX_SHADER, vertexShaderSource);
GLuint fragmentShader = compileShader(GL_FRAGMENT_SHADER,
fragmentShaderSource);
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
glUseProgram(shaderProgram);
static const GLfloat vertices[] = { 0.0f, 0.5f, 0.5f, -0.5f, -0.5f, -0.5f };
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof (vertices), vertices,
GL_STATIC_DRAW);
glEnableVertexAttribArray(0); // Input offset is zero.
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Black
}
static void
mainLoop(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_TRIANGLES, 0, 3);
SDL_GL_SwapWindow(sdlWindow);
}
int
main(void)
{
setupSDL();
setupOpenGL();
emscripten_set_main_loop(mainLoop, 0, true);
return 0;
}
Compile with these options:
emcc main.cpp -O3 -Wall -Wextra -Werror -s WASM=1 -s USE_SDL=2 -s MAX_WEBGL_VERSION=2 -s MIN_WEBGL_VERSION=2 -o foo.js
Launch from HTML like this:
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
<h1>Wasm Test Page</h1>
<canvas id=canvas width=400 height=400 oncontextmenu="event.preventDefault()"></canvas>
<script>var Module = { canvas: document.getElementById('canvas') }; </script>
<script src="foo.js"></script>
</body>
</html>
If everything works, you should see a red triangle on a black background in your browser window.
HTH.
For those with the same problem, the solution is SDL_WINDOW_OPENGL.
SDL_CreateWindowAndRenderer(CANVAS_WIDTH, CANVAS_HEIGHT, SDL_WINDOW_OPENGL, &window, &renderer);

OpenGL 3.3 (mac) Error validating program: Validation Failed: No vertex array object bound

The following code compiles and runs without errors on Linux, but gives the error
"Error validating program: 'Validation Failed: No vertex array object bound.'"
on macOS 10.14.2 (Mojave). Note that the program compiles successfully but fails at runtime.
MacBook Pro (Retina, 15-inch, Mid 2015)
I am compiling using g++ -std=c++11 test.cpp -w -framework OpenGL -lglfw -lGLEW -o p
test.cpp
#include <bits/stdc++.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
using namespace std;
#define cout(a) cout<<a<<endl
// IDs
GLuint VAO, VBO, VAO2, VBO2, shaderID, uniformModel;
float scale = 1.0, x = 0.0, y = 0.0;
const int numPoints = 50000;
const char* vShader = "shader.vert";
const char* fShader = "shader.frag";
void createSierpinskiGasket()
{
GLfloat points[3 * numPoints];
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f
};
points[0] = 0.25f; points[1] = 0.50f; points[2] = 0.0f;
for(int i = 3; i < numPoints * 3; i += 3)
{
int j = rand() % 3;
points[i] = (points[i - 3] + vertices[j * 3]) / 2.0;
points[i + 1] = (points[i - 2] + vertices[j * 3 + 1]) / 2.0;
points[i + 2] = (points[i - 1] + vertices[j * 3 + 2]) / 2.0;
}
glGenVertexArrays(1, &VAO2);
glBindVertexArray(VAO2);
glGenBuffers(1, &VBO2);
glBindBuffer(GL_ARRAY_BUFFER, VBO2);
glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void createTriangle()
{
GLfloat vertices[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glGenVertexArrays(1, &VAO);
// Subsequent code will be associated with this VAO
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
// GL_ARRAY_BUFFER = Vertex data
glBindBuffer(GL_ARRAY_BUFFER, VBO);
// GL_STATIC_DRAW = Not going to change the data (transforms are OK)
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
// Location, number, type, normalize, stride, offset
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// Enable location 0
glEnableVertexAttribArray(0);
// Unbinding
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void addShader(const char* shaderIDCode, GLenum shaderIDType)
{
GLuint theShader = glCreateShader(shaderIDType);
const GLchar* theCode[1];
theCode[0] = shaderIDCode;
GLint codeLength[1];
codeLength[0] = strlen(shaderIDCode);
glShaderSource(theShader, 1, theCode, codeLength);
glCompileShader(theShader);
GLint result = 0;
GLchar eLog[1024] = { 0 };
glGetShaderiv(theShader, GL_COMPILE_STATUS, &result);
if (!result)
{
glGetShaderInfoLog(theShader, sizeof(eLog), NULL, eLog);
printf("Error compiling the %d shaderID: '%s'\n", shaderIDType, eLog);
return;
}
glAttachShader(shaderID, theShader);
}
void compileShader(const char* vertexCode, const char* fragmentCode)
{
// Creating shaderID program
shaderID = glCreateProgram();
if(!shaderID)
{
cout("Error creating shaderID.");
return;
}
addShader(vertexCode, GL_VERTEX_SHADER);
addShader(fragmentCode, GL_FRAGMENT_SHADER);
GLint result = 0;
GLchar eLog[1024] = { 0 };
glLinkProgram(shaderID);
glGetProgramiv(shaderID, GL_LINK_STATUS, &result);
if (!result)
{
glGetProgramInfoLog(shaderID, sizeof(eLog), NULL, eLog);
printf("Error linking program: '%s'\n", eLog);
return;
}
glValidateProgram(shaderID);
glGetProgramiv(shaderID, GL_VALIDATE_STATUS, &result);
if (!result)
{
glGetProgramInfoLog(shaderID, sizeof(eLog), NULL, eLog);
printf("Error validating program: '%s'\n", eLog);
return;
}
}
string readFile(const char* fileLocation)
{
string content;
ifstream fileStream(fileLocation, ios::in);
if (!fileStream.is_open()) {
printf("Failed to read %s! File doesn't exist.", fileLocation);
return "";
}
string line = "";
while (!fileStream.eof())
{
getline(fileStream, line);
content.append(line + "\n");
}
fileStream.close();
return content;
}
void createShader(const char* vertexLocation, const char* fragmentLocation)
{
string vertexString = readFile(vertexLocation);
string fragmentString = readFile(fragmentLocation);
const char* vertexCode = vertexString.c_str();
const char* fragmentCode = fragmentString.c_str();
compileShader(vertexCode, fragmentCode);
}
void handleKeys(GLFWwindow* window, int key, int code, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
if (key == GLFW_KEY_EQUAL && action == GLFW_PRESS)
{
scale += 0.05;
}
if (key == GLFW_KEY_MINUS && action == GLFW_PRESS)
{
scale -= 0.05;
}
if (key == GLFW_KEY_LEFT && action == GLFW_PRESS)
{
x -= 0.05;
}
if (key == GLFW_KEY_RIGHT && action == GLFW_PRESS)
{
x += 0.05;
}
if (key == GLFW_KEY_UP && action == GLFW_PRESS)
{
y += 0.05;
}
if (key == GLFW_KEY_DOWN && action == GLFW_PRESS)
{
y -= 0.05;
}
}
int main(void)
{
const GLint WIDTH = 800, HEIGHT = 600;
// Initializing GLFW
if(!glfwInit())
{
cout("GLFW initialization failed.");
glfwTerminate();
return 1;
}
// Setup GLFW window properties
// OpenGL version
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
// Not backwards compatible
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Allow forward compatibility
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
GLFWwindow* mainWindow = glfwCreateWindow(WIDTH, HEIGHT, "Test Window", NULL, NULL);
if(!mainWindow)
{
cout("GLFW window creation failed.");
glfwTerminate();
return 1;
}
// Get buffer size information
int bufferWidth, bufferHeight;
glfwGetFramebufferSize(mainWindow, &bufferWidth, &bufferHeight);
// Set context for GLEW to use
glfwMakeContextCurrent(mainWindow);
// Allow modern extension features
glewExperimental = GL_TRUE;
if(glewInit() != GLEW_OK)
{
cout("GLEW initialization failed.");
glfwDestroyWindow(mainWindow);
glfwTerminate();
return 1;
}
// Setup viewport size
glViewport(0, 0, bufferWidth, bufferHeight);
createTriangle();
createShader(vShader, fShader);
createSierpinskiGasket();
uniformModel = glGetUniformLocation(shaderID, "model");
// Loop until window is closed
while(!glfwWindowShouldClose(mainWindow))
{
// Get and handle user input
glfwPollEvents();
glfwSetKeyCallback(mainWindow, handleKeys);
// Clear window
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
// Clear colour buffer before next frame
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(shaderID);
glm::mat4 model = glm::mat4();
model = glm::translate(model, glm::vec3(x, y, 0));
//model = glm::rotate(model, rotX * toRadians, glm::vec3(1, 0, 0));
//model = glm::rotate(model, rotY * toRadians, glm::vec3(0, 1, 0));
//model = glm::rotate(model, rotZ * toRadians, glm::vec3(0, 0, 1));
model = glm::scale(model, glm::vec3(scale, scale, scale));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE, glm::value_ptr(model));
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
/*glBindVertexArray(VAO2);
glDrawArrays(GL_POINTS, 0, numPoints);
glBindVertexArray(0);*/
glUseProgram(0);
glfwSwapBuffers(mainWindow);
}
return 0;
}
shader.frag
#version 330
in vec4 vCol;
uniform mat4 model;
out vec4 color;
void main()
{
//color = vec4(1.0f, 1.0f, 0.0f, 1.0f);
color = vec4(vCol.x, vCol.y, 0.5, 1.0);
}
shader.vert
#version 330
layout (location = 0) in vec3 pos;
uniform mat4 model;
out vec4 vCol;
void main()
{
gl_Position = model * vec4(pos.x, pos.y, pos.z, 1.0f);
vCol = vec4(clamp(pos, 0.0f, 1.0f), 1.0f);
}
The message
Validation Failed: No vertex array object bound.
means that the program could not be validated because no vertex array object was bound when glValidateProgram was called.
See OpenGL 4.6 API Core Profile Specification; 11.1. VERTEX SHADERS; page 402
[...] As a development aid, use the command
void ValidateProgram( uint program );
to validate the program object program against the current GL state.
This means that the VAO that will be drawn with the shader program has to be bound before glValidateProgram is called.
Bind the "triangle" VAO, before the shader program is validated:
createTriangle();
glBindVertexArray(VAO);
createShader(vShader, fShader);
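Alternatively, the validation can be moved to any point where that VAO is already bound, for example once after setup. A minimal sketch using the question's globals (VAO, shaderID):
glBindVertexArray(VAO);
glValidateProgram(shaderID);
GLint valid = GL_FALSE;
glGetProgramiv(shaderID, GL_VALIDATE_STATUS, &valid);
if (!valid)
{
    GLchar eLog[1024] = { 0 };
    glGetProgramInfoLog(shaderID, sizeof(eLog), NULL, eLog);
    printf("Error validating program: '%s'\n", eLog);
}
glBindVertexArray(0);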

egl pbuffer offscreen rendering can only read back background

I want to do EGL offscreen rendering with a pbuffer surface, but all I can read back is the background color; the things I draw can't be seen.
For example, if I clear the screen with blue, the image read back via glReadPixels is just a blue image with nothing else in it.
I have really run out of ideas.
#include <QCoreApplication>
#include <QDebug>
#include <QImage>
#include <GLES2/gl2.h>
#include <EGL/egl.h>
#include <QElapsedTimer>
GLuint LoadShader(const char *shaderSrc, GLenum type)
{
GLuint shader;
GLint compiled;
// Create the shader object
shader = glCreateShader(type);
if(shader == 0)
return 0;
// Load the shader source
glShaderSource(shader, 1, &shaderSrc, NULL);
// Compile the shader
glCompileShader(shader);
// Check the compile status
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if(!compiled)
{
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetShaderInfoLog(shader, infoLen, NULL, infoLog);
qDebug() << "Error compiling shader:" << infoLog;
free(infoLog);
}
glDeleteShader(shader);
return 0;
}
return shader;
}
int main(int argc, char *argv[])
{
GLuint renderBufferWidth = 1920;
GLuint renderBufferHeight = 1080;
EGLint ai32ContextAttribs[] = { EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE };
// Step 1 - Get the default display.
EGLDisplay eglDisplay = eglGetDisplay((EGLNativeDisplayType)0);
// Step 2 - Initialize EGL.
eglInitialize(eglDisplay, 0, 0);
// Step 3 - Make OpenGL ES the current API.
eglBindAPI(EGL_OPENGL_ES_API);
// Step 4 - Specify the required configuration attributes.
EGLint pi32ConfigAttribs[23];
pi32ConfigAttribs[0] = EGL_SURFACE_TYPE;
pi32ConfigAttribs[1] = EGL_PBUFFER_BIT;
pi32ConfigAttribs[2] = EGL_RENDERABLE_TYPE;
pi32ConfigAttribs[3] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[4] = EGL_CONFORMANT;
pi32ConfigAttribs[5] = EGL_OPENGL_ES2_BIT;
pi32ConfigAttribs[6] = EGL_COLOR_BUFFER_TYPE;
pi32ConfigAttribs[7] = EGL_RGB_BUFFER;
pi32ConfigAttribs[8] = EGL_LUMINANCE_SIZE;
pi32ConfigAttribs[9] = 0;
pi32ConfigAttribs[10] = EGL_RED_SIZE;
pi32ConfigAttribs[11] = 5;
pi32ConfigAttribs[12] = EGL_GREEN_SIZE;
pi32ConfigAttribs[13] = 6;
pi32ConfigAttribs[14] = EGL_BLUE_SIZE;
pi32ConfigAttribs[15] = 5;
pi32ConfigAttribs[16] = EGL_ALPHA_SIZE;
pi32ConfigAttribs[17] = 0;
pi32ConfigAttribs[18] = EGL_DEPTH_SIZE;
pi32ConfigAttribs[19] = 16;
pi32ConfigAttribs[20] = EGL_LEVEL;
pi32ConfigAttribs[21] = 0;
pi32ConfigAttribs[22] = EGL_NONE;
// Step 5 - Find a config that matches all requirements.
int iConfigs;
EGLConfig eglConfig;
eglChooseConfig(eglDisplay, pi32ConfigAttribs, &eglConfig, 1,
&iConfigs);
if (iConfigs != 1)
{
printf("Error: eglChooseConfig(): config not found.\n");
exit(-1);
}
EGLint pbufferAttribs[5];
pbufferAttribs[0] = EGL_WIDTH;
pbufferAttribs[1] = renderBufferWidth;
pbufferAttribs[2] = EGL_HEIGHT;
pbufferAttribs[3] = renderBufferHeight;
pbufferAttribs[4] = EGL_NONE;
// Step 6 - Create a surface to draw to.
EGLSurface eglSurface;
eglSurface = eglCreatePbufferSurface(eglDisplay, eglConfig, pbufferAttribs);
if (eglSurface == EGL_NO_SURFACE)
{
qDebug() << "surface error";
exit(1);
}
// Step 7 - Create a context.
EGLContext eglContext;
eglContext = eglCreateContext(eglDisplay, eglConfig, NULL,
ai32ContextAttribs);
if (eglContext == EGL_NO_CONTEXT)
{
qDebug() << "context error";
exit(1);
}
// Step 8 - Bind the context to the current thread
bool result = eglMakeCurrent(eglDisplay, eglSurface, eglSurface, eglContext);
if (!result)
{
qDebug() << "make current error" << eglGetError();
}
GLuint programObject;
{ //init
char* vShaderStr =
"attribute vec4 vPosition; \n"
"void main() \n"
"{ \n"
" gl_Position = vPosition; \n"
"} \n";
char* fShaderStr =
"precision mediump float; \n"
"void main() \n"
"{ \n"
" gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); \n"
"} \n";
GLuint vertexShader;
GLuint fragmentShader;
GLint linked;
// Load the vertex/fragment shaders
vertexShader = LoadShader(vShaderStr, GL_VERTEX_SHADER);
fragmentShader = LoadShader(fShaderStr, GL_FRAGMENT_SHADER);
// Create the program object
programObject = glCreateProgram();
if(programObject == 0)
return 0;
glAttachShader(programObject, vertexShader);
glAttachShader(programObject, fragmentShader);
// Bind vPosition to attribute 0
glBindAttribLocation(programObject, 0, "vPosition");
// Link the program
glLinkProgram(programObject);
// Check the link status
glGetProgramiv(programObject, GL_LINK_STATUS, &linked);
if(!linked)
{
GLint infoLen = 0;
glGetProgramiv(programObject, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetProgramInfoLog(programObject, infoLen, NULL, infoLog);
qDebug() <<"Error linking program:" << infoLog;
free(infoLog);
}
glDeleteProgram(programObject);
}
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
}
{//draw
GLfloat vVertices[] = {0.0f, 1.5f, 0.0f,
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f};
// Set the viewport
glViewport(0, 0, 1920, 1080);
// Clear the color buffer
glClear(GL_COLOR_BUFFER_BIT);
// Use the program object
glUseProgram(programObject);
// Load the vertex data
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vVertices);
glEnableVertexAttribArray(0);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
int size = 4 * renderBufferHeight * renderBufferWidth;
unsigned char *data2 = new unsigned char[size];
eglSwapBuffers( eglDisplay, eglSurface);
glReadPixels(0,0,renderBufferWidth,renderBufferHeight,GL_RGBA, GL_UNSIGNED_BYTE, data2);
qDebug() << glGetError() << eglGetError();
QImage saveImage(data2, renderBufferWidth, renderBufferHeight, QImage::Format_RGBA8888_Premultiplied);
saveImage.save("haha.png");
QCoreApplication a(argc, argv);
qDebug() << "done";
return a.exec();
}
Today I ran the same program on a machine with an NVIDIA graphics card and the proprietary driver installed, and it turned out that the code works fine there.
This is likely a bug in the Intel driver.

OpenGL combine Textures

I'm programming a 2D game in OpenGL and I have to output a level which consists of 20x15 fields.
So I'm currently outputting a texture for each field, which is quite slow (300 textures/frame).
But since the level never changes, I wondered if it's possible to combine the textures into one big, single texture before the game loop starts.
Then I would only have to output one texture with 4 texture coordinates (0/0)(0/1)(1/1)(1/0) and 4 glVertex2f() calls which specify the position in the window.
This is my current Code for each of the 300 fields:
glColor3f(1,1,1);
glBindTexture(GL_TEXTURE_2D,textur);
glBegin(GL_QUADS);
glTexCoord2f(textArea.a.x,textArea.b.y);glVertex2f(display.a.x,display.a.y);
glTexCoord2f(textArea.a.x,textArea.a.y);glVertex2f(display.a.x,display.b.y);
glTexCoord2f(textArea.b.x,textArea.a.y);glVertex2f(display.b.x,display.b.y);
glTexCoord2f(textArea.b.x,textArea.b.y);glVertex2f(display.b.x,display.a.y);
glEnd();
Note that I have the images for all possible field-types in one .tga-File. So I'm choosing the right one with glTexCoord2f().
The image-File with all Tiles is loaded into
GLuint textur;
So I bind the same texture for every field.
My goal is to decrease CPU time. Display lists didn't work because there is so much data to load onto the graphics card that, in the end, display lists were even slower. I also wasn't able to use VBOs because I don't use extensions like GLUT.
So my idea was to generate a single texture, which should be quite easy and effective.
I hope you can give me feedback on how I can combine textures and whether this method would be the easiest way to increase performance.
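For reference, the "one big texture" idea can be sketched with plain OpenGL 1.1 calls and no extensions: draw the level once with the existing per-field code, copy the framebuffer into a texture with glCopyTexImage2D, and afterwards draw that texture as a single quad per frame. drawAllFields(), levelWidth and levelHeight are placeholder names here, and on older implementations the copied region may need power-of-two dimensions:
// Before the game loop: bake the static level into one texture.
GLuint levelTexture;
glGenTextures(1, &levelTexture);
glClear(GL_COLOR_BUFFER_BIT);
drawAllFields(); // the existing 300-quad code, run once
glBindTexture(GL_TEXTURE_2D, levelTexture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Copy the rendered level (lower-left corner, levelWidth x levelHeight) into the texture.
glCopyTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 0, 0, levelWidth, levelHeight, 0);

// In the game loop: draw the whole level as one textured quad.
glBindTexture(GL_TEXTURE_2D, levelTexture);
glBegin(GL_QUADS);
glTexCoord2f(0, 0); glVertex2f(0, 0);
glTexCoord2f(0, 1); glVertex2f(0, levelHeight);
glTexCoord2f(1, 1); glVertex2f(levelWidth, levelHeight);
glTexCoord2f(1, 0); glVertex2f(levelWidth, 0);
glEnd();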
EDIT: these are the OpenGL functions I use in my program:
When I start the program, I initialize the window:
glfwInit();
if( !glfwOpenWindow(windowSize.x,windowSize.y, 0, 0, 0, 0, 0, 0, GLFW_WINDOW ) )
{ glfwTerminate();
return;
}
And that's all the game loop does with OpenGL:
int main()
{
//INIT HERE (see code above)
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
glAlphaFunc(GL_GREATER,0.1f);
glEnable(GL_ALPHA_TEST);
long loopStart;//measure loopcycle-time
do{
height = height > 0 ? height : 1;
glViewport( 0, 0, width, height ); //set Origin
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f ); //background-color
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0,windowSize.x,0,windowSize.y,0,128); //2D-Mode
glMatrixMode(GL_MODELVIEW);
loopStart=clock();
//(...) OUTPUT HERE (code see above)
glfwSwapBuffers(); // output the generated graphics data
printf("%4dms -> ",clock()-loopStart);
}while(...);
glDisable(GL_ALPHA_TEST);
glDisable(GL_TEXTURE_2D);
glfwTerminate();
}
I see you're using GLFW. You can add GLEW and GLM, and then you should use OpenGL 3.x or higher.
Here is a full example of how you can easily draw 2000 textured quads (with alpha blending) or more at 200 FPS or more on a low-budget laptop. It uses only one small texture, but it will also work with a 4096x4096 texture atlas. You get a huge performance win if the sub-texture size in the big texture exactly matches the size of the quad you draw, so use 50x50 pixels in the big texture as well. The demo code below also updates all 2000 quads each frame and sends them to the GPU; if you don't have to update them each frame and instead pass the scroll coordinates to the shader, you will gain performance again.
If you don't need blending, use alpha tests and you will gain even more speed.
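In a 3.3 core profile there is no fixed-function glAlphaFunc, so an alpha test means a discard in the fragment shader. A minimal sketch, assuming the UV and myTextureSampler names from the shader hints embedded in the demo code below:
static const char strFragmentShaderAlphaTest[] =
"#version 330 core \n"
"in vec2 UV; \n"
"out vec4 color; \n"
"uniform sampler2D myTextureSampler; \n"
"void main(){ \n"
"    vec4 texel = texture( myTextureSampler, UV ); \n"
"    if (texel.a < 0.1) \n"
"        discard; // skip (mostly) transparent texels instead of blending \n"
"    color = texel; \n"
"} \n";
The full demo follows.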
#define GLEW_STATIC
#include "glew.h"
#include "glfw.h"
#include "glm.hpp"
#include "glm/gtc/matrix_transform.hpp"
#include "glm/gtx/transform.hpp"
#include <sstream>
#include <fstream>
#include <vector>
#define BUFFER_OFFSET(i) ((char *)NULL + (i))
std::ofstream logger("Log\\Ausgabe.txt", (std::ios::out | std::ios::app));
class Vertex
{
public:
float x;
float y;
float z;
float tx;
float ty;
};
class Quad
{
public:
float x;
float y;
float width;
float height;
};
int getHighResTimeInMilliSeconds(bool bFirstRun);
GLuint buildShader();
void addQuadToLocalVerticeArray(Vertex * ptrVertexArrayLocal, Quad *quad, int *iQuadCounter);
int main()
{
logger << "Start" << std::endl;
if(!glfwInit())
exit(EXIT_FAILURE);
glfwOpenWindowHint(GLFW_OPENGL_VERSION_MAJOR,3);
glfwOpenWindowHint(GLFW_OPENGL_VERSION_MINOR,3);
glfwOpenWindowHint(GLFW_OPENGL_FORWARD_COMPAT, 1);
glfwOpenWindowHint(GLFW_OPENGL_PROFILE,GLFW_OPENGL_CORE_PROFILE);
if( !glfwOpenWindow(1366, 768,8,8,8,8,32,32,GLFW_FULLSCREEN) )
{
glfwTerminate();
exit( EXIT_FAILURE );
}
if (glewInit() != GLEW_OK)
exit( EXIT_FAILURE );
//Init
GLuint VertexArrayID;
GLuint vertexbuffer;
GLuint MatrixID;
GLuint TextureID;
GLuint Texture;
GLuint programID = buildShader();
// Create the texture in video memory
GLFWimage img;
int iResult = glfwReadImage("Graphics\\gfx.tga", &img, GLFW_NO_RESCALE_BIT);
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &Texture);
glBindTexture(GL_TEXTURE_2D, Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,32,32, 0, GL_RGBA, GL_UNSIGNED_BYTE, img.Data);
glfwFreeImage(&img);
Vertex * ptrVertexArrayLocal = new Vertex[12000];
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * 12000, NULL, GL_DYNAMIC_DRAW);
glm::mat4 Projection = glm::ortho(0.0f, (float)1366,0.0f, (float)768, 0.0f, 100.0f);
glm::mat4 Model = glm::mat4(1.0f);
glm::mat4 MVP = Projection * Model;
glViewport( 0, 0, 1366, 768 );
MatrixID = glGetUniformLocation(programID, "MVP");
glEnable(GL_CULL_FACE);
glEnable (GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
TextureID = glGetUniformLocation(programID, "myTextureSampler");
glUseProgram(programID);
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, Texture);
glUniform1i(TextureID, 0);
int iQuadVerticeCounter=0;
int iNumOfQuads = 2000;
Quad * ptrQuads = new Quad[iNumOfQuads];
//LOCAL VERTICES CHANGES EACH LOOP
for (int i=0; i<iNumOfQuads; i++)
{
ptrQuads[i].width = 32;
ptrQuads[i].height = 32;
ptrQuads[i].x = (float)(rand() % (1334));
ptrQuads[i].y = (float)(rand() % (736));
}
int iCurrentTime=0;
int iFPS=0;
int iFrames=0;
int iFrameCounterTimeStart=0;
int running = GL_TRUE;
bool bFirstRun=true;
while( running )
{
iCurrentTime = getHighResTimeInMilliSeconds(bFirstRun);
bFirstRun=false;
//UPDATE ALL QUADS EACH FRAME!
for (int i=0; i<iNumOfQuads; i++)
{
ptrQuads[i].width = 32;
ptrQuads[i].height = 32;
ptrQuads[i].x = ptrQuads[i].x;
ptrQuads[i].y = ptrQuads[i].y;
addQuadToLocalVerticeArray(ptrVertexArrayLocal, &ptrQuads[i], &iQuadVerticeCounter);
}
//DO THE RENDERING
glClear( GL_COLOR_BUFFER_BIT );
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferSubData(GL_ARRAY_BUFFER, 0,sizeof(Vertex) * iQuadVerticeCounter, ptrVertexArrayLocal);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(0,3,GL_FLOAT,GL_FALSE,sizeof(Vertex),BUFFER_OFFSET(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(1,2,GL_FLOAT,GL_FALSE,sizeof(Vertex),BUFFER_OFFSET(3*sizeof(GL_FLOAT)));
glDrawArrays(GL_TRIANGLES, 0, iQuadVerticeCounter);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
iQuadVerticeCounter=0;
glfwSwapBuffers();
//END OF DOING THE RENDERING
running = !glfwGetKey( GLFW_KEY_ESC ) &&glfwGetWindowParam( GLFW_OPENED );
iFrames++;
if (iCurrentTime >= iFrameCounterTimeStart + 1000.0f)
{
iFPS = (int)((iCurrentTime - iFrameCounterTimeStart) / 1000.0f * iFrames);
iFrameCounterTimeStart = iCurrentTime;
iFrames = 0;
logger << "FPS: " << iFPS << std::endl;
}
}
glfwTerminate();
exit( EXIT_SUCCESS );
}
int getHighResTimeInMilliSeconds(bool bFirstRun)
{
if (bFirstRun)
glfwSetTime(0);
return (int)((float)glfwGetTime()*1000.0f);
}
GLuint buildShader()
{
//Hint: Shader in the TXT-File looks like this
/*std::stringstream ssVertexShader;
ssVertexShader << "#version 330 core"<< std::endl
<< "layout(location = 0) in vec3 vertexPosition_modelspace;"<< std::endl
<< "layout(location = 1) in vec2 vertexUV;"<< std::endl
<< "out vec2 UV;"<< std::endl
<< "uniform mat4 MVP;"<< std::endl
<< "void main(){"<< std::endl
<< "vec4 v = vec4(vertexPosition_modelspace,1);"<< std::endl
<< "gl_Position = MVP * v;"<< std::endl
<< "UV = vertexUV;"<< std::endl
<< "}"<< std::endl;*/
std::string strVertexShaderCode;
std::ifstream VertexShaderStream("Shader\\VertexShader.txt", std::ios::in);
if(VertexShaderStream.is_open())
{
std::string Line = "";
while(getline(VertexShaderStream, Line))
strVertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
//Hint: Shader in the TXT-File looks like this
/*std::stringstream ssFragmentShader;
ssFragmentShader << "#version 330 core\n"
"in vec2 UV;\n"
"out vec4 color;\n"
"uniform sampler2D myTextureSampler;\n"
"void main(){\n"
"color = texture( myTextureSampler, UV ).rgba;\n"
"}\n";*/
std::string strFragmentShaderCode;
std::ifstream FragmentShaderStream("Shader\\FragmentShader.txt", std::ios::in);
if(FragmentShaderStream.is_open())
{
std::string Line = "";
while(getline(FragmentShaderStream, Line))
strFragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLuint gluiVertexShaderId = glCreateShader(GL_VERTEX_SHADER);
char const * VertexSourcePointer = strVertexShaderCode.c_str();
glShaderSource(gluiVertexShaderId, 1, &VertexSourcePointer , NULL);
glCompileShader(gluiVertexShaderId);
GLint Result = GL_FALSE;
int InfoLogLength;
glGetShaderiv(gluiVertexShaderId, GL_COMPILE_STATUS, &Result);
glGetShaderiv(gluiVertexShaderId, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(gluiVertexShaderId, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
std::string strInfoLog = std::string(&VertexShaderErrorMessage[0]);
GLuint gluiFragmentShaderId = glCreateShader(GL_FRAGMENT_SHADER);
char const * FragmentSourcePointer = strFragmentShaderCode.c_str();
glShaderSource(gluiFragmentShaderId, 1, &FragmentSourcePointer , NULL);
glCompileShader(gluiFragmentShaderId);
Result = GL_FALSE;
glGetShaderiv(gluiFragmentShaderId, GL_COMPILE_STATUS, &Result);
glGetShaderiv(gluiFragmentShaderId, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(gluiFragmentShaderId, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
strInfoLog = std::string(&FragmentShaderErrorMessage[0]);
GLuint gluiProgramId = glCreateProgram();
glAttachShader(gluiProgramId, gluiVertexShaderId);
glAttachShader(gluiProgramId, gluiFragmentShaderId);
glLinkProgram(gluiProgramId);
Result = GL_FALSE;
glGetProgramiv(gluiProgramId, GL_LINK_STATUS, &Result);
glGetProgramiv(gluiProgramId, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage( std::max(InfoLogLength, int(1)) );
glGetProgramInfoLog(gluiProgramId, InfoLogLength, NULL, &ProgramErrorMessage[0]);
strInfoLog = std::string(&ProgramErrorMessage[0]);
glDeleteShader(gluiVertexShaderId);
glDeleteShader(gluiFragmentShaderId);
return gluiProgramId;
}
void addQuadToLocalVerticeArray(Vertex * ptrVertexArrayLocal, Quad *quad, int *ptrQuadVerticeCounter)
{
// Top left
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 1.0f;
++(*ptrQuadVerticeCounter);
// Bottom left
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y - quad->height;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 0.0f;
++(*ptrQuadVerticeCounter);
// Bottom right
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x + quad->width;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y - quad->height;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 1.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 0.0f;
++(*ptrQuadVerticeCounter);
// Bottom right (repeated for the second triangle)
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x + quad->width;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y - quad->height;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 1.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 0.0f;
++(*ptrQuadVerticeCounter);
// Top right
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x + quad->width;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 1.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 1.0f;
++(*ptrQuadVerticeCounter);
// Top left (closes the second triangle)
ptrVertexArrayLocal[*ptrQuadVerticeCounter].x = quad->x;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].y = quad->y;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].z = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].tx = 0.0f;
ptrVertexArrayLocal[*ptrQuadVerticeCounter].ty = 1.0f;
++(*ptrQuadVerticeCounter);
}
I have now identified a huge time-killer: the textures I was using were too large, and their resolution was very inefficient.
The main texture containing the level sprites had a resolution of 2200x2200 pixels, so the GPU padded it up to 4096x4096 and processed a huge amount of data.
The image contains 10x10 different level tiles which are output on the screen at a resolution of 50x50 pixels each.
So I saved the tiles file at a lower resolution (1020x1020 pixels -> each tile = 102x102 px) and now I have a loop-cycle time of <= 15 ms.
This isn't perfect, but compared with my previous 30-60 ms it is huge progress.
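As a rough illustration of the padding described above, assuming the driver rounds texture dimensions up to the next power of two:
// Sketch: compute the padded size for a non-power-of-two texture dimension.
unsigned int nextPowerOfTwo(unsigned int v)
{
    unsigned int p = 1;
    while (p < v)
        p <<= 1;
    return p;
}
// nextPowerOfTwo(2200) == 4096, so the 2200x2200 atlas ends up occupying
// 4096x4096 texels -- roughly 3.5 times the data of the original image.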

OpenGL 3.2 not working on Mac OS X with GLFW, can't draw anything

I'm setting up an OpenGL 3.2 Xcode project using GLFW and GLEW. I've added the GLFW include path to the header search paths and added libglfw.a to the project. I've also added the Cocoa and OpenGL frameworks. Then I try to use this sample code:
main.cpp:
#include <stdio.h>
#include <string.h>
#include <GL/glew.h>
#include <GL/glfw.h>
#include "math3d.h"
GLuint VBO;
static const char* pVS = " \n\
#version 150 \n\
\n\
layout (location = 0) in vec3 Position; \n\
\n\
void main() \n\
{ \n\
gl_Position = vec4(0.5 * Position.x, 0.5 * Position.y, Position.z, 1.0); \n\
}";
static const char* pFS = " \n\
#version 150 \n\
\n\
out vec4 FragColor; \n\
\n\
void main() \n\
{ \n\
FragColor = vec4(1.0, 0.0, 0.0, 1.0); \n\
}";
static void RenderScene()
{
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(0);
glfwSwapBuffers();
}
static void CreateVertexBuffer()
{
Vector3f Vertices[3];
Vertices[0] = Vector3f(-1.0f, -1.0f, 0.0f);
Vertices[1] = Vector3f(1.0f, -1.0f, 0.0f);
Vertices[2] = Vector3f(0.0f, 1.0f, 0.0f);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
}
static void AddShader(GLuint ShaderProgram, const char* pShaderText, GLenum ShaderType)
{
GLuint ShaderObj = glCreateShader(ShaderType);
if (ShaderObj == 0) {
fprintf(stderr, "Error creating shader type %d\n", ShaderType);
return ;
}
const GLchar* p[1];
p[0] = pShaderText;
GLint Lengths[1];
Lengths[0]= strlen(pShaderText);
glShaderSource(ShaderObj, 1, p, Lengths);
glCompileShader(ShaderObj);
GLint success;
glGetShaderiv(ShaderObj, GL_COMPILE_STATUS, &success);
if (!success) {
GLchar InfoLog[1024];
glGetShaderInfoLog(ShaderObj, 1024, NULL, InfoLog);
fprintf(stderr, "Error compiling shader type %d: '%s'\n", ShaderType, InfoLog);
return;
}
glAttachShader(ShaderProgram, ShaderObj);
}
static void CompileShaders()
{
GLuint ShaderProgram = glCreateProgram();
if (ShaderProgram == 0) {
fprintf(stderr, "Error creating shader program\n");
return;
}
AddShader(ShaderProgram, pVS, GL_VERTEX_SHADER);
AddShader(ShaderProgram, pFS, GL_FRAGMENT_SHADER);
GLint Success = 0;
GLchar ErrorLog[1024] = { 0 };
glLinkProgram(ShaderProgram);
glGetProgramiv(ShaderProgram, GL_LINK_STATUS, &Success);
if (Success == 0) {
glGetProgramInfoLog(ShaderProgram, sizeof(ErrorLog), NULL, ErrorLog);
fprintf(stderr, "Error linking shader program: '%s'\n", ErrorLog);
return;
}
glValidateProgram(ShaderProgram);
glGetProgramiv(ShaderProgram, GL_VALIDATE_STATUS, &Success);
if (!Success) {
glGetProgramInfoLog(ShaderProgram, sizeof(ErrorLog), NULL, ErrorLog);
fprintf(stderr, "Invalid shader program: '%s'\n", ErrorLog);
return;
}
glUseProgram(ShaderProgram);
}
int main(int argc, char** argv)
{
glfwInit();
glfwOpenWindowHint(GLFW_OPENGL_VERSION_MAJOR, 3);
glfwOpenWindowHint(GLFW_OPENGL_VERSION_MINOR, 2);
glfwOpenWindowHint(GLFW_OPENGL_PROFILE,GLFW_OPENGL_CORE_PROFILE);
glfwOpenWindow(600, 600, 0, 0, 0, 0, 0, 0, GLFW_WINDOW);
GLenum err = glewInit();
if (GLEW_OK != err)
{
/* Problem: glewInit failed, something is seriously wrong. */
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
}
CreateVertexBuffer();
CompileShaders();
glClearColor(1.0f, 1.0f, 0.0f, 1.0f);
while (1)
{
RenderScene();
}
return 0;
}
math3d.h:
#ifndef GLFWTest_math3d_h
#define GLFWTest_math3d_h
struct Vector3f
{
float x;
float y;
float z;
Vector3f()
{
}
Vector3f(float _x, float _y, float _z)
{
x = _x;
y = _y;
z = _z;
}
};
#endif
but nothing gets drawn; I only get a yellow window. Any idea why this is happening? I'm using a MacBook Pro that is a year or so old, with OS X Lion installed.
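As in the macOS validation question above, an OpenGL 3.2 core-profile context needs a vertex array object bound before vertex attributes are specified and drawn, and this sample never creates one. A sketch of CreateVertexBuffer with a VAO added (the VAO variable is new and not part of the original code):
GLuint VAO; // new global, alongside VBO

static void CreateVertexBuffer()
{
    Vector3f Vertices[3];
    Vertices[0] = Vector3f(-1.0f, -1.0f, 0.0f);
    Vertices[1] = Vector3f(1.0f, -1.0f, 0.0f);
    Vertices[2] = Vector3f(0.0f, 1.0f, 0.0f);
    // Create and bind a VAO first; the attribute setup done later in
    // RenderScene is then recorded against it (it stays bound throughout).
    glGenVertexArrays(1, &VAO);
    glBindVertexArray(VAO);
    glGenBuffers(1, &VBO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
}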
