Error with VAO on Nexus 4 device only - opengl-es

I've got a strange error on my Nexus 4 with OpenGL ES 2 when I use vertex array objects.
Here is some information:
Everything works when I don't use VAOs.
Everything works on other devices, and on an iPad 2 both with and without VAOs.
glGetError() doesn't return an error.
Because of the error, some glitches appear in the game (some elements take on a different appearance).
My VBOs are dynamic (I update them with glBufferData).
Here is the error:
Adreno-ES20(16818): : validate_vertex_attrib_state: No vertex attrib is enabled in a draw call!
And here is my code:
void Renderer::setVertexBuffer( Uint32 stream, const Base* vertexBuffer, std::size_t stride, Uint32 startVertex, Uint32 endVertex )
{
    static const bool VAOSupported = this->isExtensionPresent(VertexArrayObject);
    if( VAOSupported )
    {
        if( vertexBuffer->vao.isReady() == false )
        {
            // Bind VAO.
            glBindVertexArrayOES( vertexBuffer->vao.getId() );
            // Bind filled VBO.
            glCheck( glBindBuffer( GL_ARRAY_BUFFER, vertexBuffer->getId() ) );
            // Set attributes with the vertex format.
            this->applyVertexFormat( startVertex, endVertex );
            // Unbind buffer and VAO.
            glBindVertexArrayOES(0);
            vertexBuffer->vao.isReady(true);
        }
        glBindVertexArrayOES( vertexBuffer->vao.getId() );
    }
    else
    {
        glBindVertexArrayOES(0);
        glCheck( glBindBuffer( GL_ARRAY_BUFFER, vertexBuffer->getId() ) );
        this->applyVertexFormat( startVertex, endVertex );
    }
}
////////////////////////////////////////////////////////////
void Renderer::setIndexBuffer( const Buffer* indexBuffer, std::size_t stride )
{
    glCheck( glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, indexBuffer->getId() ) );
    this->usedIndexBufferStride = stride;
}
////////////////////////////////////////////////////////////
void Renderer::applyVertexFormat( Uint32 startVertex, Uint32 endVertex )
{
    const Uint32 stride = this->vertexFormat->getStride();
    for( Uint32 i = 0; i < this->vertexFormat->getAttributCount(); i++ )
    {
        const VertexElement& element = this->vertexFormat->getAttribut(i);
        glCheck( glEnableVertexAttribArray( element.usage ) );
        glCheck( glVertexAttribPointer( element.usage,
                                        element.type,
                                        element.type,
                                        element.normalize,
                                        stride,
                                        BUFFER_OFFSET( element.offset + startVertex * stride ) ) );
    }
}
And here is how I use it:
renderer->setFormat(geometry->getFormat()); // Only save a pointer to the format to use in apply method.
renderer->setVertexBuffer(geometry->getVertexBuffer());
renderer->setIndexBuffer(geometry->getIndexBuffer());
renderer->draw(GL_TRIANGLES, geometry->indiceCount);

Are you sure usage is an appropriate name for the field that defines which attribute array to associate the pointer with? Buffer objects already have a property called usage (e.g. GL_DYNAMIC_DRAW); location might make more sense.
You have a much more serious issue in your code, however:
element.type cannot be both the type of your data and the number of components. glVertexAttribPointer (...) only accepts 1, 2, 3 or 4 components, and an enumerant like GL_FLOAT has a value much larger than 4.
Assuming glCheck( ... ) correctly wraps glGetError (...), this call should be reporting GL_INVALID_VALUE.
This leads me to believe that the loop in Renderer::applyVertexFormat( Uint32 startVertex, Uint32 endVertex ) never actually executes.
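If the vertex format stores the component count separately from the data type, the fixed loop would look something like this (a minimal sketch; the element.components field is hypothetical and stands in for however your format exposes the 1-4 component count):
void Renderer::applyVertexFormat( Uint32 startVertex, Uint32 endVertex )
{
    const Uint32 stride = this->vertexFormat->getStride();
    for( Uint32 i = 0; i < this->vertexFormat->getAttributCount(); i++ )
    {
        const VertexElement& element = this->vertexFormat->getAttribut(i);
        glCheck( glEnableVertexAttribArray( element.usage ) );
        glCheck( glVertexAttribPointer( element.usage,
                                        element.components, // number of components, 1 to 4 (hypothetical field)
                                        element.type,       // data type, e.g. GL_FLOAT
                                        element.normalize,
                                        stride,
                                        BUFFER_OFFSET( element.offset + startVertex * stride ) ) );
    }
}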

Related

Why is my code not displaying on a phone emulator with a vertex array object in OpenGL

I'm new to this, but what I'm trying to do is use a vertex array object to display something on a phone emulator. The problem is that it isn't displaying anything on the phone.
What I have understood of vertex array objects is that a VAO is something like a folder that can point to different buffers, and inside the VAO there are vertex buffer objects. What I have done in the code is create two buffers and a VAO. I bind the buffers to an array called mVBOIds, then bind the VAO to its own array, and then I set up the vertex attributes with the buffers.
To populate these buffers I have a file named torus2.raw that has data that looks like this:
# Data order:
# Vertex
# Normal vector
# Texture coordinate
12.329425 0.0 -8.957851 1.0
-0.809017 1.0E-6 0.587785 0.0
0.05 1.0
Following that data order, each vertex position has 4 values, each normal vector has 4, and each texture coordinate has 2.
Here is the part of the code that creates the buffers and the VAO; it is inside the onSurfaceCreated method:
// Generate VBO Ids and load the VBOs with data
GLES30.glGenBuffers ( 2, mVBOIds, 0 );
GLES30.glBindBuffer ( GLES30.GL_ARRAY_BUFFER, mVBOIds[0] );
mVertices.position ( 0 );
GLES30.glBufferData ( GLES30.GL_ARRAY_BUFFER, mVertices.remaining() * 4,
                      mVertices, GLES30.GL_STATIC_DRAW );
GLES30.glBindBuffer ( GLES30.GL_ELEMENT_ARRAY_BUFFER, mVBOIds[1] );
mNormals.position ( 0 );
GLES30.glBufferData ( GLES30.GL_ELEMENT_ARRAY_BUFFER, 4 * mNormals.remaining(),
                      mNormals, GLES30.GL_STATIC_DRAW );
// Generate VAO Id
GLES30.glGenVertexArrays ( 1, mVAOId, 0 );
// Bind the VAO and then setup the vertex
// attributes
GLES30.glBindVertexArray ( mVAOId[0] );
GLES30.glBindBuffer ( GLES30.GL_ARRAY_BUFFER, mVBOIds[0] );
GLES30.glBindBuffer ( GLES30.GL_ELEMENT_ARRAY_BUFFER, mVBOIds[1] );
GLES30.glEnableVertexAttribArray (VERTEX_POS_INDX);
GLES30.glEnableVertexAttribArray (VERTEX_NORM_INDX);
GLES30.glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE,
                               GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                               0 );
GLES30.glVertexAttribPointer ( VERTEX_NORM_INDX, VERTEX_NORM_SIZE,
                               GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                               ( VERTEX_POS_SIZE * 4 ) );
// Reset to the default VAO
GLES30.glBindVertexArray ( 0 );
GLES30.glClearColor ( 0.15f, 0.15f, 0.15f, 1.0f );
GLES30.glEnable(GLES30.GL_DEPTH_TEST);
After this, when the buffers and the VAO are set up, I use the onDrawFrame method to draw it to the display:
public void onDrawFrame ( GL10 glUnused )
{
    // Initiate the model-view matrix as identity matrix
    Matrix.setIdentityM(mViewMatrix, 0);
    // Define a translation transformation
    Matrix.translateM(mViewMatrix, 0, 0.0f, 0.0f, -60.0f);
    // Define a rotation transformation
    Matrix.rotateM(mViewMatrix, 0, 90.0f, 1.0f, 0.0f, 0.0f);
    // Calculate the model-view and projection transformation as composite transformation
    Matrix.multiplyMM (mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0);
    // Clear the color buffer
    GLES30.glClear ( GLES30.GL_COLOR_BUFFER_BIT | GLES30.GL_DEPTH_BUFFER_BIT );
    // Use the program object
    GLES30.glUseProgram ( mProgramObject );
    // Make MVP matrix accessible in the vertex shader
    mMVPMatrixHandle = GLES30.glGetUniformLocation(mProgramObject, "uMVPMatrix");
    GLES30.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
    // Light position:
    vLightPositionHandle = GLES30.glGetUniformLocation(mProgramObject, "vLightPosition");
    GLES30.glUniform4fv(vLightPositionHandle, 1, lightPosition, 0);
    // Light color:
    vLightColorDfHandle = GLES30.glGetUniformLocation(mProgramObject, "vLightColorDf");
    GLES30.glUniform4fv(vLightColorDfHandle, 1, lightColorDf, 0);
    // Material color:
    vMaterialColorDfHandle = GLES30.glGetUniformLocation(mProgramObject, "vMaterialColorDf");
    GLES30.glUniform4fv(vMaterialColorDfHandle, 1, materialColorDf, 0);
    GLES30.glBindVertexArray ( mVAOId[0] );
    // Draw with the VAO settings
    GLES30.glDrawElements ( GLES30.GL_TRIANGLES, mNormals.remaining(), GLES30.GL_UNSIGNED_SHORT, 0 );
    // Return to the default VAO
    GLES30.glBindVertexArray ( 0 );
}
But the problem is that I can't get anything to display. I've tried double-checking the variables, checking whether they are empty, etc., but I could not find the culprit. It could also be that I've understood the code differently and made a logical error, but I can't see what it is. The result should be a blue-ish, donut-shaped model.
This is the whole code:
import android.content.Context;
import android.opengl.GLES30;
import android.opengl.GLSurfaceView;
import android.opengl.Matrix;
import android.util.Log;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
import se.hig.dvg306.modul3app.tools.ResourceHandler;
public class Modul3Renderer implements GLSurfaceView.Renderer
{
    //
    // Constructor - loads model data from a res file and creates byte buffers for
    // vertex data and for normal data
    //
    public Modul3Renderer (Context context)
    {
        appContext = context;
        Log.e(TAG, "--->>> Creating ModelLoader...");
        ModelLoader modelLoader = new ModelLoaderImpl ();
        Log.e(TAG, "--->>> ...finished.");
        Log.e(TAG, "--->>> Loading model...");
        Log.e(TAG, "--->>> Starting with vertices...");
        float[] mVerticesData; //= new float[0];
        try {
            mVerticesData = modelLoader.loadModel (context, R.raw.torus2, 0, 10, 0);
        } catch (IOException e) {
            throw new RuntimeException (e);
        }
        Log.e(TAG, "--->>> ...finished.");
        // Process vertex data
        // 4: because of 4 elements per vertex position
        nbrOfVertices = mVerticesData.length / 10;
        mVertices = ByteBuffer.allocateDirect(mVerticesData.length * 4)
                .order(ByteOrder.nativeOrder()).asFloatBuffer();
        mVertices.put(mVerticesData).position(0);
        Log.e(TAG, "--->>> Starting with normals...");
        float[] mNormalData; //= new float[0];
        try {
            mNormalData = modelLoader.loadModel (context, R.raw.torus2, 4, 4, 6);
        } catch (IOException e) {
            throw new RuntimeException (e);
        }
        Log.e(TAG, "--->>> ...finished.");
        // Process normal data
        // 4: because of 4 elements per vertex position
        nbrOfNormals = mNormalData.length / 4;
        mNormals = ByteBuffer.allocateDirect(mNormalData.length * 4)
                .order(ByteOrder.nativeOrder()).asFloatBuffer();
        mNormals.put(mNormalData).position(0);
    }
    ///
    // Create a shader object, load the shader source, and
    // compile the shader.
    //
    private int createShader(int type, String shaderSrc )
    {
        int shader;
        int[] compiled = new int[1];
        // Create the shader object
        shader = GLES30.glCreateShader ( type );
        if ( shader == 0 )
        {
            return 0;
        }
        // Load the shader source
        GLES30.glShaderSource ( shader, shaderSrc );
        // Compile the shader
        GLES30.glCompileShader ( shader );
        // Check the compile status
        GLES30.glGetShaderiv ( shader, GLES30.GL_COMPILE_STATUS, compiled, 0 );
        if ( compiled[0] == 0 )
        {
            Log.e ( TAG, GLES30.glGetShaderInfoLog ( shader ) );
            GLES30.glDeleteShader ( shader );
            return 0;
        }
        return shader;
    }
    ///
    // Initialize the shader and program object
    //
    public void onSurfaceCreated ( GL10 glUnused, EGLConfig config )
    {
        int vertexShader;
        int fragmentShader;
        int programObject;
        int[] linked = new int[1];
        // Load the source code for the vertex shader program from a res file:
        try {
            vShaderStr = ResourceHandler.readTextData(appContext, R.raw.vertex_shader);
        } catch (IOException e) {
            Log.e ( TAG, "--->>> Could not load source code for vertex shader.");
            throw new RuntimeException (e);
        }
        Log.e ( TAG, "--->>> Loaded vertex shader: " + vShaderStr);
        // Load the source code for the fragment shader program from a res file:
        try {
            fShaderStr = ResourceHandler.readTextData(appContext, R.raw.fragment_shader);
        } catch (IOException e) {
            Log.e ( TAG, "--->>> Could not load source code for fragment shader.");
            throw new RuntimeException (e);
        }
        Log.e ( TAG, "--->>> Loaded fragment shader: " + fShaderStr);
        // Create the vertex/fragment shaders
        vertexShader = createShader( GLES30.GL_VERTEX_SHADER, vShaderStr );
        fragmentShader = createShader( GLES30.GL_FRAGMENT_SHADER, fShaderStr );
        // Create the program object
        programObject = GLES30.glCreateProgram();
        if ( programObject == 0 )
        {
            return;
        }
        GLES30.glAttachShader ( programObject, vertexShader );
        GLES30.glAttachShader ( programObject, fragmentShader );
        // Bind vPosition to attribute 0
        GLES30.glBindAttribLocation ( programObject, 0, "vPosition" );
        // Bind vNormal to attribute 1
        GLES30.glBindAttribLocation ( programObject, 1, "vNormal" );
        // Link the program
        GLES30.glLinkProgram ( programObject );
        // Check the link status
        GLES30.glGetProgramiv ( programObject, GLES30.GL_LINK_STATUS, linked, 0 );
        if ( linked[0] == 0 )
        {
            Log.e ( TAG, "Error linking program:" );
            Log.e ( TAG, GLES30.glGetProgramInfoLog ( programObject ) );
            GLES30.glDeleteProgram ( programObject );
            return;
        }
        // Store the program object
        mProgramObject = programObject;
        // Generate VBO Ids and load the VBOs with data
        GLES30.glGenBuffers ( 2, mVBOIds, 0 );
        GLES30.glBindBuffer ( GLES30.GL_ARRAY_BUFFER, mVBOIds[0] );
        mVertices.position ( 0 );
        GLES30.glBufferData ( GLES30.GL_ARRAY_BUFFER, mVertices.remaining() * 4,
                              mVertices, GLES30.GL_STATIC_DRAW );
        GLES30.glBindBuffer ( GLES30.GL_ELEMENT_ARRAY_BUFFER, mVBOIds[1] );
        mNormals.position ( 0 );
        GLES30.glBufferData ( GLES30.GL_ELEMENT_ARRAY_BUFFER, 4 * mNormals.remaining(),
                              mNormals, GLES30.GL_STATIC_DRAW );
        // Generate VAO Id
        GLES30.glGenVertexArrays ( 1, mVAOId, 0 );
        // Bind the VAO and then setup the vertex
        // attributes
        GLES30.glBindVertexArray ( mVAOId[0] );
        GLES30.glBindBuffer ( GLES30.GL_ARRAY_BUFFER, mVBOIds[0] );
        GLES30.glBindBuffer ( GLES30.GL_ELEMENT_ARRAY_BUFFER, mVBOIds[1] );
        GLES30.glEnableVertexAttribArray (VERTEX_POS_INDX);
        GLES30.glEnableVertexAttribArray (VERTEX_NORM_INDX);
        GLES30.glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE,
                                       GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                                       0 );
        GLES30.glVertexAttribPointer ( VERTEX_NORM_INDX, VERTEX_NORM_SIZE,
                                       GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                                       ( VERTEX_POS_SIZE * 4 ) );
        // Reset to the default VAO
        GLES30.glBindVertexArray ( 0 );
        GLES30.glClearColor ( 0.15f, 0.15f, 0.15f, 1.0f );
        GLES30.glEnable(GLES30.GL_DEPTH_TEST);
    }
    //
    // Draw a torus using the shader pair created in onSurfaceCreated()
    //
    public void onDrawFrame ( GL10 glUnused )
    {
        // Initiate the model-view matrix as identity matrix
        Matrix.setIdentityM(mViewMatrix, 0);
        // Define a translation transformation
        Matrix.translateM(mViewMatrix, 0, 0.0f, 0.0f, -60.0f);
        // Define a rotation transformation
        Matrix.rotateM(mViewMatrix, 0, 90.0f, 1.0f, 0.0f, 0.0f);
        // Calculate the model-view and projection transformation as composite transformation
        Matrix.multiplyMM (mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0);
        // Clear the color buffer
        GLES30.glClear ( GLES30.GL_COLOR_BUFFER_BIT | GLES30.GL_DEPTH_BUFFER_BIT );
        // Use the program object
        GLES30.glUseProgram ( mProgramObject );
        // Make MVP matrix accessible in the vertex shader
        mMVPMatrixHandle = GLES30.glGetUniformLocation(mProgramObject, "uMVPMatrix");
        GLES30.glUniformMatrix4fv(mMVPMatrixHandle, 1, false, mMVPMatrix, 0);
        // Light position:
        vLightPositionHandle = GLES30.glGetUniformLocation(mProgramObject, "vLightPosition");
        GLES30.glUniform4fv(vLightPositionHandle, 1, lightPosition, 0);
        // Light color:
        vLightColorDfHandle = GLES30.glGetUniformLocation(mProgramObject, "vLightColorDf");
        GLES30.glUniform4fv(vLightColorDfHandle, 1, lightColorDf, 0);
        // Material color:
        vMaterialColorDfHandle = GLES30.glGetUniformLocation(mProgramObject, "vMaterialColorDf");
        GLES30.glUniform4fv(vMaterialColorDfHandle, 1, materialColorDf, 0);
        GLES30.glBindVertexArray ( mVAOId[0] );
        // Draw with the VAO settings
        GLES30.glDrawElements ( GLES30.GL_TRIANGLES, mNormals.remaining(), GLES30.GL_UNSIGNED_SHORT, 0 );
        // Return to the default VAO
        GLES30.glBindVertexArray ( 0 );
    }
    //
    // Handle surface changes
    //
    public void onSurfaceChanged ( GL10 glUnused, int width, int height )
    {
        mWidth = width;
        mHeight = height;
        GLES30.glViewport(0, 0, width, height);
        float ratio = (float) width / height;
        // this projection matrix is applied to object coordinates
        Matrix.frustumM(mProjectionMatrix, 0, -ratio, ratio, -1.0f, 1.0f, 0.5f, 1000.0f);
    }
    // Member variables
    private Context appContext;
    private int mWidth;
    private int mHeight;
    private int nbrOfVertices;
    private FloatBuffer mVertices;
    final int VERTEX_POS_SIZE = 4;  // x, y and z
    final int VERTEX_NORM_SIZE = 4; // r, g, b, and a
    final int VERTEX_POS_INDX = 0;
    final int VERTEX_NORM_INDX = 1;
    private int [] mVBOIds = new int[2];
    private int [] mVAOId = new int[1];
    private int nbrOfNormals;
    private FloatBuffer mNormals;
    private int mProgramObject;
    private int mMVPMatrixHandle;
    // Transformation data:
    private final float[] mMVPMatrix = new float[16];
    private final float[] mProjectionMatrix = new float[16];
    private final float[] mViewMatrix = new float[16];
    final int VERTEX_STRIDE = ( 4 * ( VERTEX_POS_SIZE + VERTEX_NORM_SIZE ) );
    // Light position and color (only diffuse term now):
    private int vLightPositionHandle;
    private final float lightPosition [] = {175.0f, 75.0f, 125.0f, 0.0f};
    // Light color (only diffuse term now):
    private int vLightColorDfHandle;
    private final float lightColorDf [] = {0.98f, 0.98f, 0.98f, 1.0f};
    // Material color (only diffuse term now):
    private int vMaterialColorDfHandle;
    private final float materialColorDf [] = {0.62f, 0.773f, 0.843f, 1.0f};
    // To be read when creating the instance:
    private String vShaderStr;
    private String fShaderStr;
    private static String TAG = "Modul3Renderer";
}
A GL_ELEMENT_ARRAY_BUFFER object is meant for the indices and should contain integral data. You should use a GL_ARRAY_BUFFER for the normal vectors:
GLES30.glBindBuffer ( GLES30.GL_ARRAY_BUFFER, mVBOIds[1] );
mNormals.position ( 0 );
GLES30.glBufferData ( GLES30.GL_ARRAY_BUFFER, 4 * mNormals.remaining(),
                      mNormals, GLES30.GL_STATIC_DRAW );
When glVertexAttribPointer is called, the buffer currently bound to the GL_ARRAY_BUFFER target is associated with the specified attribute index, and the ID of that buffer object is stored in the state vector of the currently bound VAO. So you have to bind the corresponding buffer object before calling glVertexAttribPointer:
GLES30.glBindBuffer(GLES30.GL_ARRAY_BUFFER, mVBOIds[0]);
GLES30.glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE,
                               GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                               0 );
GLES30.glBindBuffer(GLES30.GL_ARRAY_BUFFER, mVBOIds[1]);
GLES30.glVertexAttribPointer ( VERTEX_NORM_INDX, VERTEX_NORM_SIZE,
                               GLES30.GL_FLOAT, false, VERTEX_STRIDE,
                               ( VERTEX_POS_SIZE * 4 ) );

Is there a way to let the printer handle multiple copies when printing using GDI+?

I'm printing using the hDC I get from the print dialog and was wondering if there's a way to let the printer, printer driver, or OS handle multiple copies and collation settings. I can of course print the pages multiple times, but that doesn't feel like the right way to go about this...
This is roughly what my code looks like now:
ULONG_PTR gdiplusToken = 0;
Gdiplus::GdiplusStartupInput gdiplusStartupInput;
Gdiplus::GdiplusStartup( &gdiplusToken, &gdiplusStartupInput, NULL );

DOCINFO documentInfo;
memset( &documentInfo, 0, sizeof( documentInfo ) );
documentInfo.cbSize = sizeof( documentInfo );
documentInfo.lpszDocName = mDocumentName;

HDC printerHDC = printDialogData.hDC; // printDialogData is PRINTDLG struct from PrintDlg
::StartDoc( printerHDC, &documentInfo );
for( int page = 0; page < numberOfPages; ++page )
{
    ::StartPage( printerHDC );
    // print the page
    ::EndPage( printerHDC );
}
::EndDoc( printerHDC );
::DeleteDC( printerHDC );

Gdiplus::GdiplusShutdown( gdiplusToken );
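For reference, the usual Win32 approach is to ask the driver to do the copies for you. Here is a hedged sketch, not from the original post: passing PD_USEDEVMODECOPIESANDCOLLATE to PrintDlg asks it to store the copy and collate settings in the DEVMODE (dmCopies/dmCollate) whenever the driver can produce the copies itself, so the application renders the document only once (hWndOwner is an assumed window handle):
PRINTDLG pd = {};
pd.lStructSize = sizeof( pd );
pd.hwndOwner = hWndOwner; // assumption: your top-level window
pd.Flags = PD_RETURNDC | PD_USEDEVMODECOPIESANDCOLLATE;
if( ::PrintDlg( &pd ) )
{
    // When the driver handles copies/collation, the requested values are
    // stored in the DEVMODE and nCopies comes back as 1, so this loop runs
    // once. Otherwise nCopies holds the count and the application has to
    // render that many passes itself.
    for( WORD copy = 0; copy < pd.nCopies; ++copy )
    {
        // ... the StartDoc/StartPage/EndPage loop from the code above ...
    }
    ::DeleteDC( pd.hDC );
}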

How can I create a Direct3D 11 renderer using SDL 2.0.9?

In my project I must use SDL_BLENDOPERATION_MAXIMUM via SDL_ComposeCustomBlendMode(), which in SDL 2.0.9 is supported by the direct3d11 renderer only. I have Windows 8.1 and a GeForce GTX 750 Ti with updated drivers, so my system should support Direct3D 11 rendering.
Changing defines in SDL_config.h or SDL_config_windows.h (SDL_VIDEO_RENDER_D3D11 to 1 and SDL_VIDEO_RENDER_D3D to 0) doesn't help.
I tried to fill the preprocessor definitions with the defines SDL_VIDEO_RENDER_D3D11 or WINRT according to the SDL source code, but that doesn't help either.
What should I do to activate the direct3d11 renderer so I can use the max blend mode?
My test code:
#include "SDL.h"
#include "SDL_image.h"
#include <string>
using namespace std;
int main( int argc, char *argv[] ) {
SDL_Init( SDL_INIT_VIDEO );
IMG_Init( IMG_INIT_PNG );
SDL_SetHintWithPriority( SDL_HINT_RENDER_DRIVER, "direct3d11", SDL_HINT_OVERRIDE );
SDL_Window *window = SDL_CreateWindow( "Testing", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
1200, 600, SDL_WINDOW_RESIZABLE );
SDL_Renderer *renderer = SDL_CreateRenderer( window, -1, SDL_RENDERER_ACCELERATED );
SDL_RendererInfo *rendererInfo = new SDL_RendererInfo();
SDL_RendererInfo *driverInfo = new SDL_RendererInfo();
SDL_GetRendererInfo( renderer, rendererInfo );
int drivers = SDL_GetNumRenderDrivers();
string availableDrivers = " (";
for ( int i = 0; i < drivers; ++i ) {
SDL_GetRenderDriverInfo( i, driverInfo );
string driverName = driverInfo->name;
if ( i == drivers - 1 ) {
availableDrivers += driverName;
}
else {
availableDrivers += driverName + ", ";
}
}
availableDrivers += ")";
string path = SDL_GetBasePath();
SDL_Surface *surfRed = IMG_Load( (path + "\\Red.png").c_str() );
SDL_Texture *textRed = SDL_CreateTextureFromSurface( renderer, surfRed );
SDL_FreeSurface( surfRed );
SDL_Surface *surfBlue = IMG_Load( ( path + "\\Blue.png" ).c_str() );
SDL_Texture *textBlue = SDL_CreateTextureFromSurface( renderer, surfBlue );
SDL_FreeSurface( surfBlue );
SDL_Rect destRed, destBlue;
destRed.x = 128;
destRed.y = 128;
destBlue.x = 196;
destBlue.y = 196;
SDL_QueryTexture( textRed, NULL, NULL, &destRed.w, &destRed.h );
SDL_QueryTexture( textBlue, NULL, NULL, &destBlue.w, &destBlue.h );
SDL_BlendMode blendMode = SDL_ComposeCustomBlendMode( SDL_BLENDFACTOR_ONE, SDL_BLENDFACTOR_ONE, SDL_BLENDOPERATION_MAXIMUM,
SDL_BLENDFACTOR_ONE, SDL_BLENDFACTOR_ONE, SDL_BLENDOPERATION_MAXIMUM );
SDL_SetTextureBlendMode( textRed, blendMode );
SDL_SetTextureBlendMode( textBlue, blendMode );
// SDL_SetRenderDrawBlendMode( renderer, blendMode );
string info = rendererInfo->name + availableDrivers + " " + SDL_GetError();
SDL_SetWindowTitle( window, info.c_str() );
SDL_SetRenderDrawColor( renderer, 0, 0, 0, 255 );
SDL_Event event;
bool isRunning = true;
while ( isRunning ) {
if ( SDL_PollEvent( &event ) ) {
if ( event.type == SDL_QUIT ) {
isRunning = false;
}
}
SDL_RenderClear( renderer );
SDL_RenderCopy( renderer, textRed, NULL, &destRed );
SDL_RenderCopy( renderer, textBlue, NULL, &destBlue );
SDL_RenderPresent( renderer );
}
delete driverInfo;
delete rendererInfo;
SDL_DestroyTexture( textRed );
SDL_DestroyTexture( textBlue );
SDL_DestroyRenderer( renderer );
SDL_DestroyWindow( window );
IMG_Quit();
SDL_Quit();
return 0;
}
The window title is "direct3d (direct3d, opengl, opengles2, software) This operation is not supported". It works fine when I change to SDL_BLENDOPERATION_ADD, but that's not what I want. Uncommenting the renderer blend mode line doesn't help either.
Enumerate the supported renderer backends via SDL_GetNumRenderDrivers() and SDL_GetRenderDriverInfo().
Note the index of the direct3d11 driver, if it exists.
Pass that index into SDL_CreateRenderer().
All together:
SDL_Init( SDL_INIT_VIDEO );
SDL_Window* window = SDL_CreateWindow( "SDL2", 0, 0, 640, 480, SDL_WINDOW_SHOWN );
SDL_Renderer* renderer = nullptr;
for( int i = 0; i < SDL_GetNumRenderDrivers(); ++i )
{
    SDL_RendererInfo rendererInfo = {};
    SDL_GetRenderDriverInfo( i, &rendererInfo );
    if( rendererInfo.name != std::string( "direct3d11" ) )
    {
        continue;
    }
    renderer = SDL_CreateRenderer( window, i, 0 );
    break;
}
Note that what you are setting with SDL_VIDEO_RENDER_D3D and friends are compile-time definitions for building SDL itself with D3D support.
To do this at runtime:
Sometime after SDL_Init() and before creating your window/renderer, set a hint for it:
// returns true on success or false on failure
SDL_SetHint(SDL_HINT_RENDER_DRIVER, "direct3d11");
However, this will only work on Windows, and I believe d3d is already the default renderer on Windows. If your version of Windows supports d3d11, that is what SDL should use. I strongly suspect you already have a d3d11 renderer and your problem is with how you instantiate or use your custom blend mode.
To verify you have a d3d11 renderer:
SDL_RendererInfo info;
SDL_GetRendererInfo(renderer, &info);
printf("%s", info.name);
To create a custom blend mode with SDL_BLENDOPERATION_MAXIMUM:
SDL_BlendMode blender = SDL_ComposeCustomBlendMode(SDL_BLENDFACTOR_SRC_ALPHA,           // change this to suit your needs
                                                   SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, // change this to suit your needs
                                                   SDL_BLENDOPERATION_MAXIMUM,
                                                   SDL_BLENDFACTOR_ONE,                 // change this to suit your needs
                                                   SDL_BLENDFACTOR_ONE_MINUS_SRC_ALPHA, // change this to suit your needs
                                                   SDL_BLENDOPERATION_MAXIMUM);
SDL_SetTextureBlendMode(texture, blender);     // blender is your custom mode
SDL_SetRenderDrawBlendMode(renderer, blender); // blender is your custom mode
I can't imagine the above blender is actually the combination of factors/operations you want, but you didn't post your actual blending code to work from.

MQSendMessage() failed with MQ_ERROR_INVALID_PARAMETER (0xC00E0006)

Is this a problem with the MSMQ configuration or with the code I've implemented?
I've written Windows service code (Win32 C++) in which I am sending a log message to a local private queue. This code works fine when I build it for a 32-bit environment (Windows 7/8/Vista), but if I build the same code for a 64-bit OS, MQSendMessage() fails with MQ_ERROR_INVALID_PARAMETER (0xC00E0006). What could be the problem? Please help me out in this regard. Thanks in advance.
I've tried changing NUMBEROFPROPERTIES from 3 to 7 on 64-bit Windows 7, but the problem remains the same. What can I do to avoid this?
Here is my sample code:
#define ClientQueue L".\\Private$\\TestQueue"
#define LogMsgLable L"TestLOG"
#define MIN_PRIVATE_QUEUE_NAME_LENGTH 55

DWORD MSMQSendMessage()
{
    // Define the required constants and variables.
    const int NUMBEROFPROPERTIES = 7; // Number of properties
    DWORD cPropId = 0;                // Property counter
    HRESULT hr = MQ_OK;               // Return code
    HANDLE hQueue = NULL;             // Queue handle

    // Define an MQMSGPROPS structure.
    MQMSGPROPS msgProps;
    MSGPROPID aMsgPropId[NUMBEROFPROPERTIES] = {0};
    MQPROPVARIANT aMsgPropVar[NUMBEROFPROPERTIES] = {0};
    HRESULT aMsgStatus[NUMBEROFPROPERTIES] = {0};

    // Specify the message properties to be sent.
    aMsgPropId[cPropId] = PROPID_M_LABEL;     // Property ID
    aMsgPropVar[cPropId].vt = VT_LPWSTR;      // Type indicator
    aMsgPropVar[cPropId].pwszVal = L"ADCLOG"; // The message label
    cPropId++;

    // Specifying the storage of messages on the hard disk:
    // setting the message properties as recoverable.
    aMsgPropId[cPropId] = PROPID_M_DELIVERY;
    aMsgPropVar[cPropId].vt = VT_UI1;
    aMsgPropVar[cPropId].bVal = MQMSG_DELIVERY_RECOVERABLE;
    cPropId++;

    aMsgPropId[cPropId] = PROPID_M_ACKNOWLEDGE; // Property ID
    aMsgPropVar[cPropId].vt = VT_UI1;           // Type indicator
    aMsgPropVar[cPropId].bVal = MQMSG_ACKNOWLEDGMENT_FULL_RECEIVE;
    cPropId++;

    // We need to set the size of the message;
    // if we don't set it, 4MB is taken as the default message size.
    // To set the size of it we have ---> PROPID_M_BODY
    ULONG ulBufferSize = sizeof( 15);
    char *lLog_msg = NULL;
    lLog_msg = ( char*)GlobalAlloc( GPTR, sizeof( 15));
    ZeroMemory( lLog_msg, sizeof( 15)) ;
    strcpy(lLog_msg, "HelloWorld");
    aMsgPropId[cPropId] = PROPID_M_BODY;                  // Property ID
    aMsgPropVar[cPropId].vt = VT_VECTOR | VT_UI1;         // Type indicator
    aMsgPropVar[cPropId].caub.pElems = (UCHAR *)lLog_msg; // Body buffer
    aMsgPropVar[cPropId].caub.cElems = ulBufferSize;      // Buffer size
    cPropId++;

    // Here we should not put VT_NULL in the type, as it is defined with VT_UI4.
    aMsgPropId[cPropId] = PROPID_M_BODY_TYPE; // Property ID
    aMsgPropVar[cPropId].vt = VT_UI4;         // Type indicator
    cPropId++;

    // Initialize the MQMSGPROPS structure.
    msgProps.cProp = cPropId;
    msgProps.aPropID = aMsgPropId;
    msgProps.aPropVar = aMsgPropVar;
    msgProps.aStatus = aMsgStatus;

    // Create a direct format name for the queue.
    WCHAR *gFormatName = NULL;
    DWORD dwBufferLength = 0;
    dwBufferLength = MIN_PRIVATE_QUEUE_NAME_LENGTH; // Private queue format name buffer size, at least 54
    gFormatName = (WCHAR *)malloc( dwBufferLength*sizeof( WCHAR ));
    if (gFormatName == NULL)
    {
        printf( "malloc failed\n" );
        return MQ_ERROR_INSUFFICIENT_RESOURCES;
    }
    SecureZeroMemory( gFormatName, dwBufferLength*sizeof(WCHAR) );
    hr = MQPathNameToFormatName( ClientQueue,
                                 gFormatName,
                                 &dwBufferLength );
    if (FAILED( hr ))
    {
        if( hr == MQ_ERROR_FORMATNAME_BUFFER_TOO_SMALL )
        {
            if (gFormatName != NULL)
            {
                gFormatName = (WCHAR *)realloc( gFormatName, dwBufferLength*sizeof( WCHAR ));
                if (gFormatName == NULL)
                {
                    printf( "realloc failed\n");
                    return MQ_ERROR_INSUFFICIENT_RESOURCES;
                }
            }
            SecureZeroMemory( gFormatName, dwBufferLength*sizeof( WCHAR ));
            hr = MQPathNameToFormatName( ClientQueue,
                                         gFormatName,
                                         &dwBufferLength );
            if(FAILED( hr ))
            {
                printf( "MQPathNameToFormatName2 failed:%x\n", hr);
                return hr;
            }
        }
        else
        {
            printf("MQPathNameToFormatName failed:%x\n", hr);
            return hr;
        }
    }

    // Call MQOpenQueue to open the queue with send access.
    hr = MQOpenQueue(
        gFormatName,    // Format name of the queue
        MQ_SEND_ACCESS, // Access mode
        MQ_DENY_NONE,   // Share mode
        &hQueue         // OUT: Queue handle
        );
    if ( FAILED( hr ))
    {
        printf("MQOpenQueue failed:%x\n", hr);
        goto ret;
    }
    if( gFormatName )
        free( gFormatName );

    // Call MQSendMessage to send the message to the queue.
    hr = MQSendMessage(
        hQueue,    // Queue handle
        &msgProps, // Message property structure
        NULL       // Not in a transaction
        );
    if (FAILED(hr))
    {
        printf( "MQSendMessage failed:%x\n", hr );
        MQCloseQueue( hQueue );
        goto ret;
    }

    // Call MQCloseQueue to close the queue.
    hr = MQCloseQueue(hQueue);
    if(hr != 0)
    {
        printf("MQCloseQueue failed:%x", hr);
        goto ret;
    }
ret:
    if( lLog_msg )
    {
        GlobalFree( lLog_msg );
        lLog_msg = NULL;
    }
    return hr;
}
Your code works on 32-bit Windows purely by chance. Take a look at this:
ULONG ulBufferSize = sizeof( 15);
char *lLog_msg = NULL;
lLog_msg = ( char*)GlobalAlloc( GPTR, sizeof( 15));
ZeroMemory( lLog_msg, sizeof( 15)) ;
strcpy(lLog_msg, "HelloWorld");
You seem to misunderstand what the sizeof operator does. It is a compile-time operator that replaces its argument with the size of that argument. In this case, the compiler replaces sizeof(15) with the number 4, because an integer literal like 15 has type int, which occupies 4 bytes. So in the code above you are allocating 4 bytes of memory and then copying 11 bytes into it, thereby corrupting memory.
To fix this, simply remove sizeof. The code above should look like this:
ULONG ulBufferSize = 15;
char *lLog_msg = NULL; // this is pointless since you set it in the next line
lLog_msg = ( char*)GlobalAlloc( GPTR, ulBufferSize);
ZeroMemory( lLog_msg, ulBufferSize) ;
strcpy(lLog_msg, "HelloWorld");

How do I correctly call LsaLogonUser for an interactive logon?

I'm trying to use LsaLogonUser to create an interactive logon session, but it always returns STATUS_INVALID_INFO_CLASS (0xc0000003). From what I have found in searching online, the memory layout of the KERB_INTERACTIVE_LOGON structure is tricky, but I'm pretty sure I've done that right.
I've also tried using MSV1.0 instead of Kerberos, with MSV1_0_INTERACTIVE_LOGON for the authentication structure and MSV1_0_PACKAGE_NAME as the package name, but that fails with STATUS_BAD_VALIDATION_CLASS (0xc00000a7).
Can anyone tell me what I'm doing wrong here? Here's the code, with most of the error handling stripped. Clearly this isn't production quality; I'm just trying to get a working sample.
// see below for definitions of these
size_t wcsByteLen( const wchar_t* str );
void InitUnicodeString( UNICODE_STRING& str, const wchar_t* value, BYTE* buffer, size_t& offset );

int main( int argc, char * argv[] )
{
    // connect to the LSA
    HANDLE lsa;
    LsaConnectUntrusted( &lsa );

    const wchar_t* domain = L"mydomain";
    const wchar_t* user = L"someuser";
    const wchar_t* password = L"scaryplaintextpassword";

    // prepare the authentication info
    ULONG authInfoSize = sizeof(KERB_INTERACTIVE_LOGON) +
        wcsByteLen( domain ) + wcsByteLen( user ) + wcsByteLen( password );
    BYTE* authInfoBuf = new BYTE[authInfoSize];
    KERB_INTERACTIVE_LOGON* authInfo = (KERB_INTERACTIVE_LOGON*)authInfoBuf;
    authInfo->MessageType = KerbInteractiveLogon;
    size_t offset = sizeof(KERB_INTERACTIVE_LOGON);
    InitUnicodeString( authInfo->LogonDomainName, domain, authInfoBuf, offset );
    InitUnicodeString( authInfo->UserName, user, authInfoBuf, offset );
    InitUnicodeString( authInfo->Password, password, authInfoBuf, offset );

    // find the Kerberos security package
    char packageNameRaw[] = MICROSOFT_KERBEROS_NAME_A;
    LSA_STRING packageName;
    packageName.Buffer = packageNameRaw;
    packageName.Length = packageName.MaximumLength = (USHORT)strlen( packageName.Buffer );
    ULONG packageId;
    LsaLookupAuthenticationPackage( lsa, &packageName, &packageId );

    // create a dummy origin and token source
    LSA_STRING origin = {};
    origin.Buffer = _strdup( "TestAppFoo" );
    origin.Length = (USHORT)strlen( origin.Buffer );
    origin.MaximumLength = origin.Length;
    TOKEN_SOURCE source = {};
    strcpy( source.SourceName, "foobar" );
    AllocateLocallyUniqueId( &source.SourceIdentifier );

    void* profileBuffer;
    DWORD profileBufLen;
    LUID luid;
    HANDLE token;
    QUOTA_LIMITS qlimits;
    NTSTATUS subStatus;
    NTSTATUS status = LsaLogonUser( lsa, &origin, Interactive, packageId,
        &authInfo, authInfoSize, 0, &source, &profileBuffer, &profileBufLen,
        &luid, &token, &qlimits, &subStatus );
    if( status != ERROR_SUCCESS )
    {
        ULONG err = LsaNtStatusToWinError( status );
        printf( "LsaLogonUser failed: %x\n", status );
        return 1;
    }
}

size_t wcsByteLen( const wchar_t* str )
{
    return wcslen( str ) * sizeof(wchar_t);
}

void InitUnicodeString( UNICODE_STRING& str, const wchar_t* value,
                        BYTE* buffer, size_t& offset )
{
    size_t size = wcsByteLen( value );
    str.Length = str.MaximumLength = (USHORT)size;
    str.Buffer = (PWSTR)(buffer + offset);
    memcpy( str.Buffer, value, size );
    offset += size;
}
You goofed up on one of the parameters to LsaLogonUser(); instead of &authInfo you should pass just authInfo. Happens to everyone :)
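In other words, with the variables from the question, the call should read:
NTSTATUS status = LsaLogonUser( lsa, &origin, Interactive, packageId,
    authInfo, authInfoSize, 0, &source, &profileBuffer, &profileBufLen,
    &luid, &token, &qlimits, &subStatus );
&authInfo is the address of the local pointer variable, so the LSA ends up reading the pointer value itself as if it were the start of the KERB_INTERACTIVE_LOGON structure.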
