SDL2 on Raspberry Pi can't create renderer: "OpenGL context already created" - opengl-es

I set up SDL2 on the Raspberry Pi using the resources from this tutorial: https://www.youtube.com/watch?v=Yo7hO7GZ-ug I got it to compile and run, but when it reaches the point where it needs to set up the renderer, I get a NULL return value.
The accepted answer on this question suggests that the error "OpenGL context already created" is misleading and that the OpenGL context hasn't been created at all. That would mean my OpenGL setup is broken. The Raspberry Pi uses OpenGL ES, and from what I understand SDL is smart enough to use GLES instead of desktop GL? I'm wondering if anyone else has had this kind of issue and if there's a known way to fix it.
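One way to check which render drivers this SDL build actually exposes (a small diagnostic sketch using stock SDL2 calls, not part of my original program) is to enumerate them and look for an OpenGL ES backend such as "opengles2":
// Diagnostic sketch: list the render drivers available in this SDL2 build.
#include <SDL2/SDL.h>
#include <stdio.h>

int main(int argc, char** argv) {
    SDL_Init(SDL_INIT_VIDEO);
    int count = SDL_GetNumRenderDrivers();
    for (int i = 0; i < count; ++i) {
        SDL_RendererInfo info;
        if (SDL_GetRenderDriverInfo(i, &info) == 0) {
            printf("render driver %d: %s\n", i, info.name);  // e.g. "opengl", "opengles2", "software"
        }
    }
    SDL_Quit();
    return 0;
}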
This is my code:
#include <SDL2/SDL.h>

const char* WINDOW_TITLE = "steel";

int main(int argc, char** argv) {
    SDL_Window* window = NULL;
    SDL_Renderer* renderer = NULL;

    SDL_Init(SDL_INIT_EVERYTHING);

    // Setup window
    window = SDL_CreateWindow(
        WINDOW_TITLE,            // title
        SDL_WINDOWPOS_CENTERED,  // x pos
        SDL_WINDOWPOS_CENTERED,  // y pos
        0,                       // width
        0,                       // height
        SDL_WINDOW_FULLSCREEN_DESKTOP);
    if (window == NULL) {
        printf("Could not create window %s\n", SDL_GetError());
        return 1;
    }

    // Setup renderer
    renderer = SDL_CreateRenderer(window, 0, 0);
    if (renderer == NULL) {
        printf("Could not create renderer %s\n", SDL_GetError());
        return 1;
    }
}

According to the SDL_CreateRenderer documentation, you should pass -1 as its second parameter to ask for the first rendering driver that supports your requested flags. The driver at index 0 might not be the correct one.
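For example (a minimal sketch of the suggested change; the software-renderer fallback is an extra precaution, not something the documentation requires):
// Let SDL pick the first driver that supports the requested flags.
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
if (renderer == NULL) {
    printf("Accelerated renderer failed: %s\n", SDL_GetError());
    // Fall back to the software renderer so the program can still run.
    renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_SOFTWARE);
}
if (renderer == NULL) {
    printf("Could not create renderer %s\n", SDL_GetError());
    return 1;
}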

Related

How to display two images from a binocular camera synchronously with OpenCV

I have a problem where one of the images from a binocular camera lags behind the other. I use the ROS package usb_cam to get the images with their timestamps and then use the OpenCV function imshow() to display them.
Environment: Ubuntu 16.04 in VMware Workstation 12 (on Windows 10), ROS Kinetic, OpenCV 3.3.0. The binocular camera supports the MJPEG format at 640*480 and 30 fps.
I am a beginner in visual SLAM and am trying to display the images from a binocular camera in real time. So far I have used the usb_cam package to get the image data and shown it in rviz and rqt. But I run into a time-synchronization problem with OpenCV's imshow(). Specifically, I want to use usb_cam to get the data and do some image processing with OpenCV, and as a first step I am trying to display the images with OpenCV functions.
In the code below I have commented out some lines.
The code is based on an open-source project on GitHub, a C++ SLAM project covering INS, GPS, and a binocular camera. I mainly followed the data-capture code in rosNodeTest.cpp. It uses multi-threading, which I am not very familiar with.
Please visit https://github.com/HKUST-Aerial-Robotics/VINS-Fusion
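For reference, the stereo pairing in that file works roughly like the following (my paraphrased sketch rather than the exact upstream code; the 0.003 s tolerance is an assumed value). It uses the same buffer names as my code below, and only handles a pair when the two front timestamps agree, otherwise dropping the older frame:
// Paraphrased sketch of VINS-Fusion-style pairing by timestamp.
m_buf.lock();
if (!img0_buf.empty() && !img1_buf.empty()) {
    double t0 = img0_buf.front()->header.stamp.toSec();
    double t1 = img1_buf.front()->header.stamp.toSec();
    if (t0 < t1 - 0.003) {
        img0_buf.pop();                      // camera 0 frame is too old, drop it
    } else if (t0 > t1 + 0.003) {
        img1_buf.pop();                      // camera 1 frame is too old, drop it
    } else {
        cv::Mat image0 = msg2cv(img0_buf.front()); img0_buf.pop();
        cv::Mat image1 = msg2cv(img1_buf.front()); img1_buf.pop();
        // ...show or process the synchronized pair...
    }
}
m_buf.unlock();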
Here is my code. It is a ROS package and I am sure it builds and runs, so I have not pasted the CMakeLists.txt and package.xml.
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <queue>
#include <thread>
#include <mutex>
#include <iostream>
std::queue<sensor_msgs::ImageConstPtr> img0_buf;
std::queue<sensor_msgs::ImageConstPtr> img1_buf;
std::mutex m_buf;
//Test for rqt, 0 for cv, else for rviz
int flag = 0;
ros::Publisher pubImg0;
ros::Publisher pubImg1;
void img0_callback(const sensor_msgs::ImageConstPtr &img0)
{
m_buf.lock();
//For rqt
if(flag != 0)
{
sensor_msgs::Image img;
img.header=img0->header;
img.height = img0->height;
img.width = img0->width;
img.is_bigendian = img0->is_bigendian;
img.step = img0->step;
img.data=img0->data;
img.encoding=img0->encoding;
pubImg0.publish(img);
// std::cout<<"0."<<img.header<<std::endl;
}
else
{
img0_buf.push(img0);
}
m_buf.unlock();
}
void img1_callback(const sensor_msgs::ImageConstPtr &img1)
{
m_buf.lock();
if(flag != 0)
{
sensor_msgs::Image img;
img.header=img1->header;
img.height = img1->height;
img.width = img1->width;
img.is_bigendian = img1->is_bigendian;
img.step = img1->step;
img.data=img1->data;
img.encoding=img1->encoding;
pubImg1.publish(img);
// std::cout<<"1."<<img.header<<std::endl;
}
else
{
img1_buf.push(img1);
}
m_buf.unlock();
}
//Use the cv_bridge of ros to change the image data format from msgs to cv
cv::Mat msg2cv(const sensor_msgs::ImageConstPtr &img_msg)
{
cv_bridge::CvImageConstPtr ptr;
sensor_msgs::Image img_tmp;
img_tmp.header = img_msg->header;
img_tmp.height = img_msg->height;
img_tmp.width = img_msg->width;
img_tmp.is_bigendian = img_msg->is_bigendian;
img_tmp.step = img_msg->step;
img_tmp.data = img_msg->data;
img_tmp.encoding =img_msg->encoding;
ptr = cv_bridge::toCvCopy(img_tmp, sensor_msgs::image_encodings::BGR8);
cv::Mat img = ptr->image.clone();
return img;
}
//With reference of VINS rosNodeTest.cpp
void display()
{
while(1)
{
cv::Mat image0, image1;
// double t1,t2;
m_buf.lock();
if(!img0_buf.empty() && !img1_buf.empty())
{
ROS_INFO("Two cameras work");
image0=msg2cv(img0_buf.front());
// ROS_INFO("img0 %.9lf", img0_buf.front()->header.stamp.toSec());
// t1=img0_buf.front()->header.stamp.toSec();
img0_buf.pop();
imshow("camera1", image0);
image1=msg2cv(img1_buf.front());
// ROS_INFO("img1 %.9lf", img1_buf.front()->header.stamp.toSec());
// t2=img1_buf.front()->header.stamp.toSec();
img1_buf.pop();
cv::imshow("camera2", image1);
cv::waitKey(1);
}
m_buf.unlock();
// //display with cv
// if(!image0.empty())
// {
// imshow("camera1", image0);
// // cv::waitKey(1);
// }
// // else
// // { std::cout<<"image0 is empty!"<<std::endl;}
// if(!image1.empty())
// {
// imshow("camera2", image1);
// // cv::waitKey(1);
// }
// else
// {std::cout<<"image1 is empty!"<<std::endl;}
// cv::waitKey();
//std::chrono::milliseconds dura(2);
//std::this_thread::sleep_for(dura);
}
}
int main(int argc, char** argv)
{
//Initialize a ros node
ros::init(argc,argv,"demo");
ros::NodeHandle n;
ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
//Subscribe the binocular camera raw data
ros::Subscriber sub_img0=n.subscribe("/camera1/usb_cam1/image_raw", 2000, img0_callback);
ros::Subscriber sub_img1=n.subscribe("/camera2/usb_cam2/image_raw", 2000, img1_callback);
ROS_INFO("Wait for camera data.");
if(flag != 0) //for rviz
{
pubImg0 = n.advertise<sensor_msgs::Image>("/Img0", 100);
pubImg1 = n.advertise<sensor_msgs::Image>("/Img1", 100);
std::cout<<"for rviz"<<std::endl;
}
else //for cv
{
//Synchronization and display
std::cout<<"for cv"<<std::endl;
std::thread sync_thread{display};
sync_thread.detach();
//display();
}
ros::spin();
return 1;
}
The result right now is that one camera lags behind the other. Can anyone say something about this problem? I am not sure whether it comes from the OpenCV display or from the multi-threaded code. Thank you!

SDL2 Window turns black on resize

I have started working with SDL2 and am not experienced with it. I am working on a Mac. Almost everything has gone well, but I have a problem: when a resizable window is resized, the window turns black while the handle is being dragged, and I can only repaint it after releasing the mouse. I have also checked that no events are produced while the window is being resized, so I have no way to detect or react to this; the event loop is simply paused. Are there any possible solutions?
Here is the code (an almost exact replica of a tutorial on handling the resize event):
SDL_Event event;
SDL_Rect nativeSize;
SDL_Rect newWindowSize;
float scaleRatioW;//This is to change anything that might rely on something like mouse coords
float scaleRatioH; //(such as a button on screen) over to the new coordinate system scaling would create
SDL_Window * window; //Our beautiful window
SDL_Renderer * renderer; //The renderer for our window
SDL_Texture * backBuffer; //The back buffer that we will be rendering everything to before scaling up
SDL_Texture * ballImage; //A nice picture to demonstrate the scaling;
bool resize;
void InitValues(); //Initialize all the variables needed
void InitSDL(); //Initialize the window, renderer, backBuffer, and image;
bool HandleEvents(); //Handle the window changed size event
void Render(); //Switches the render target back to the window and renders the back buffer, then switches back.
void Resize(); //The important part for stretching. Changes the viewPort, changes the scale ratios
void InitValues()
{
nativeSize.x = 0;
nativeSize.y = 0;
nativeSize.w = 256;
nativeSize.h = 224; //A GameBoy size window width and height
scaleRatioW = 1.0f;
scaleRatioH = 1.0f;
newWindowSize.x = 0;
newWindowSize.y = 0;
newWindowSize.w = nativeSize.w;
newWindowSize.h = nativeSize.h;
window = NULL;
renderer = NULL;
backBuffer = NULL;
ballImage = NULL;
resize = false;
}
void InitSDL()
{
if(SDL_Init(SDL_INIT_EVERYTHING) < 0)
{
//cout << "Failed to initialize SDL" << endl;
printf("%d\r\n", __LINE__);
}
//Set the scaling quality to nearest-pixel
if(SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "0") < 0)
{
//cout << "Failed to set Render Scale Quality" << endl;
printf("%d\r\n", __LINE__);
}
//Window needs to be resizable
window = SDL_CreateWindow("Rescaling Windows!",
SDL_WINDOWPOS_CENTERED,
SDL_WINDOWPOS_CENTERED,
256,
224,
SDL_WINDOW_RESIZABLE);
//You must use the SDL_RENDERER_TARGETTEXTURE flag in order to target the backbuffer
renderer = SDL_CreateRenderer(window,
-1,
SDL_RENDERER_ACCELERATED |
SDL_RENDERER_TARGETTEXTURE);
//Set to blue so it's noticeable if it doesn't do right.
SDL_SetRenderDrawColor(renderer, 0, 0, 200, 255);
//Similarly, you must use SDL_TEXTUREACCESS_TARGET when you create the texture
backBuffer = SDL_CreateTexture(renderer,
SDL_GetWindowPixelFormat(window),
SDL_TEXTUREACCESS_TARGET,
nativeSize.w,
nativeSize.h);
//IMPORTANT Set the back buffer as the target
SDL_SetRenderTarget(renderer, backBuffer);
//Load an image yay
SDL_Surface * image = SDL_LoadBMP("Ball.bmp");
ballImage = SDL_CreateTextureFromSurface(renderer, image);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
SDL_FreeSurface(image);
}
bool HandleEvents()
{
while(SDL_PollEvent(&event) )
{
printf("%d\r\n", __LINE__);
if(event.type == SDL_QUIT)
{
printf("%d\r\n", __LINE__);
return true;
}
else if(event.type == SDL_WINDOWEVENT)
{
if(event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED)
{
resize = true;
printf("%d\r\n", __LINE__);
}
}
return false;
}
return false;
}
void Render()
{
SDL_RenderCopy(renderer, ballImage, NULL, NULL); //Render the entire ballImage to the backBuffer at (0, 0)
printf("%d\r\n", __LINE__);
SDL_SetRenderTarget(renderer, NULL); //Set the target back to the window
if(resize)
{
Resize();
resize = false;
}
printf("%d\r\n", __LINE__);
SDL_RenderCopy(renderer, backBuffer, &nativeSize, &newWindowSize); //Render the backBuffer onto the screen at (0,0)
SDL_RenderPresent(renderer);
SDL_RenderClear(renderer); //Clear the window buffer
SDL_SetRenderTarget(renderer, backBuffer); //Set the target back to the back buffer
SDL_RenderClear(renderer); //Clear the back buffer
printf("%d\r\n", __LINE__);
}
void Resize()
{
int w, h;
printf("%d\r\n", __LINE__);
SDL_GetWindowSize(window, &w, &h);
scaleRatioW = w / nativeSize.w;
scaleRatioH = h / nativeSize.h; //The ratio from the native size to the new size
newWindowSize.w = w;
newWindowSize.h = h;
//In order to do a resize, you must destroy the back buffer. Try without it, it doesn't work
SDL_DestroyTexture(backBuffer);
backBuffer = SDL_CreateTexture(renderer,
SDL_GetWindowPixelFormat(window),
SDL_TEXTUREACCESS_TARGET, //Again, must be created using this
nativeSize.w,
nativeSize.h);
SDL_Rect viewPort;
SDL_RenderGetViewport(renderer, &viewPort);
if(viewPort.w != newWindowSize.w || viewPort.h != newWindowSize.h)
{
//VERY IMPORTANT - Change the viewport over to the new size. It doesn't do this for you.
SDL_RenderSetViewport(renderer, &newWindowSize);
}
}
int main(int argc, char * argv[])
{
InitValues();
InitSDL();
bool quit = false;
printf("%d\r\n", __LINE__);
while(!quit)
{
printf("%d\r\n", __LINE__);
quit = HandleEvents();
Render();
}
return 0;
}
Ok, after fighting a bit with SDL2, I got it working with macOS 10.12.
Here is the problem:
SDL2 catches resize events and resends only the last 3 of them when you poll events, for example with SDL_PollEvent(&event).
During this time (while you hold the left mouse button down on the resize area), SDL_PollEvent is blocking.
Here is the workaround:
Luckily, you can hook into the event handling with SDL_SetEventFilter. The filter fires every time an event is received, so it sees every resize event as it occurs.
So what you can do is register your own event filter callback that allows every event through (by returning 1), listens for resize events, and forwards them to your draw loop.
Example:
//register this somewhere
int filterEvent(void *userdata, SDL_Event *event) {
    if (event->type == SDL_WINDOWEVENT && event->window.event == SDL_WINDOWEVENT_RESIZED) {
        //convert userdata pointer to yours and trigger your own draw function
        //this is called very often now
        //IMPORTANT: Might be called from a different thread, see SDL_SetEventFilter docs
        ((MyApplicationClass *)userdata)->myDrawFunction();
        //return 0 if you don't want to handle this event twice
        return 0;
    }
    //important to allow all events, or your SDL_PollEvent doesn't get any event
    return 1;
}

///after SDL_Init
SDL_SetEventFilter(filterEvent, this); //this is an instance of MyApplicationClass, for example
Important: Do not call SDL_PollEvent within your filterEvent callback, as this will result in weird behaviour with stuck events (for example, resizing sometimes does not stop).
It turns out that this is not specific to my code; it is part of a wider problem with all the OpenGL windowing libraries on macOS. Recent patches in GLFW have fixed it, and the GLUT version that ships with Xcode is quite a bit better: you only observe a flicker in the window while resizing.
https://github.com/openframeworks/openFrameworks/issues/2800
https://github.com/openframeworks/openFrameworks/issues/2456
The problem is caused by the blocking nature of the macOS window manager, which withholds all events until the mouse is released.
To solve this, you have to modify the library you are using and recompile it. You would add these calls, or something similar depending on your development environment, to the resize handler to bypass the block:
ofNotifyUpdate();
instance->display();
which is disastrous if you are a novice and want to be able to take library updates without much difficulty. Another solution is to override the SDL behaviour by writing another event handler that does this. That is better because it doesn't require editing the SDL code, but it adds a pile of platform-specific code that I personally don't want to maintain and that would cause a lot of problems I don't want to spend time fixing.
After two days of searching, and because I had just started the project and had not implemented much that relies on SDL, I decided to switch to GLFW, which has the smoothest resize handling; I observe no flicker.
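For reference, here is a minimal GLFW 3.x sketch of the approach I switched to (an assumed setup, not my actual project code): drawing from the window-refresh callback, which keeps firing while the user drags the resize handle on macOS.
// Minimal GLFW sketch: redraw from the refresh callback so the window
// keeps its contents during a live resize.
#include <GLFW/glfw3.h>

static void drawFrame(GLFWwindow* window) {
    int w, h;
    glfwGetFramebufferSize(window, &w, &h);
    glViewport(0, 0, w, h);
    glClearColor(0.0f, 0.0f, 0.8f, 1.0f);   // blue, so stalls are obvious
    glClear(GL_COLOR_BUFFER_BIT);
    // ...draw the scene here...
    glfwSwapBuffers(window);
}

int main(void) {
    if (!glfwInit()) return 1;
    GLFWwindow* window = glfwCreateWindow(256, 224, "Rescaling Windows!", NULL, NULL);
    if (!window) { glfwTerminate(); return 1; }
    glfwMakeContextCurrent(window);

    // Unlike the paused SDL loop, this callback runs during the drag.
    glfwSetWindowRefreshCallback(window, drawFrame);

    while (!glfwWindowShouldClose(window)) {
        drawFrame(window);
        glfwPollEvents();
    }
    glfwTerminate();
    return 0;
}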

SDL 2.0 Issue - Trying to load image, but only get a white box

Okay, so I am trying to create a window and display a couple of images in it using a function. The window loads and my error messages for a failed image load do not appear; however, the window just stays white. Any ideas why that might be? My code is below.
#include "stdafx.h"
#include "SDL.h"
#include <iostream>
#include <string>
using namespace std;
const int Window_Width = 640;
const int Window_Height = 480;
SDL_Window *window = NULL;
SDL_Renderer *render = NULL;
SDL_Texture* loadImage(string imagename) //function that loads the image, useful for handling multiple image imports
{
SDL_Surface* loadedImage = NULL;
SDL_Texture* texture = NULL;
loadedImage = SDL_LoadBMP(imagename.c_str()); //loads the image with the passed file name
if (loadedImage == NULL) //checks for any errors loading the image
{
cout<<"The image failed to load.."<<endl;
}
texture = SDL_CreateTextureFromSurface(render, loadedImage);
SDL_FreeSurface(loadedImage);
return texture;
}
int main(int argc, char** argv)
{
if (SDL_Init(SDL_INIT_EVERYTHING) == -1)
{
cout << SDL_GetError() << endl;
return 1;
}
window = SDL_CreateWindow("Frogger", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, Window_Width, Window_Height, SDL_WINDOW_SHOWN);
//creates a window in the centre of the screen, it uses const int's to define the size of the window
if (window == NULL)
{
cout << SDL_GetError()<<endl;
return 1;
}
render = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
//this renders the window
if (render == NULL)
{
cout << SDL_GetError()<<endl;
return 1;
}
//loading the images using the function
SDL_Texture* background = NULL;
SDL_Texture* frog = NULL;
background = loadImage("background.bmp");
frog = loadImage("frogger.bmp");
SDL_Delay(2000);
SDL_RenderClear(render);
SDL_RenderPresent(render);
SDL_UpdateWindowSurface(window);
//freeing the memory back up
SDL_DestroyRenderer(render);
SDL_DestroyWindow(window);
SDL_DestroyTexture(background);
SDL_DestroyTexture(frog);
SDL_Quit();
return 0;
}
You aren't rendering anything; you just load some textures and end the program.
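For example, a minimal sketch of the missing step, continuing the code above after the textures are loaded (the frog destination rectangle is just an assumed size and position):
// Copy the textures to the renderer and present the result each frame.
SDL_Rect frogPos = { 0, 0, 64, 64 };   // assumed sprite size/position
bool quit = false;
SDL_Event e;
while (!quit) {
    while (SDL_PollEvent(&e)) {
        if (e.type == SDL_QUIT) quit = true;
    }
    SDL_RenderClear(render);
    SDL_RenderCopy(render, background, NULL, NULL);  // stretch background over the window
    SDL_RenderCopy(render, frog, NULL, &frogPos);    // draw the sprite
    SDL_RenderPresent(render);
}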

C++11 + SDL2 + Windows: Multithreaded program hangs after any input event

I am working on a screen capture program using C++11, MinGW, and the Windows API. I am trying to use SDL2 to watch how my screen capture program works in real time.
The window opens fine, and the program seems to run well as long as I do nothing more than move the mouse cursor. But if I click in the window, its menu bar, or outside the window, or press any keys, the SDL window freezes.
I have set up some logging for the events to figure out what is happening. I never receive any events other than SDL_WINDOW_FOCUS_GAINED, SDL_TEXTEDITING, and SDL_WINDOWEVENT_SHOWN in that order. All of these are received at the start.
I have tried to find tutorials on SDL event handling, since that is my best guess as to the source of the problem. I have found nothing beyond basic event handling for SDL_QUIT, basic mouse and keyboard events, and one tutorial on SDL_WINDOWEVENTs that does not seem to help. I have found nothing in-depth on what the events mean and on best practices for handling them. That may not matter, because that might not be the source of the problem. For all I know, SDL is throwing a fit because there are other threads running.
Can anyone see any cause for this hanging in my code and provide an explanation as to how to fix it?
A quick explanation of the structure of my program is in order, to cover the code I have omitted. The Captor class starts and runs a thread that grabs a screenshot and passes it to the Encoder. The Encoder starts a variable number of threads that receive a screenshot from the Captor, encode it, and pass the encoding to the Screen. The passing mechanism is the SynchronousQueue<T> class, which provides the paired methods put(const T&) and T get() to let a producer and a consumer synchronize on a resource; these methods time out so that the system stays responsive to kill messages.
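Roughly, that queue behaves like the following sketch (not my actual implementation, which I have omitted; the 100 ms timeout is only an example value):
// Sketch of a single-slot synchronous queue: put()/get() block until the
// other side arrives or a timeout expires, so threads can notice shutdown.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <stdexcept>

template <typename T>
class SynchronousQueue {
    std::mutex m;
    std::condition_variable cv;
    bool full = false;
    T item;                                   // requires T to be default-constructible
    std::chrono::milliseconds timeout{100};   // example timeout
public:
    explicit SynchronousQueue(unsigned /*capacity*/ = 1) {}

    void put(const T& value) {
        std::unique_lock<std::mutex> lock(m);
        if (!cv.wait_for(lock, timeout, [this] { return !full; }))
            throw std::runtime_error("put timed out");
        item = value;
        full = true;
        cv.notify_all();                      // wake a waiting consumer
    }

    T get() {
        std::unique_lock<std::mutex> lock(m);
        if (!cv.wait_for(lock, timeout, [this] { return full; }))
            throw std::runtime_error("get timed out");
        T value = item;
        full = false;
        cv.notify_all();                      // wake a waiting producer
        return value;
    }
};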
Now for the source files (hopefully without too much bloat). While I would appreciate any comments on how to improve the performance of the application, my focus is on making the program responsive.
main.cpp
#include "RTSC.hpp"
int main(int argc, char** argv) {
RTSC rtsc {
(uint32_t) stoi(argv[1]),
(uint32_t) stoi(argv[2]),
(uint32_t) stoi(argv[3]),
(uint32_t) stoi(argv[4]),
(uint32_t) stoi(argv[5]),
(uint32_t) stoi(argv[6])
};
while (rtsc.isRunning()) {
SwitchToThread();
}
return 0;
}
RTSC.hpp
#ifndef RTSC_HPP
#define RTSC_HPP
#include "Captor.hpp"
#include "Encoder.hpp"
#include "Screen.hpp"
#include <iostream>
using namespace std;
class RTSC {
private:
Captor *captor;
Encoder *encoder;
SynchronousQueue<uint8_t*> imageQueue {1};
SynchronousQueue<RegionList> regionQueue {1};
Screen *screen;
public:
RTSC(
uint32_t width,
uint32_t height,
uint32_t maxRegionCount,
uint32_t threadCount,
uint32_t divisionsAlongThreadWidth,
uint32_t divisionsAlongThreadHeight
) {
captor = new Captor(width, height, imageQueue);
encoder = new Encoder(
width,
height,
maxRegionCount,
threadCount,
divisionsAlongThreadWidth,
divisionsAlongThreadHeight,
imageQueue,
regionQueue
);
screen = new Screen(
width,
height,
width >> 1,
height >> 1,
regionQueue
);
captor->start();
}
~RTSC() {
delete screen;
delete encoder;
delete captor;
}
bool isRunning() const {
return screen->isRunning();
}
};
#endif
Screen.hpp
#ifndef SCREEN_HPP
#define SCREEN_HPP
#include <atomic>
#include <SDL.h>
#include <windows.h>
#include "Region.hpp"
#include "SynchronousQueue.hpp"
using namespace std;
class Screen {
private:
atomic_bool running {false};
HANDLE thread;
SynchronousQueue<RegionList>* inputQueue;
uint32_t inputHeight;
uint32_t inputWidth;
uint32_t screenHeight;
uint32_t screenWidth;
SDL_Renderer* renderer;
SDL_Surface* surface;
SDL_Texture* texture;
SDL_Window* window;
void run() {
SDL_Event event;
while (running) {
while (SDL_PollEvent(&event)) {
switch (event.type) {
case SDL_QUIT:
running = false;
break;
case SDL_WINDOWEVENT:
switch (event.window.event) {
case SDL_WINDOWEVENT_CLOSE:
running = false;
break;
default:
break;
}
}
try {
RegionList rl = inputQueue->get();
SDL_RenderClear(renderer);
SDL_LockSurface(surface);
SDL_FillRect(surface, nullptr, 0);
for (uint32_t i = 0; i < rl.count; ++i) {
Region &r = rl.regions[i];
SDL_Rect rect {
(int) r.getX(),
(int) r.getY(),
(int) r.getWidth(),
(int) r.getHeight()
};
uint32_t color =
(r.getRed() << 16) +
(r.getGreen() << 8) +
r.getBlue();
SDL_FillRect(surface, &rect, color);
}
SDL_UnlockSurface(surface);
SDL_UpdateTexture(
texture,
nullptr,
surface->pixels,
surface->pitch
);
SDL_RenderCopyEx(
renderer,
texture,
nullptr,
nullptr,
0,
nullptr,
SDL_FLIP_VERTICAL
);
} catch (exception &e) {}
SDL_RenderPresent(renderer);
SwitchToThread();
}
}
static DWORD startThread(LPVOID self) {
((Screen*) self)->run();
return (DWORD) 0;
}
public:
Screen(
uint32_t inputWidth,
uint32_t inputHeight,
uint32_t windowWidth,
uint32_t windowHeight,
SynchronousQueue<RegionList> &inputQueue
): inputQueue {&inputQueue}, inputHeight {inputHeight} {
SDL_Init(SDL_INIT_VIDEO);
window = SDL_CreateWindow(
"RTSC",
SDL_WINDOWPOS_CENTERED,
SDL_WINDOWPOS_CENTERED,
windowWidth,
windowHeight,
SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE |
SDL_WINDOW_INPUT_FOCUS | SDL_WINDOW_MOUSE_FOCUS
);
renderer = SDL_CreateRenderer(window, -1, 0);
surface = SDL_CreateRGBSurface(
0,
inputWidth,
inputHeight,
24,
0xFF << 16,
0xFF << 8,
0xFF,
0
);
texture = SDL_CreateTexture(
renderer,
surface->format->format,
SDL_TEXTUREACCESS_STREAMING,
inputWidth,
inputHeight
);
running = true;
thread = CreateThread(nullptr, 0, startThread, this, 0, nullptr);
}
~Screen() {
running = false;
WaitForSingleObject(thread, INFINITE);
CloseHandle(thread);
SDL_FreeSurface(surface);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
}
bool isRunning() const {
return running;
}
};
#endif
I have no experience using the SDL API in a multithreaded environment, but that isn't a big problem, as you will see. I've checked your code and there is at least one thing you should change, in my opinion.
Generally, in the case of GUI systems (and SDL is, in part, a GUI system), you should access the GUI only from the main thread and expect GUI events to arrive on the main thread. Most GUI APIs are single-threaded, and I wouldn't be surprised if that applies to SDL too. Note that many GUI systems run on the main thread of your process by default and you can't choose your own thread. Don't run the code of your Screen class on a worker thread; run it on your main thread and make EVERY SDL API call from the main thread.
If you are writing a game or similar software, then (at first) write it as if it were single-threaded. The subsystems of your engine (physics simulation, this-and-that system, game logic, rendering) should be executed serially, one after the other, on your main thread from your main loop. If you want to make use of multithreading, do it in "another dimension": convert individual subsystems or smaller units of work (like a merge sort) to be multithreaded. For example, a physics update can often be split into several small tasks, so when the main thread updates the physics system, the physics system can burn all of your cores...
Doing most of your work on the main thread has another advantage: it makes your code much easier to port to other platforms. Optionally, if you write your code so that it can also execute in single-threaded mode, debugging becomes easier in many cases, and you also have a "reference build" to compare the multithreaded build against, performance-wise.
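To illustrate, here is a self-contained sketch of that structure (not the asker's exact classes; the captured frame format and the queue are simplified stand-ins): a worker thread produces frames while the main thread owns the window, pumps events, and renders.
// Sketch: worker thread captures, main thread owns SDL and renders.
#include <SDL.h>
#include <atomic>
#include <chrono>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

static std::mutex frameMutex;
static std::queue<std::vector<Uint8>> frames;   // stand-in for the RegionList queue
static std::atomic<bool> running(true);

static void captureWorker(int w, int h) {        // plays the role of Captor/Encoder
    while (running) {
        std::vector<Uint8> frame(w * h * 4, 128);   // fake "captured" ARGB frame
        {
            std::lock_guard<std::mutex> lock(frameMutex);
            if (frames.size() < 2) frames.push(std::move(frame));
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(16));
    }
}

int main(int argc, char** argv) {
    const int W = 640, H = 480;
    SDL_Init(SDL_INIT_VIDEO);
    SDL_Window* win = SDL_CreateWindow("RTSC", SDL_WINDOWPOS_CENTERED,
                                       SDL_WINDOWPOS_CENTERED, W, H, SDL_WINDOW_SHOWN);
    SDL_Renderer* ren = SDL_CreateRenderer(win, -1, 0);
    SDL_Texture* tex = SDL_CreateTexture(ren, SDL_PIXELFORMAT_ARGB8888,
                                         SDL_TEXTUREACCESS_STREAMING, W, H);
    std::thread worker(captureWorker, W, H);     // only non-SDL work runs off-thread

    SDL_Event e;
    while (running) {
        while (SDL_PollEvent(&e)) {              // event pump stays on the main thread
            if (e.type == SDL_QUIT) running = false;
        }
        std::lock_guard<std::mutex> lock(frameMutex);
        if (!frames.empty()) {
            SDL_UpdateTexture(tex, NULL, frames.front().data(), W * 4);
            frames.pop();
            SDL_RenderClear(ren);
            SDL_RenderCopy(ren, tex, NULL, NULL);
            SDL_RenderPresent(ren);
        }
    }
    worker.join();
    SDL_DestroyTexture(tex);
    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}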

How do you programmatically access the ambient light sensor on Mac OS X 10.5+?

I'm trying to programmatically access the ambient light sensor in a Mac application running on OS X 10.5 and above, but can't find a way to do this.
Two other questions have been asked about this here, "Accessing mac's sensor data" and "Disable ambient-light sensor screen dimming programmatically on OS X", but they either didn't address this or presented solutions that break on 10.5 and up.
What private API does Apple use to access the ambient light-sensor data on OS X and/or how would I find it?
The closest thing I've found is example code from a Firefox bug report, last modified in April 2013. The following works, producing a simple CLI program that queries the sensor (taken freely from https://bugzilla.mozilla.org/show_bug.cgi?id=793728#attach_664102). The service polled is "AppleLMUController": the snippet below obtains it with serviceObject = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("AppleLMUController")) and then reads the sensor values through it.
// lmutracker.mm
//
// clang -o lmutracker lmutracker.mm -framework IOKit -framework CoreFoundation
#include <mach/mach.h>
#import <IOKit/IOKitLib.h>
#import <CoreFoundation/CoreFoundation.h>
static double updateInterval = 0.1;
static io_connect_t dataPort = 0;
void updateTimerCallBack(CFRunLoopTimerRef timer, void *info) {
kern_return_t kr;
uint32_t outputs = 2;
uint64_t values[outputs];
kr = IOConnectCallMethod(dataPort, 0, nil, 0, nil, 0, values, &outputs, nil, 0);
if (kr == KERN_SUCCESS) {
printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b%8lld %8lld", values[0], values[1]);
return;
}
if (kr == kIOReturnBusy) {
return;
}
mach_error("I/O Kit error:", kr);
exit(kr);
}
int main(void) {
kern_return_t kr;
io_service_t serviceObject;
CFRunLoopTimerRef updateTimer;
serviceObject = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("AppleLMUController"));
if (!serviceObject) {
fprintf(stderr, "failed to find ambient light sensors\n");
exit(1);
}
kr = IOServiceOpen(serviceObject, mach_task_self(), 0, &dataPort);
IOObjectRelease(serviceObject);
if (kr != KERN_SUCCESS) {
mach_error("IOServiceOpen:", kr);
exit(kr);
}
setbuf(stdout, NULL);
printf("%8ld %8ld", 0L, 0L);
updateTimer = CFRunLoopTimerCreate(kCFAllocatorDefault,
CFAbsoluteTimeGetCurrent() + updateInterval, updateInterval,
0, 0, updateTimerCallBack, NULL);
CFRunLoopAddTimer(CFRunLoopGetCurrent(), updateTimer, kCFRunLoopDefaultMode);
CFRunLoopRun();
exit(0);
}
While #Landak's answer was good for its time, that ambient-light-sensor API seems to have been deprecated.
The code that works now is as follows:
// lmutracker.mm
//
// clang -o lmutracker lmutracker.mm -F /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/PrivateFrameworks -framework Foundation -framework IOKit -framework CoreFoundation -framework BezelServices
#include <mach/mach.h>
#import <Foundation/Foundation.h>
#import <IOKit/IOKitLib.h>
#import <IOKit/hidsystem/IOHIDServiceClient.h>
typedef struct __IOHIDEvent *IOHIDEventRef;
#define kAmbientLightSensorEvent 12
#define IOHIDEventFieldBase(type) (type << 16)
extern "C" {
IOHIDEventRef IOHIDServiceClientCopyEvent(IOHIDServiceClientRef, int64_t, int32_t, int64_t);
double IOHIDEventGetFloatValue(IOHIDEventRef, int32_t);
IOHIDServiceClientRef ALCALSCopyALSServiceClient(void);
}
static double updateInterval = 0.1;
static IOHIDServiceClientRef client;
static IOHIDEventRef event;
void updateTimerCallBack(CFRunLoopTimerRef timer, void *info) {
double value;
event = IOHIDServiceClientCopyEvent(client, kAmbientLightSensorEvent, 0, 0);
value = IOHIDEventGetFloatValue(event, IOHIDEventFieldBase(kAmbientLightSensorEvent));
printf("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b%8f", value);
CFRelease(event);
}
int main(void) {
kern_return_t kr;
CFRunLoopTimerRef updateTimer;
client = ALCALSCopyALSServiceClient();
if (client) {
event = IOHIDServiceClientCopyEvent(client, kAmbientLightSensorEvent, 0, 0);
}
if (!event) {
fprintf(stderr, "failed to find ambient light sensors\n");
exit(1);
}
CFRelease(event);
setbuf(stdout, NULL);
printf("%8f", 0.0);
updateTimer = CFRunLoopTimerCreate(kCFAllocatorDefault,
CFAbsoluteTimeGetCurrent() + updateInterval, updateInterval,
0, 0, updateTimerCallBack, NULL);
CFRunLoopAddTimer(CFRunLoopGetCurrent(), updateTimer, kCFRunLoopDefaultMode);
CFRunLoopRun();
exit(0);
}
I found this in DarkModeBuddy (BSD 2-clause license) and adapted it into a CLI tool in my dotfiles.
You can also get the value of the ambient light sensor from the terminal.
First install the SMC (System Management Controller) command-line tool, the smc executable, and run it from the terminal. Running ./smc -l shows the list of all sensors that exist on the Mac; look for the key ALSL, which gives the actual value of the ambient light sensor on any Mac.
