This code works on GNU/Linux but I can't find how to make it run on Mac OS X Lion.
Here is a test code. Nothing too hard to understand, only an empty window waiting for a quit event to stop. If I comment the glClear call, everything works fine... in fact glClear or any gl call makes it crash on a nice Segmentation fault.
Here is the code :
#include <iostream>
#include <SDL/SDL.h>
#include <SDL/SDL_opengl.h>
// Opens a 640x480 double-buffered OpenGL window and loops until a quit event.
// NOTE(review): on Mac OS X this must be linked against the OpenGL *framework*
// (-framework OpenGL), not the X11 -lGL; linking the X11 libGL together with
// SDL's Cocoa video leaves GL entry points pointing at address 0, which is
// exactly the "0x0000000000000000 in ?? ()" crash in the backtrace below.
int main( int ac, char **av )
{
    SDL_Surface *screen;
    SDL_Init( SDL_INIT_VIDEO );
    if(SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8) < 0) { printf("opengl error: %s\n", SDL_GetError()); }
    if(SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8) < 0) { printf("opengl error: %s\n", SDL_GetError()); }
    if(SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8) < 0) { printf("opengl error: %s\n", SDL_GetError()); }
    if(SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32) < 0) { printf("opengl error: %s\n", SDL_GetError()); }
    if(SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1) < 0) { printf("couldn't set double buffering: %s\n", SDL_GetError()); }
    if ( (screen = SDL_SetVideoMode( 640, 480, 32, SDL_OPENGL | SDL_NOFRAME | SDL_DOUBLEBUF )) == NULL )
    {
        exit( EXIT_FAILURE );
    }
    SDL_WM_SetCaption( "test", NULL );
    bool loop = true;
    SDL_Event event;
    while ( loop )
    {
        glClear( GL_COLOR_BUFFER_BIT );
        // We asked for a double-buffered context, so present the back buffer.
        SDL_GL_SwapBuffers();
        // SDL_PollEvent() returns 0 when no event is pending and leaves
        // `event` untouched; the original inspected event.type unconditionally,
        // reading an uninitialized struct on the first iteration.  Drain the
        // queue and only look at events that were actually filled in.
        while ( SDL_PollEvent( &event ) )
        {
            if ( event.type == SDL_QUIT )
            {
                loop = false;
            }
        }
    }
    SDL_Quit();
    return 0;
}
Here is the how I compile it:
g++ -g -I/opt/local/include -I/usr/X11R6/include -L/opt/local/lib -lSDLmain -lSDL -Wl,-framework,Cocoa -L/usr/X11R6/lib -lGL main.cpp
gdb does not help me so much:
Reason: KERN_INVALID_ADDRESS at address: 0x0000000000000000
0x0000000000000000 in ?? ()
(gdb) bt
#0 0x0000000000000000 in ?? ()
#1 0x000000010000269d in SDL_main (ac=1, av=0x100517c90) at main.cpp:28
#2 0x0000000100002360 in -[SDLMain applicationDidFinishLaunching:] ()
#3 0x00007fff90bc2de2 in __-[NSNotificationCenter addObserver:selector:name:object:]_block_invoke_1 ()
#4 0x00007fff8c354e0a in _CFXNotificationPost ()
#5 0x00007fff90baf097 in -[NSNotificationCenter postNotificationName:object:userInfo:] ()
#6 0x00007fff8a49faa7 in -[NSApplication _postDidFinishNotification] ()
#7 0x00007fff8a49f80d in -[NSApplication _sendFinishLaunchingNotification] ()
#8 0x00007fff8a49e4d2 in -[NSApplication(NSAppleEventHandling) _handleAEOpenEvent:] ()
#9 0x00007fff8a49e233 in -[NSApplication(NSAppleEventHandling) _handleCoreEvent:withReplyEvent:] ()
#10 0x00007fff8c39e851 in -[NSObject performSelector:withObject:withObject:] ()
#11 0x00007fff90be589b in __-[NSAppleEventManager setEventHandler:andSelector:forEventClass:andEventID:]_block_invoke_1 ()
#12 0x00007fff90be4822 in -[NSAppleEventManager dispatchRawAppleEvent:withRawReply:handlerRefCon:] ()
#13 0x00007fff90be46b0 in _NSAppleEventManagerGenericHandler ()
#14 0x00007fff8e760c25 in aeDispatchAppleEvent ()
#15 0x00007fff8e760b03 in dispatchEventAndSendReply ()
#16 0x00007fff8e7609f7 in aeProcessAppleEvent ()
#17 0x00007fff912a1b6d in AEProcessAppleEvent ()
#18 0x00007fff8a49b63d in _DPSNextEvent ()
#19 0x00007fff8a49acf5 in -[NSApplication nextEventMatchingMask:untilDate:inMode:dequeue:] ()
#20 0x00007fff8a49762d in -[NSApplication run] ()
#21 0x0000000100002174 in main ()
I'm not sure, but IIRC you need to add the OpenGL framework as well, if using SDL for window and context creation.
Related
#my problem:#
1. I create two threads, one real-time thread, one normal thread; both threads have read-write locks, and we bind two threads to one CPU core to run;
2. The two threads hang. Has anyone encountered this problem before?
3. My Glibc library is libc-2.27.so
#include <sys/prctl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <iostream>
#include <thread>
pthread_rwlock_t qlock;
// Pins the calling thread to CPU core `i`.
// Returns 0 on success, -1 on failure (a diagnostic is printed to stderr).
inline int set_cpu(int i)
{
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(i,&mask);
    // pthread_t is an opaque type; print it through an unsigned long cast so
    // the format specifier matches the argument (the original "%u" with a raw
    // pthread_t is undefined behaviour on LP64 targets).
    printf("thread %lu, i = %d\n", (unsigned long)pthread_self(), i);
    // pthread_setaffinity_np() returns 0 on success and a positive errno-style
    // error number on failure -- never -1 -- so the original "== -1" check
    // could never fire.  Test for any non-zero result instead.
    if(pthread_setaffinity_np(pthread_self() ,sizeof(mask),&mask) != 0)
    {
        fprintf(stderr, "pthread_setaffinity_np error\n");
        return -1;
    }
    return 0;
}
// Switches the calling thread to the SCHED_RR real-time policy at `prio`.
// @param thread_name  name used in the diagnostic messages only.
// @param prio         requested RT priority; must be <= 99.
// @return 0 on success, -EINVAL for an out-of-range priority, or the negative
//         sched_setscheduler() result on failure (e.g. EPERM without
//         CAP_SYS_NICE).  The original returned 0 even on failure, so callers
//         could not tell the thread was never made real-time.
int32_t set_rt_thread(const char *thread_name, uint32_t prio)
{
    struct sched_param cfg_param;
    int32_t ret = 0;
    if (prio > 99)
    {
        printf("%s rt prio is too big, %u.\n", thread_name, prio);
        return -EINVAL;
    }
    cfg_param.sched_priority = prio;
    // Pid 0 == the calling thread.
    ret = sched_setscheduler(0, SCHED_RR, &cfg_param);
    printf("Cur thread new scheduler = %d.\n", sched_getscheduler(0));
    if(ret < 0)
    {
        printf("%s sched_set scheduler to SCHED_RR error.\n", thread_name);
        // Propagate the failure instead of silently reporting success.
        return ret;
    }
    printf("%s tid=%ld is rt thread now.\n", thread_name, syscall(SYS_gettid));
    return 0;
}
// Sets the name of the calling thread (visible in /proc/<pid>/task and top).
// Returns 0 on success, -1 on error with errno set (prctl semantics).
// Fix: the original had a stray "strong text" Markdown artifact before the
// opening brace, which does not compile.
int32_t set_thread_name(const char *name)
{
    return prctl(PR_SET_NAME, name);
}
//thread 1
// Worker thread 1: names itself, requests SCHED_RR priority 90, pins itself
// to CPU core 5, then loops forever taking the global write lock, printing,
// unlocking and sleeping briefly.
// NOTE(review): both worker threads are pinned to the same core and run under
// SCHED_RR at different priorities while contending on one rwlock; if the
// higher-priority thread busy-waits inside glibc's rwlock while this thread
// holds it, neither can progress -- presumably the hang reported above.
// Confirm against the glibc 2.27 pthread_rwlock_common.c implementation.
void thead_input() {
std::string thread_name = "thead_input";
set_thread_name(thread_name.c_str());
set_rt_thread(thread_name.c_str(),90);   // RT priority 90 (lower than thread 2's 99)
set_cpu(5);                              // same core as thead_output
while(1) {
pthread_rwlock_wrlock(&qlock);           // exclusive lock on the shared rwlock
printf("thead 1\n");
pthread_rwlock_unlock(&qlock);
printf("thread unlock 2\n");
usleep(100);                             // 100 microseconds between iterations
}
}
//thread 2
// Worker thread 2: pins itself to CPU core 5 (the same core as thread 1),
// names itself, requests SCHED_RR priority 99, then loops forever taking the
// same global write lock, printing, unlocking and sleeping briefly.
// NOTE(review): runs at a *higher* RT priority than thead_input on the same
// core -- see the priority/rwlock hang note on thead_input.
void thead_output() {
std::string thread_name = "thead_output";
set_cpu(5);                              // same core as thead_input
set_thread_name(thread_name.c_str());
set_rt_thread(thread_name.c_str(),99);   // RT priority 99 (highest allowed)
while(1) {
pthread_rwlock_wrlock(&qlock);           // exclusive lock on the shared rwlock
std::cout<<"thead 2"<<std::endl;
pthread_rwlock_unlock(&qlock);
usleep(100);                             // 100 microseconds between iterations
}
}
//main function
int main(int argc, char **argv) {
if(pthread_rwlock_init(&qlock, NULL) != 0) {
std::cout<<"pthread rw lock init fail\n"<<std::endl;
return -1;
}
std::thread t1(thead_input);
std::thread t2(thead_output);
t1.join();
t2.join();
return 0;
}
Debug log:
[Thread debugging using libthread_db enabled] Using host libthread_db library "/lib/aarch64-linux-gnu/libthread_db.so.1".
0x0000007f899de31c in __GI___pthread_timedjoin_ex
(threadid=547765662160, thread_return=0x0, abstime=0x0,
block=<optimized out>) at pthread_join_common.c:89 89 pthread_join_common.c: No such file or directory. (gdb) thread
apply all bt
Thread 3 (Thread 0x7f88e0a1d0 (LWP 21439)):
#0 __pthread_rwlock_wrlock_full (abstime=0x0, rwlock=0x5577ff2020 <qlock>) at pthread_rwlock_common.c:679
#1 __GI___pthread_rwlock_wrlock (rwlock=0x5577ff2020 <qlock>) at pthread_rwlock_wrlock.c:27
#2 0x0000005577fdf8b8 in thead_output () at main.cpp:88
#3 0x0000005577fdff64 in std::__invoke_impl<void, void (*)()> (__f=#0x55a376afc8: 0x5577fdf844 <thead_output()>)
at /usr/include/c++/7/bits/invoke.h:60
#4 0x0000005577fdfd30 in std::__invoke<void (*)()> (__fn=#0x55a376afc8: 0x5577fdf844 <thead_output()>)
at /usr/include/c++/7/bits/invoke.h:95
#5 0x0000005577fe04cc in std::thread::_Invoker<std::tuple<void (*)()> >::_M_invoke<0ul>
(this=0x55a376afc8)
at /usr/include/c++/7/thread:234
#6 0x0000005577fe0480 in std::thread::_Invoker >::operator()
(this=0x55a376afc8)
at /usr/include/c++/7/thread:243
#7 0x0000005577fe044c in std::thread::_State_impl
::_M_run (
this=0x55a376afc0) at /usr/include/c++/7/thread:186
#8 0x0000007f898fee14 in ?? () from /usr/lib/aarch64-linux-gnu/libstdc++.so.6
#9 0x0000007f899dd088 in start_thread (arg=0x7ff794cc5f) at pthread_create.c:463
#10 0x0000007f897964ec in thread_start () at ../sysdeps/unix/sysv/linux/aarch64/clone.S:78
Thread 2 (Thread 0x7f8960b1d0 (LWP 21438)):
#0 0x0000007f899e1ef8 in __pthread_rwlock_wrlock_full (abstime=0x0, rwlock=0x5577ff2020 <qlock>)
at pthread_rwlock_common.c:595
#1 __GI___pthread_rwlock_wrlock (rwlock=0x5577ff2020 <qlock>) at pthread_rwlock_wrlock.c:27
#2 0x0000005577fdf7ec in thead_input () at main.cpp:71
#3 0x0000005577fdff64 in std::__invoke_impl<void, void (*)()> (__f=#0x55a376ae78: 0x5577fdf788 <thead_input()>)
at /usr/include/c++/7/bits/invoke.h:60
#4 0x0000005577fdfd30 in std::__invoke<void (*)()> (__fn=#0x55a376ae78: 0x5577fdf788 <thead_input()>)
at /usr/include/c++/7/bits/invoke.h:95
#5 0x0000005577fe04cc in std::thread::_Invoker<std::tuple<void (*)()> >::_M_invoke<0ul>
(this=0x55a376ae78)
at /usr/include/c++/7/thread:234
#6 0x0000005577fe0480 in std::thread::_Invoker >::operator()
(this=0x55a376ae78)
at /usr/include/c++/7/thread:243
#7 0x0000005577fe044c in std::thread::_State_impl
::_M_run (
this=0x55a376ae70) at /usr/include/c++/7/thread:186
#8 0x0000007f898fee14 in ?? () from /usr/lib/aarch64-linux-gnu/libstdc++.so.6
#9 0x0000007f899dd088 in start_thread (arg=0x7ff794cc5f) at pthread_create.c:463
#10 0x0000007f897964ec in thread_start () at ../sysdeps/unix/sysv/linux/aarch64/clone.S:78
Thread 1 (Thread 0x7f89a75ce0 (LWP 21436)):
#0 0x0000007f899de31c in __GI___pthread_timedjoin_ex (threadid=547765662160, thread_return=0x0, abstime=0x0,
block=<optimized out>) at pthread_join_common.c:89
#1 0x0000007f898ff0a8 in std::thread::join() () from /usr/lib/aarch64-linux-gnu/libstdc++.so.6
#2 0x0000005577fdf9cc in main (argc=1, argv=0x7ff794ce98) at main.cpp:104
Getting crash in regcomp() on suse linux,
Following is the backtrace,
0x00007fb4bd90c0e0 in raise () from /lib64/libc.so.6
(gdb) bt
#0 0x00007fb4bd90c0e0 in raise () from /lib64/libc.so.6
#1 0x00007fb4bd90d6c1 in abort () from /lib64/libc.so.6
#2 0x00007fb4bd94f427 in __libc_message () from /lib64/libc.so.6
#3 0x00007fb4bd955c43 in malloc_printerr () from /lib64/libc.so.6
#4 0x00007fb4bd9595b1 in _int_malloc () from /lib64/libc.so.6
#5 0x00007fb4bd95b61a in calloc () from /lib64/libc.so.6
#6 0x00007fb4bd9aca65 in parse_expression () from /lib64/libc.so.6
#7 0x00007fb4bd9ae087 in parse_branch () from /lib64/libc.so.6
#8 0x00007fb4bd9ae1bc in parse_reg_exp () from /lib64/libc.so.6
#9 0x00007fb4bd9ae797 in re_compile_internal () from /lib64/libc.so.6
#10 0x00007fb4bd9b44e9 in regcomp () from /lib64/libc.so.6
Tried changing the const and non-const variables.
should not crash
// Compiles an extended POSIX regex, then releases it again.
// Fixes: the compiled regex_t was never released (regcomp() allocates
// internal buffers that must be freed with regfree()), and its return value
// was computed but never acted on.
// NOTE(review): a crash *inside* regcomp's calloc, as in the backtrace above,
// means the heap was already corrupted by earlier code in the real program --
// the actual fix belongs at the corruption site (run under valgrind or
// AddressSanitizer), not in this snippet, which is well-formed.
int main()
{
    regex_t myRegEx;
    std::string str = "\\[[0-9]+\\][0-9]+\\[/[0-9]+\\]";
    const int retval = regcomp(&myRegEx, str.c_str(), REG_EXTENDED);
    if (retval == 0) {
        regfree(&myRegEx);   // release buffers allocated by regcomp()
    }
    return 0;
}
no crash
I believe I have an SDL2 bug here and I wanted some verification. When I create a renderer it is invalid and I cannot initialize SDL_ttf because of this. Here is a quick demo program that exhibits the issue on my xubuntu 14.04 distribution. My graphics card is an NVIDIA GTX 550 Ti. Driver version 331.113 proprietary, tested.
#include <SDL2/SDL.h>
#include <string>
#include <stdexcept>
#include <iostream>
using namespace std;
int main() {
SDL_Window* _window;
SDL_Renderer* _renderer;
if( SDL_Init( SDL_INIT_EVERYTHING ) != 0 ) {
string error( SDL_GetError() );
throw runtime_error( "SDL could not initialize! SDL Error: " + error );
}
_window = SDL_CreateWindow( "Conscious", SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, 500, 500, SDL_WINDOW_SHOWN );
if( _window == NULL ) {
string error( SDL_GetError() );
throw runtime_error( "Window could not be created! SDL Error: " + error );
}
_renderer = SDL_CreateRenderer( _window , -1,
SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC );
if( _renderer == NULL ) {
string error( SDL_GetError() );
throw runtime_error( "Renderer could not be created! SDL Error: " + error );
}
cout << SDL_GetError() << endl;
return 0;
}
Let me know what you think!
Thanks,
Jonathon
--EDIT--
If you take out SDL_image and SDL_ttf the renderer is still returned as invalid.
Also this does not happen on windows with the same exact hardware. If this is not a bug would somebody please explain what could be wrong with such a simple example?
--EDIT EDIT--
This code prints out "Invalid renderer" on my system. If you have SDL2 please run it and let me know if you do too.
While it does indeed produce an 'Invalid renderer' message, it isn't a problem: as the SDL_GetError documentation states, "You must check the return values of SDL function calls to determine when to appropriately call SDL_GetError()", and SDL gave you no reason to look for an error description in GetError.
Why it happens, by the way:
Breakpoint 1, 0x00007ffff7b00e90 in SDL_SetError_REAL () from /usr/lib64/libSDL2-2.0.so.0
(gdb) backtrace
#0 0x00007ffff7b00e90 in SDL_SetError_REAL () from /usr/lib64/libSDL2-2.0.so.0
#1 0x00007ffff7b8f124 in SDL_GL_GetProcAddress_REAL () from /usr/lib64/libSDL2-2.0.so.0
#2 0x00007ffff7b8f80b in SDL_GL_GetAttribute_REAL () from /usr/lib64/libSDL2-2.0.so.0
#3 0x00007ffff7b41bcf in GL_CreateRenderer () from /usr/lib64/libSDL2-2.0.so.0
#4 0x00007ffff7b3b66c in SDL_CreateRenderer_REAL () from /usr/lib64/libSDL2-2.0.so.0
#5 0x0000000000401383 in main ()
GL_CreateRenderer calls SDL_GetAttribute to get context version and such, which tries to load GL functions, which requires active renderer, but it isn't there yet. While not most eye-pleasant solution, it works well and it isn't a bug. Probable reason why you don't have it on windows is e.g. because it uses different renderer (d3d?).
The attached code works correctly if I compile it with no -O parameter. If however, I compile it with -O2, it fails to print out the intermediate functions in the traceback. Originally, I thought that everything was optimized out, so I put a call to printf into each of the routines to rule that out. It still had the same output.
Expected results: gcc -rdynamic -g test.c -o test -L/usr/local/lib -lexecinfo
./test
DEPTH=11
./test: f0 (0x40d952)
./test: f1 (0x40da0e)
./test: f2 (0x40da1e)
./test: f3 (0x40da2e)
./test: f4 (0x40da3e)
./test: f5 (0x40da4e)
./test: f6 (0x40da5e)
./test: f7 (0x40da6e)
./test: main (0x40da89)
./test: _start (0x40080e)
Unexpected results: gcc -O2 -rdynamic -g test.c -o test -L/usr/local/lib -lexecinfo
./test
DEPTH=2
./test: f0 (0x40794b)
#include <stdio.h>
#include <dlfcn.h>
// Maximum number of frames backtrace() will ever be asked to capture.
#define CALLSTACK_MAXLEN 64
//
// We use this macro instead of a for loop in backtrace() because the
// documentation says that you have to use a constant, not a variable.
//
// BT(X) expands to one switch case: stop (return X) when frame X does not
// exist, otherwise record frame X's return address into the global trace.
#define BT(X) { \
case X: \
if (!__builtin_frame_address(X)) { \
return X; \
} \
\
trace[X].address = __builtin_return_address(X); \
break; \
}
// One captured stack frame: the return address plus the symbol and object
// (shared library / executable) names resolved for it by dladdr().
struct call {
const void *address;
const char *function;
const char *object;
};
// Global capture buffer filled by backtrace() and printed by f0().
struct call trace[CALLSTACK_MAXLEN];
// Walks the current call stack using the GCC __builtin_frame_address /
// __builtin_return_address primitives and fills the global `trace` array.
// Both builtins require a *constant* argument, hence the BT(n) switch
// expansion instead of indexing with the loop variable; as written, at most
// 20 frames (BT(0)..BT(19)) can be captured regardless of `depth`.
// Returns the number of frames actually stored.
// NOTE(review): with -O2, tail calls become jumps and their frames vanish
// from this walk; compile with -fno-optimize-sibling-calls and
// -fno-omit-frame-pointer to keep them (see the discussion below).
int
backtrace(int depth) {
int i;
Dl_info dlinfo;
for (i = 0; i < depth; i++) {
switch (i) {
BT( 0);
BT( 1);
BT( 2);
BT( 3);
BT( 4);
BT( 5);
BT( 6);
BT( 7);
BT( 8);
BT( 9);
BT( 10);
BT( 11);
BT( 12);
BT( 13);
BT( 14);
BT( 15);
BT( 16);
BT( 17);
BT( 18);
BT( 19);
default: return i;   /* deeper than the BT() expansion covers */
}
/* Best-effort symbolisation; on failure the entry keeps its prior value. */
if (dladdr(trace[i].address, &dlinfo) != 0) {
trace[i].function = dlinfo.dli_sname;
trace[i].object = dlinfo.dli_fname;
}
}
return i;
}
/*
 * Captures the current call stack into the global `trace` and prints it.
 * Fix: the original print loop terminated on trace[i].object != NULL, which
 * both walks past the `depth` frames actually captured on this call (stale
 * entries from a previous call would be reprinted) and stops early whenever
 * dladdr() failed to resolve one frame.  Bound the loop by the returned
 * depth and keep the NULL check only as a guard for unresolved entries.
 */
void
f0() {
  int i;
  int depth;

  depth = backtrace(CALLSTACK_MAXLEN);
  printf("DEPTH=%d\n", depth);
  for (i = 0; i < depth && trace[i].object != NULL; i++) {
    printf("%s: %s (%p)\n", trace[i].object, trace[i].function, trace[i].address);
  }
}
/* Call chain f7 -> f6 -> ... -> f0; each level should add one stack frame.
 * NOTE(review): at -O2 these single-call bodies become tail calls (jmp), so
 * their frames disappear from the captured backtrace -- see the fix below. */
void f1() { f0(); }
void f2() { f1(); }
void f3() { f2(); }
void f4() { f3(); }
void f5() { f4(); }
void f6() { f5(); }
void f7() { f6(); }
/* Drives the demonstration: the backtrace captured inside f0() should list
 * f0..f7, main and the C runtime entry point. */
int main(int argc, char **argv) {
f7();
return 0;
}
Reason is tail-recursive optimization. Even if inlining is switched off, tail recursion changes call to jump, like
f6:
.LFB29:
.cfi_startproc
xorl %eax, %eax
jmp f5
So you must:
Exclude inlining
/* Marking each function noinline keeps GCC from folding the bodies into
 * their callers; combined with -fno-optimize-sibling-calls (below) every
 * call keeps its own stack frame, so the -O2 backtrace stays complete. */
void __attribute__ ((noinline)) f1() { f0(); }
void __attribute__ ((noinline)) f2() { f1(); }
void __attribute__ ((noinline)) f3() { f2(); }
void __attribute__ ((noinline)) f4() { f3(); }
void __attribute__ ((noinline)) f5() { f4(); }
void __attribute__ ((noinline)) f6() { f5(); }
void __attribute__ ((noinline)) f7() { f6(); }
Compile with -fno-optimize-sibling-calls and preserve frame pointer
gcc -O2 -rdynamic -g -o bfa bfa.c -ldl -fno-optimize-sibling-calls -fno-omit-frame-pointer
Output is:
$ ./bfa
DEPTH=10
./bfa: f0 (0x400f23)
./bfa: f1 (0x400f8b)
./bfa: f2 (0x400f9b)
./bfa: f3 (0x400fab)
./bfa: f4 (0x400fbb)
./bfa: f5 (0x400fcb)
./bfa: f6 (0x400fdb)
./bfa: f7 (0x400feb)
./bfa: main (0x400ffb)
/lib/libc.so.6: __libc_start_main (0x7fdfbae51c4d)
As desired.
I'm working on a Mac application that uses garbage collection. The application crashes for a few users and the crash logs indicate that it has something to do with memory corruption or memory trashing.
I post the important bit of the crash log below.
Exception Type: EXC_BAD_ACCESS (SIGSEGV)
Exception Codes: KERN_INVALID_ADDRESS at 0x0000000000000000
Crashed Thread: 0 Dispatch queue: com.apple.main-thread
Application Specific Information:
objc[81831]: garbage collection is ON
Thread 0 Crashed: Dispatch queue: com.apple.main-thread
0 libSystem.B.dylib 0x00007fffffe00847 __memcpy + 167
1 libauto.dylib 0x00007fff82718170 auto_zone_write_barrier_memmove + 96
2 libauto.dylib 0x00007fff8271916e auto_realloc(_malloc_zone_t*, void*, unsigned long) + 878
3 libSystem.B.dylib 0x00007fff8346e0db malloc_zone_realloc + 92
4 com.apple.Foundation 0x00007fff83169836 _NSMutableDataGrowBytes + 652
5 com.apple.Foundation 0x00007fff83169513 -[NSConcreteMutableData appendBytes:length:] + 101
6 MY.Application 0x000000010000b9cd -[Connection stream:handleEvent:] + 376
7 com.apple.CoreFoundation 0x00007fff85742373 _signalEventSync + 115
8 com.apple.CoreFoundation 0x00007fff857422e4 _cfstream_solo_signalEventSync + 116
What happens is, my application receives data from the network and writes that data to an NSMutableData object. I have talked about this with some other developers and our best guess is that memory is being trashed, which causes the crash.
The question is how do you prevent memory trashing and how do you debug bugs like this in Xcode?
For completeness, I also post the code of the method that leads up to the crash.
// NSStreamDelegate callback: dispatches events for this connection's input
// and output streams.
//
// Fixes relative to the original:
//  * `#selector(...)` is not Objective-C syntax (a Markdown mangling of
//    `@selector(...)`); corrected in all three places.
//  * The error case had a dangling `else`: it bound to the inner
//    `if (delegate ...)`, so a *write* error was reported when the input
//    stream's delegate lacked the read-error selector, and output-stream
//    errors were never reported at all.  Braces now make the intent explicit.
//  * -read:maxLength: returns a negative value on error and 0 at end of
//    stream; the original passed that straight to -appendBytes:length:,
//    where -1 becomes an enormous NSUInteger length -- a likely source of
//    the memory trashing seen in the crash log.  Bytes are now appended
//    only when bytesRead > 0.
//
// NOTE(review): `buffer` is static, so this method is not re-entrant; it
// assumes all stream callbacks arrive on a single thread/run loop -- confirm.
// NOTE(review): `inSize` is assumed to be a 64-bit ivar holding the length
// prefix of the next packet -- confirm its declared type matches the
// sizeof(uint64_t) memcpy below.
- (void)stream:(NSStream *)stream handleEvent:(NSStreamEvent)eventCode {
    switch(eventCode) {
        case NSStreamEventHasSpaceAvailable: {
            if (stream == outputStream) {
                [self writeBufferToStream];
            }
            break;
        }
        case NSStreamEventOpenCompleted:
            if (stream == inputStream) {
                readReady = YES;
            } else {
                writeReady = YES;
            }
            if ([self isReadyForUse] && [delegate respondsToSelector:@selector(connectionReadyForUse:)])
                [delegate connectionReadyForUse:self];
            break;
        case NSStreamEventHasBytesAvailable: {
            if (stream == inputStream) {
                NSInteger bytesRead = 0;
                static uint8_t buffer[kBufferSize];
                bytesRead = [inputStream read:buffer maxLength:sizeof(buffer)];
                if (bytesRead <= 0) {
                    // 0 = end of stream, < 0 = read error; in neither case
                    // is there data to append.
                    break;
                }
                [inBuffer appendBytes:buffer length:(NSUInteger)bytesRead];
                //** Process buffer contents **//
                BOOL safe = YES;
                while (safe) {
                    if (inSize <= 0) {
                        // Need the 8-byte length prefix of the next packet.
                        if ([inBuffer length] >= sizeof(uint64_t)) {
                            memcpy(&inSize, [inBuffer bytes], sizeof(uint64_t));
                            NSRange rangeToDelete = {0, sizeof(uint64_t)};
                            [inBuffer replaceBytesInRange:rangeToDelete withBytes:NULL length:0];
                        } else {
                            break;   // length prefix not complete yet
                        }
                    }
                    if (inSize > 0) {
                        if ([inBuffer length] >= inSize) {
                            // A full packet is available: hand a copy to the
                            // delegate, then drop it from the buffer.
                            NSMutableData *packetData = [NSMutableData dataWithBytes:[inBuffer bytes] length:inSize];
                            [delegate connection:self receivedData:packetData];
                            safe = NO;
                            NSRange rangeToDelete = {0, inSize};
                            [inBuffer replaceBytesInRange:rangeToDelete withBytes:NULL length:0];
                            inSize = 0;
                        } else {
                            break;   // packet body not complete yet
                        }
                    } else {
                        break;
                    }
                }
            }
            break;
        }
        case NSStreamEventErrorOccurred: {
            NSError *theError = [stream streamError];
            if (stream == inputStream) {
                if (delegate && [delegate respondsToSelector:@selector(connection:encounteredReadError:)])
                    [delegate connection:self encounteredReadError:theError];
            } else {
                if (delegate && [delegate respondsToSelector:@selector(connection:encounteredWriteError:)])
                    [delegate connection:self encounteredWriteError:theError];
            }
            break;
        }
        case NSStreamEventEndEncountered: {
            if (delegate && [delegate respondsToSelector:@selector(connectionDisconnected:)])
                [delegate connectionDisconnected:self];
            readReady = NO;
            writeReady = NO;
            break;
        }
        default:
            break;
    }
}
Keep your data reading and writing on the same thread. If you must do it on multiple threads, use a lock or a barrier to make the shared buffer thread-safe.