Catch system calls on Mac OS X

I'm trying to catch all system calls made by a given PID with a self-made program (I can't use any of strace, dtruss, gdb...). So I used the function
kern_return_t task_set_emulation(task_t target_port, vm_address_t routine_entry_pt, int routine_number)
declared in /usr/include/mach/task.h.
I've written a little program to catch the write syscall:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

void do_exit(char *msg)
{
    printf("Error::%s\n", msg);
    exit(42);
}

int main(void)
{
    mach_port_t the_task;
    mach_vm_address_t address;
    mach_vm_size_t size;
    mach_port_t the_thread;
    kern_return_t kerr;

    // Initialisation
    address = 0;
    size = 1ul * 1024;
    the_task = mach_task_self(); // Get the current program task
    kerr = mach_vm_allocate(the_task, &address, size, VM_MEMORY_MALLOC); // Allocate a new address for the test
    if (kerr != KERN_SUCCESS)
    { do_exit("vm_allocate"); }
    printf("address::%llx, size::%llu\n", address, size); // debug

    // Process
    kerr = task_set_emulation(the_task, address, SYS_write); // About to catch write syscalls
    the_thread = mach_thread_self(); // Verify if a thread is opened (even if it's obvious)
    printf("kerr::%d, thread::%d\n", kerr, the_thread); // debug
    if (kerr != KERN_SUCCESS)
    { do_exit("set_emulation"); }

    // Use some writes for the example
    write(1, "Bonjour\n", 8);
    write(1, "Bonjour\n", 8);
}
The output is:
address::0x106abe000, size::1024
kerr::46, thread::1295
Error::set_emulation
The kernel error 46 corresponds to the macro KERN_NOT_SUPPORTED, described as "Empty thread activation (No thread linked to it)" in /usr/include/mach/kern_return.h, and it happens even before I call write.
My question is: what did I do wrong here? Or does KERN_NOT_SUPPORTED simply mean the call isn't implemented, rather than pointing to a real thread problem?

The source code in XNU for task_set_emulation is:
kern_return_t
task_set_emulation(
    __unused task_t       task,
    __unused vm_offset_t  routine_entry_pt,
    __unused int          routine_number)
{
    return KERN_NOT_SUPPORTED;
}
which shows that task_set_emulation is nothing but a stub: it returns KERN_NOT_SUPPORTED unconditionally, so the error has nothing to do with threads; the call is simply not supported on macOS.
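A quick way to confirm the stub behaviour (my own sketch, not from the original post): the call fails with KERN_NOT_SUPPORTED no matter which task, address, or routine number you pass, so the "empty thread activation" wording in kern_return.h is misleading here.

#include <stdio.h>
#include <sys/syscall.h>
#include <mach/mach.h>
#include <mach/mach_error.h>

int main(void)
{
    /* The arguments are irrelevant: the kernel-side implementation is a stub. */
    kern_return_t kr = task_set_emulation(mach_task_self(), 0, SYS_write);
    printf("task_set_emulation -> %d (%s)\n", kr, mach_error_string(kr));
    /* Expected on current macOS: 46, i.e. KERN_NOT_SUPPORTED. */
    return 0;
}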

Related

Sys V IPC msgsnd(), msgrcv() after fork()

I have a program running on Linux that fork()s after a TCP connection was accept()ed. Before the fork, it connects to a message queue via msgget() and happily sends and receives messages. At some point in the program, both the parent and the child will be waiting at the same time on a msgrcv() using the same msgtype. A separate process then sends a message via msgsnd() using this same msgtype.
However, only one of the forked processes returns from msgrcv(), and it also seems to depend on the path the parent and the child took. It is very repeatable. In one case only the parent receives the message, in another case only the child receives the message, leaving the other one waiting indefinitely.
Does anyone have a hint on what could go wrong and why not both the parent and the child always receive the message?
I wrote two little test programs, recv.c and send.c, see below.
It turns out that the parent and the child each receive only every other message. It is strictly alternating, not random which of the two receives a given message. This would explain very well what's happening in my software.
Is this how message queues are supposed to work? Can I not send a message to multiple recipients?
/* recv.c */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>   /* for while (true) */
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>

int main(void)
{
    int msgid = msgget(247, 0666 | IPC_CREAT);
    pid_t cldpid = fork();
    struct msgform
    {
        long mtype;
        char mbuf[16];
    } msg;
    msg.mtype = 1;

    if (cldpid == 0)
    {
        while (true)
        {
            printf("Child waiting\n");
            msgrcv(msgid, &msg, sizeof(msg), 1, 0);
            printf("Child done\n");
        }
    }
    while (true)
    {
        printf("Parent waiting\n");
        msgrcv(msgid, &msg, sizeof(msg), 1, 0);
        printf("Parent done\n");
    }
    return 0;
}
and
/* send.c */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>

int main(void)
{
    int msgid = msgget(247, 0666 | IPC_CREAT);
    struct msgform {
        long mtype;
        char mbuf[16];
    } msg;
    msg.mtype = 1;
    msgsnd(msgid, &msg, sizeof(msg), IPC_NOWAIT);
    return 0;
}
Thanks
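For what it's worth, this alternating behaviour is exactly how System V message queues work: msgrcv() removes the message from the queue, so each message is delivered to exactly one of the readers blocked on that msgtype; there is no built-in fan-out to multiple recipients. If every process must see every message, one sketch (mine, not from the original thread) is to post one copy per recipient, for example under a different msgtype for the parent and for the child:

/* send_both.c - hypothetical variant of send.c: one copy per recipient.
   The child would msgrcv() with msgtype 1, the parent with msgtype 2. */
#include <string.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct msgform {
    long mtype;
    char mbuf[16];
};

int main(void)
{
    int msgid = msgget(247, 0666 | IPC_CREAT);
    struct msgform msg;
    strcpy(msg.mbuf, "hello");

    msg.mtype = 1;                                     /* copy for the child  */
    msgsnd(msgid, &msg, sizeof(msg.mbuf), IPC_NOWAIT);
    msg.mtype = 2;                                     /* copy for the parent */
    msgsnd(msgid, &msg, sizeof(msg.mbuf), IPC_NOWAIT);
    return 0;
}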

Solution to avoid the error "Entry point not found" even though I'm checking the OS version before calling the GetTcpTable2 API on Windows XP?

I'm aware that the GetTcpTable2 API is supported only on Windows Vista and later, so the code checks the OS version and only then enters the branch that calls the API. I'm compiling the code on Windows 7 with Visual Studio 2008; the executable runs fine on Windows 7 and other OS versions, but on Windows XP it fails with the "Entry point not found" error.
The code snippet is:
#include <winsock2.h>
#include <ws2tcpip.h>
#include <iphlpapi.h>
#include <stdio.h>

// Need to link with Iphlpapi.lib and Ws2_32.lib
#pragma comment(lib, "iphlpapi.lib")
#pragma comment(lib, "ws2_32.lib")

#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
#define FREE(x)   HeapFree(GetProcessHeap(), 0, (x))
/* Note: could also use malloc() and free() */

int main()
{
    OSVERSIONINFOEX osvi;
    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    GetVersionEx((OSVERSIONINFO *)&osvi);

    if (osvi.dwMajorVersion >= 6)
    {
        // Declare and initialize variables
        PMIB_TCPTABLE2 pTcpTable;
        ULONG ulSize = 0;
        DWORD dwRetVal = 0;
        char szLocalAddr[128];
        char szRemoteAddr[128];
        struct in_addr IpAddr;
        int i;

        pTcpTable = (MIB_TCPTABLE2 *) MALLOC(sizeof (MIB_TCPTABLE2));
        if (pTcpTable == NULL)
        {
            printf("Error allocating memory\n");
            return 1;
        }
        ulSize = sizeof (MIB_TCPTABLE);
        // Make an initial call to GetTcpTable2 to
        // get the necessary size into the ulSize variable
        if ((dwRetVal = GetTcpTable2(pTcpTable, &ulSize, TRUE)) ==
            ERROR_INSUFFICIENT_BUFFER)
        {
            FREE(pTcpTable);
            pTcpTable = (MIB_TCPTABLE2 *) MALLOC(ulSize);
            if (pTcpTable == NULL)
            {
                printf("Error allocating memory\n");
                return 1;
            }
        }
    }
    else
    {
        printf("Unsupported OS");
    }
    return 0;
}
How do I get the executable to work on Windows XP without crashing or throwing this "Entry point not found" error?
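For what it's worth, the version check alone cannot prevent this: GetTcpTable2 is a static import, so the Windows XP loader tries to resolve the entry point before main() ever runs and fails with exactly this message. A sketch of the usual workaround (mine, not from the original thread; it assumes the table-walking code stays as in the snippet above) is to bind to the function at runtime with LoadLibrary/GetProcAddress and fall back when it is absent:

#include <winsock2.h>
#include <ws2tcpip.h>
#include <iphlpapi.h>
#include <stdio.h>

/* Resolved at runtime, so there is no static import for the XP loader to choke on. */
typedef ULONG (WINAPI *PFN_GETTCPTABLE2)(PMIB_TCPTABLE2, PULONG, BOOL);

int main()
{
    HMODULE hIphlpapi = LoadLibraryA("iphlpapi.dll");
    if (hIphlpapi == NULL)
        return 1;

    PFN_GETTCPTABLE2 pGetTcpTable2 =
        (PFN_GETTCPTABLE2) GetProcAddress(hIphlpapi, "GetTcpTable2");

    if (pGetTcpTable2 == NULL)
    {
        /* Windows XP: the export does not exist; fall back, e.g. to GetTcpTable. */
        printf("GetTcpTable2 not available on this OS\n");
    }
    else
    {
        /* Vista and later: call through the pointer exactly as before, e.g.
           dwRetVal = pGetTcpTable2(pTcpTable, &ulSize, TRUE); */
    }

    FreeLibrary(hIphlpapi);
    return 0;
}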

How to make lldb ignore EXC_BAD_ACCESS exception?

I am writing a program on Mac OS X that depends on the sigaction/sa_handler mechanism: it runs a code snippet from the user and must be ready to catch signals/exceptions at any time. The program works fine, but the problem is that I can't debug it with lldb. lldb does not seem able to ignore any exceptions even when I set
proc hand -p true -s false SIGSEGV
proc hand -p true -s false SIGBUS
The control flow stops at the instruction that triggers the exception and does not jump to the sa_handler I installed earlier, even when I try the c command. The output was:
Process 764 stopped
* thread #2: tid = 0xf140, 0x00000001000b8000, stop reason = EXC_BAD_ACCESS (code=2, address=0x1000b8000)
How do I make lldb ignore the exception/signal and let the sa_handler of the program do its work?
EDIT: sample code
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>

static void handler(int signo, siginfo_t *sigaction, void *context)
{
    printf("in handler.\n");
    signal(signo, SIG_DFL);
}

static void gen_exception()
{
    printf("gen_exception in.\n");
    *(int *)0 = 0;
    printf("gen_exception out.\n");
}

void *gen_exception_thread(void *parg)
{
    gen_exception();
    return 0;
}

int main()
{
    struct sigaction sa;
    sa.sa_sigaction = handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO;
    if (sigaction(/*SIGBUS*/SIGSEGV, &sa, NULL) == -1) {
        printf("sigaction fails.\n");
        return 0;
    }
    pthread_t id;
    pthread_create(&id, NULL, gen_exception_thread, NULL);
    pthread_join(id, NULL);
    return 0;
}
I needed this in a recent project, so I just built my own LLDB. I patched a line in tools/debugserver/source/MacOSX/MachTask.mm from
err = ::task_set_exception_ports (task, m_exc_port_info.mask, m_exception_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
to
err = ::task_set_exception_ports (task, m_exc_port_info.mask & ~EXC_MASK_BAD_ACCESS, m_exception_port, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, THREAD_STATE_NONE);
which causes the debugserver to be unable to catch EXC_BAD_ACCESS exceptions. Now, my custom LLDB works just fine: it still catches SIGSEGV and SIGBUS but no longer enters a silly infinite loop when faced with EXC_BAD_ACCESS. Setting process handle options on the previously-fatal signals works fine too, and I can now debug SEGV handlers with impunity.
Apple really ought to make this an option in LLDB...seems like a really easy fix for them.
This is a long-standing bug in the debugger interface in Mac OS X (gdb had the same problem...). If you have a developer account, please file a bug at http://bugreport.apple.com. So few people actually use SIGSEGV handlers that the problem never gets any attention from the kernel folks, so more bug reports are good...
We can do it easily. Just add this code.
#include <mach/task.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>

int ret = task_set_exception_ports(
    mach_task_self(),
    EXC_MASK_BAD_ACCESS,
    MACH_PORT_NULL, // m_exception_port,
    EXCEPTION_DEFAULT,
    0);
Don't forget to do this
proc hand -p true -s false SIGSEGV
proc hand -p true -s false SIGBUS
Full code:
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <mach/task.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>

static void handler(int signo, siginfo_t *sigaction, void *context)
{
    printf("in handler.\n");
    signal(signo, SIG_DFL);
}

static void gen_exception()
{
    printf("gen_exception in.\n");
    *(int *)0 = 0;
    printf("gen_exception out.\n");
}

void *gen_exception_thread(void *parg)
{
    gen_exception();
    return 0;
}

int main()
{
    task_set_exception_ports(
        mach_task_self(),
        EXC_MASK_BAD_ACCESS,
        MACH_PORT_NULL, // m_exception_port,
        EXCEPTION_DEFAULT,
        0);

    struct sigaction sa;
    sa.sa_sigaction = handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO;
    if (sigaction(/*SIGBUS*/SIGSEGV, &sa, NULL) == -1) {
        printf("sigaction fails.\n");
        return 0;
    }
    pthread_t id;
    pthread_create(&id, NULL, gen_exception_thread, NULL);
    pthread_join(id, NULL);
    return 0;
}
Refer to (Chinese article): https://zhuanlan.zhihu.com/p/33542591
A little bit of example code can make a question like this a lot easier to answer ... I've never used the sigaction API before but I threw this together -
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

void segv_handler (int in)
{
    puts ("in segv_handler()");
}

void sigbus_handler (int in)
{
    puts ("in sigbus_handler()");
}

int main ()
{
    struct sigaction action;
    action.sa_mask = 0;
    action.sa_flags = 0;
    action.sa_handler = segv_handler;
    sigaction (SIGSEGV, &action, NULL);
    action.sa_handler = sigbus_handler;
    sigaction (SIGBUS, &action, NULL);

    puts ("about to send SIGSEGV signal from main()");
    kill (getpid(), SIGSEGV);
    puts ("about to send SIGBUS signal from main()");
    kill (getpid(), SIGBUS);
    puts ("exiting main()");
}
% lldb a.out
(lldb) br s -n main
(lldb) r
(lldb) pr h -p true -s false SIGSEGV SIGBUS
(lldb) c
Process 54743 resuming
about to send SIGSEGV signal from main()
Process 54743 stopped and restarted: thread 1 received signal: SIGSEGV
in segv_handler()
about to send SIGBUS signal from main()
Process 54743 stopped and restarted: thread 1 received signal: SIGBUS
in sigbus_handler()
exiting main()
Process 54743 exited with status = 0 (0x00000000)
(lldb)
Everything looks like it's working correctly here. If I'd added -n false to the process handle arguments, lldb wouldn't have printed the lines about Process .. stopped and restarted.
Note that these signal settings do not persist across process executions. So if you're starting your debug session over (r once you've already started the process once), you'll need to re-set these. You may want to create a command alias shortcut and put it in your ~/.lldbinit file so you can set the process handling the way you prefer with a short cmd.
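For example, a possible ~/.lldbinit line (the alias name here is my own, just a sketch):

# ~/.lldbinit
command alias sigpass process handle -p true -s false SIGSEGV SIGBUS

After that, typing sigpass right after r re-applies the pass-without-stopping behaviour for the new process.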

C shell printing output infinitely without stopping at gets()

I am trying to use a SIGCHLD handler, but for some reason the shell keeps printing the output of the command I gave, infinitely. If I remove the struct act it works fine.
Can anyone take a look at it, I am not able to understand what the problem is.
Thanks in advance!!
/* Simplest dead child cleanup in a SIGCHLD handler. Prevent zombie processes
   but don't actually do anything with the information that a child died. */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

typedef char *string;

/* SIGCHLD handler. */
static void sigchld_hdl (int sig)
{
    /* Wait for all dead processes.
     * We use a non-blocking call to be sure this signal handler will not
     * block if a child was cleaned up in another part of the program. */
    while (waitpid(-1, NULL, WNOHANG) > 0) {
    }
}

int main (int argc, char *argv[])
{
    struct sigaction act;
    int i;
    int nbytes = 100;
    char my_string[nbytes];
    string arg_list[5];
    char *str;

    memset (&act, 0, sizeof(act));
    act.sa_handler = sigchld_hdl;
    if (sigaction(SIGCHLD, &act, 0)) {
        perror ("sigaction");
        return 1;
    }

    while (1) {
        printf("myshell>> ");
        gets(my_string);
        str = strtok(my_string, " \n");
        arg_list[0] = str;
        i = 1;
        while ((str = strtok(NULL, " \n")) != NULL) {
            arg_list[i] = str;
            i++;
        }
        if (i == 1)
            arg_list[i] = NULL;
        else
            arg_list[i+1] = NULL;

        pid_t child_pid;
        child_pid = fork();
        if (child_pid == (pid_t)-1) {
            printf("ERROR OCCURED");
            exit(0);
        }
        if (child_pid != 0) {
            printf("this is the parent process id is %d\n", (int) getpid());
            printf("the child's process ID is %d\n", (int) child_pid);
        }
        else {
            printf("this is the child process, with id %d\n", (int) getpid());
            execvp(arg_list[0], arg_list);
            printf("this should not print - ERROR occured");
            abort();
        }
    }
    return 0;
}
I haven't run your code, and am merely hypothesizing:
SIGCHLD arrives and interrupts fgets (I'll just pretend you didn't use gets). fgets returns before actually reading any data, my_string still contains the tokenized content from the previous loop iteration, you fork again, re-enter fgets, which is again interrupted before reading any data, and so on indefinitely.
In other words, check the return value of fgets. If it is NULL and has set errno to EINTR, then call fgets again. (Or set act.sa_flags = SA_RESTART.)
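A minimal sketch of that retry idea (mine, not from the original answer), assuming the rest of the shell loop stays unchanged:

#include <errno.h>
#include <stdio.h>

/* Read a line, retrying when a signal such as SIGCHLD interrupts the call.
 * Returns NULL only on a real EOF or error. */
static char *read_line(char *buf, int size)
{
    char *res;
    do {
        errno = 0;
        clearerr(stdin);            /* clear the error flag left by EINTR */
        res = fgets(buf, size, stdin);
    } while (res == NULL && errno == EINTR);
    return res;
}

Alternatively, setting act.sa_flags = SA_RESTART before the sigaction() call tells the kernel to restart the interrupted read automatically, so no retry loop is needed.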

Getting process base address in Mac OSX

I'm trying to read the memory of a process using task_for_pid / vm_read.
uint32_t sz;
pointer_t buf;
task_t task;
pid_t pid = 9484;
kern_return_t error = task_for_pid(current_task(), pid, &task);
vm_read(task, 0x10e448000, 2048, &buf, &sz);
In this case I read the first 2048 bytes.
This works when I know the base address of the process (which I can find out using gdb "info shared" - in this case 0x10e448000), but how do I find out the base address at runtime (without looking at it with gdb)?
Answering my own question. I was able to get the base address using mach_vm_region_recurse like below. The offset lands in vmoffset. If there is another way that is more "right" - don't hesitate to comment!
#include <stdio.h>
#include <mach/mach_init.h>
#include <sys/sysctl.h>
#include <mach/mach_vm.h>
...
mach_port_name_t task;            // obtained earlier via task_for_pid(), as in the question
vm_map_offset_t vmoffset;
vm_map_size_t vmsize;
uint32_t nesting_depth = 0;
struct vm_region_submap_info_64 vbr;
mach_msg_type_number_t vbrcount = 16;
kern_return_t kr;

if ((kr = mach_vm_region_recurse(task, &vmoffset, &vmsize,
                                 &nesting_depth,
                                 (vm_region_recurse_info_t)&vbr,
                                 &vbrcount)) != KERN_SUCCESS)
{
    printf("FAIL");
}
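For context, here is a fuller sketch of how that snippet and the question's task_for_pid() call fit together (my own glue code; it needs the same privileges task_for_pid() always needs, i.e. root or the proper entitlements):

#include <stdio.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/mach_traps.h>

int main(void)
{
    pid_t pid = 9484;                       /* target pid, as in the question */
    mach_port_t task = MACH_PORT_NULL;

    if (task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS)
    {
        printf("task_for_pid failed (are you root / entitled?)\n");
        return 1;
    }

    mach_vm_address_t addr = 0;             /* start scanning from address 0 */
    mach_vm_size_t size = 0;
    natural_t depth = 0;
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

    if (mach_vm_region_recurse(task, &addr, &size, &depth,
                               (vm_region_recurse_info_t)&info,
                               &count) != KERN_SUCCESS)
    {
        printf("mach_vm_region_recurse failed\n");
        return 1;
    }
    printf("first mapped region starts at 0x%llx\n", (unsigned long long)addr);
    return 0;
}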
Since you're calling current_task(), I assume you're aiming at your own process at runtime. So the base address you mentioned should be the dynamic base address, i.e. static base address + image slide caused by ASLR, right? Based on this assumption, you can use "Section and Segment Accessors" to get the static base address of your process, and then use the dyld functions to get the image slide. Here's a snippet:
#import <Foundation/Foundation.h>
#include <mach-o/getsect.h>
#include <mach-o/dyld.h>
#include <stdio.h>
#include <string.h>

uint64_t StaticBaseAddress(void)
{
    const struct segment_command_64 *command = getsegbyname("__TEXT");
    uint64_t addr = command->vmaddr;
    return addr;
}

intptr_t ImageSlide(void)
{
    char path[1024];
    uint32_t size = sizeof(path);
    if (_NSGetExecutablePath(path, &size) != 0) return -1;
    for (uint32_t i = 0; i < _dyld_image_count(); i++)
    {
        if (strcmp(_dyld_get_image_name(i), path) == 0)
            return _dyld_get_image_vmaddr_slide(i);
    }
    return 0;
}

uint64_t DynamicBaseAddress(void)
{
    return StaticBaseAddress() + ImageSlide();
}

int main (int argc, const char *argv[])
{
    printf("dynamic base address (%0llx) = static base address (%0llx) + image slide (%0lx)\n",
           DynamicBaseAddress(), StaticBaseAddress(), ImageSlide());
    while (1) {}; // you can attach to this process via gdb/lldb to view the base address now :)
    return 0;
}
Hope it helps!
