In Win32 I want to suspend a thread with SuspendThread(GetCurrentThread()); and later resume it from another thread with ResumeThread(handle);, but when I call ResumeThread nothing happens.
Here is my code:
#include <windows.h>
#include <iostream>
using namespace std;

HANDLE C;
DWORD WINAPI A (LPVOID in)
{
C = GetCurrentThread();
cout << "1";
SuspendThread (C);
cout << "4";
return 0;
}
DWORD WINAPI B (LPVOID in)
{
Sleep (200);
cout << "2";
ResumeThread (C);
cout << "3";
return 0;
}
int main()
{
CreateThread (NULL, 0, A, NULL, 0, NULL);
CreateThread (NULL, 0, B, NULL, 0, NULL);
Sleep (INFINITE);
return 0;
}
And all I get on screen is 123.
As written, it is possible that when B calls ResumeThread, the variable C still contains an uninitialized value, because A may not have run yet.
However, the immediate reason your code does not work is that GetCurrentThread only returns a pseudo-handle: a special value that is interpreted as "the current thread" by whichever thread uses it. To get a real handle that can be used from other threads, either keep the handle returned by the first CreateThread call, or convert the pseudo-handle with DuplicateHandle.
Edit: Using method 1:
HANDLE C;
DWORD WINAPI A (LPVOID in)
{
cout << "1";
SuspendThread (C);
cout << "4";
return 0;
}
DWORD WINAPI B (LPVOID in)
{
Sleep (200);
cout << "2";
ResumeThread ((HANDLE)in);
cout << "3";
return 0;
}
int main()
{
C = CreateThread (NULL, 0, A, NULL, 0, NULL);
CreateThread (NULL, 0, B, (LPVOID)C, 0, NULL);
Sleep (INFINITE);
return 0;
}
In fact there is another problem with your code: the handles returned by CreateThread are being ignored when they should be kept and eventually closed with CloseHandle (see the sketch after the note below). There is also no error checking, but I have assumed you omitted that for brevity.
You should also note that, depending on the timing of the context switches, it is actually possible for the above code to output:
1243
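As for the point about the CreateThread handles, here is a sketch of my own (not part of the question's code) in which main keeps both handles, waits for the threads instead of sleeping forever, and closes them when done:
int main()
{
    HANDLE threads[2];
    threads[0] = CreateThread (NULL, 0, A, NULL, 0, NULL);
    threads[1] = CreateThread (NULL, 0, B, (LPVOID)threads[0], 0, NULL);
    WaitForMultipleObjects (2, threads, TRUE, INFINITE); // wait for both A and B to return
    CloseHandle (threads[0]);                            // then release both thread handles
    CloseHandle (threads[1]);
    return 0;
}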
Using method 2:
HANDLE C = NULL;
DWORD WINAPI A (LPVOID in)
{
// Duplicate into a local first, so that B never observes the pseudo-handle in C.
HANDLE real = NULL;
DuplicateHandle( GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(), &real, 0, FALSE, DUPLICATE_SAME_ACCESS );
C = real;
cout << "1";
SuspendThread (C);
cout << "4";
return 0;
}
DWORD WINAPI B (LPVOID in)
{
Sleep (200);
cout << "2";
while( C == NULL ) {
Sleep(100);
}
ResumeThread(C);
cout << "3";
return 0;
}
int main()
{
CreateThread (NULL, 0, A, NULL, 0, NULL);
CreateThread (NULL, 0, B, NULL, 0, NULL);
Sleep (INFINITE);
return 0;
}
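Not from the original answer, but a third option worth mentioning (a minimal sketch; B and main stay as in method 2) is to let A open a real handle to itself with OpenThread and its own thread id:
DWORD WINAPI A (LPVOID in)
{
    // OpenThread turns the current thread id into a real handle that other threads
    // can use; THREAD_SUSPEND_RESUME is the access SuspendThread/ResumeThread need.
    C = OpenThread (THREAD_SUSPEND_RESUME, FALSE, GetCurrentThreadId());
    cout << "1";
    SuspendThread (C);
    cout << "4";
    CloseHandle (C);   // close the handle once we no longer need it
    return 0;
}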
Note: I have tagged this with both programming and Windows networking tags, so please don't shout; I'm just trying to expose this to as many people as may be able to help!
I am trying to set the receive and send buffers for a small client and server I have written, so that when I perform a network capture, I see the window size I have set in the TCP handshake.
For the programmers, please consider the following very simple code for a client and server.
For the non-programmers, please skip past this section to my image.
Client:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <thread>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main(int argc, char** argv)
{
if (argc != 3) { cout << "not enough args!\n"; return 0; }
const char* pszHost = argv[1];
const int nPort = atoi(argv[2]);
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(nPort);
if (!InetPtonA(AF_INET, pszHost, &sockaddr_IPv4.sin_addr)) { return 0; }
SOCKET clientSock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); // Create active socket: one which is passed to connect().
if (!SetWindowSizeVal(clientSock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on client socket as: RECV" << OutputWindowSize(clientSock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(clientSock, SO_SNDBUF) << endl;
int nRet = connect(clientSock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_in));
if (nRet != 0) { return 0; }
char buf[100] = { 0 };
nRet = recv(clientSock, buf, 100, 0);
cout << "Received " << buf << " from the server!" << endl;
nRet = send(clientSock, "Hello from the client!\n", strlen("Hello from the client!\n"), 0);
closesocket(clientSock);
return 0;
}
Server:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main()
{
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(19982);
int y = InetPton(AF_INET, L"127.0.0.1", &sockaddr_IPv4.sin_addr);
if (y != 1) return 0;
socklen_t addrlen = sizeof(sockaddr_IPv4);
SOCKET sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (!SetWindowSizeVal(sock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on listen socket as: RECV" << OutputWindowSize(sock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sock, SO_SNDBUF) << endl;
if (bind(sock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_IPv4)) != 0) { /* error */ }
if (listen(sock, SOMAXCONN) != 0) { return 0; }
while (1)
{
SOCKET sockAccept = accept(sock, (struct sockaddr *) &sockaddr_IPv4, &addrlen);
if (sockAccept == INVALID_SOCKET) return 0;   // check accept() before using the socket
if (!SetWindowSizeVal(sockAccept, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on accepted socket as: RECV" << OutputWindowSize(sockAccept, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sockAccept, SO_SNDBUF) << endl;
int nRet = send(sockAccept, "Hello from the server!\n", strlen("Hello from the server!\n"), 0);
if (!nRet) return 0;
char buf[100] = { 0 };
nRet = recv(sockAccept, buf, 100, 0);
cout << "Received " << buf << " from the client!" << endl;
if (nRet == 0) { cout << "client disconnected!" << endl; }
closesocket(sockAccept);
}
return 0;
}
The output from my program states that the window sizes have been set successfully:
Set window size on listen socket as: RECV12345 SEND: 12345
Set window size on accepted socket as: RECV12345 SEND: 12345
for the server, and for the client:
Set window size on client socket as: RECV12345 SEND: 12345
However, when I capture the traffic using RawCap, I see that the client window size is set fine, but the server's window size is not what I set it to; it is 8192:
Now, I have read this MS link and it says to add a registry value; I did this, adding the value 0x00001234, but it still made no difference.
The interesting thing is that the same code works fine on a Windows 10 machine, which makes me think it is Windows 7 specific. However, I'm not 100% sure about my code; there might be some errors in it.
Can anyone suggest how I can get Windows to honour my requested parameters please?
These are not 'window sizes'. They are send and receive buffer sizes.
There is no such thing as 'output window size'. There is a receive window and a congestion window, and the latter is not relevant to your question.
The send buffer size has exactly nothing to do with the receive window size, and the receive buffer size only determines the maximum receive window size.
The actual receive window size is adjusted dynamically by the protocol. It is the actual size that you are seeing in Wireshark.
The platform is entitled by the specification to adjust the supplied values for the send and receive buffers up or down, and the documentation advises you to get the corresponding values if you want to be sure what they really are.
There is no problem here to solve.
NB: You don't have to set the buffer sizes on an accepted socket if you already set them on the listening socket; they are inherited.
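To illustrate the last two points, here is a minimal sketch of my own (not taken from the question's code; listenSock is assumed to be an already-created listening socket): set the buffer sizes before listen(), then read back what the stack actually granted on both the listening socket and the accepted socket.
int requested = 12345;
setsockopt(listenSock, SOL_SOCKET, SO_RCVBUF, (char*)&requested, sizeof(requested));
setsockopt(listenSock, SOL_SOCKET, SO_SNDBUF, (char*)&requested, sizeof(requested));

int granted = 0;
int len = sizeof(granted);
getsockopt(listenSock, SOL_SOCKET, SO_RCVBUF, (char*)&granted, &len);  // the value the stack actually granted
cout << "Listening socket SO_RCVBUF: " << granted << endl;

SOCKET sockAccept = accept(listenSock, NULL, NULL);
len = sizeof(granted);
getsockopt(sockAccept, SOL_SOCKET, SO_RCVBUF, (char*)&granted, &len);  // inherited from the listener
cout << "Accepted socket SO_RCVBUF: " << granted << endl;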
I'm a beginner at OpenCL. I was trying to build a simple app which just adds 2 vectors to get the result. This is my host code:
#include <OpenCL/opencl.h>   // Mac OS X; use <CL/cl.h> on other platforms
#include <iostream>
#include <vector>
#include <sys/time.h>

#define USE_PLATFORM 0
#define USE_DEVICE 2
#define DATA_SIZE 1024
#define USE_KERNEL_PATH "/Users/huangxin/Documents/August13Programming/FirstEGOpenCL/FirstEGOpenCL/kernel.cl"
using namespace std;
int main(int argc, const char * argv[]) {
int err;
cl_uint numPlatforms;
cl_uint numDevices;
cl_command_queue command;
size_t global;
//Query the number of platforms supported.
err = clGetPlatformIDs(0, NULL, &numPlatforms);
if (err != CL_SUCCESS || USE_PLATFORM >= numPlatforms)
{
printf("Error at: clGetPlatformIDs(querying platforms count failed):\n");
exit(-1);
}
//Get all platforms.
vector<cl_platform_id> platforms(numPlatforms);
err = clGetPlatformIDs(numPlatforms, &platforms[0], &numPlatforms);
if (err != CL_SUCCESS)
{
printf("Error at: clGetPlatformIDs(getting all platforms failed):\n");
exit(-1);
}
//Query the number of devices supported by the platform spicified.
err = clGetDeviceIDs(platforms[USE_PLATFORM], CL_DEVICE_TYPE_ALL, 0, NULL, &numDevices);
if (err != CL_SUCCESS || USE_PLATFORM >= numDevices)
{
printf("Error at: clGetDeviceIDs(querying devices count failed):\n");
exit(-1);
}
//Get all devices.
vector<cl_device_id> devices(numDevices);
err=clGetDeviceIDs(platforms[USE_PLATFORM], CL_DEVICE_TYPE_ALL, numDevices, &devices[0], &numDevices);
if (err != CL_SUCCESS)
{
printf("Error at: clGetDeviceIDs(getting all devices failed):\n");
exit(-1);
}
//Get device information.
char deviceInfo[1024];
//get device max work item dimensions.
size_t maxItemSize[3];
clGetDeviceInfo(devices[USE_DEVICE], CL_DEVICE_NAME, sizeof(deviceInfo), deviceInfo, NULL);
clGetDeviceInfo(devices[USE_DEVICE], CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(size_t)*3, maxItemSize, NULL);
cout << "Device selected: " << deviceInfo << endl;
cout << "Max item size: " << maxItemSize[0] << "," << maxItemSize[1] << ","<< maxItemSize[2] << endl;
//Set property with certain platform
cl_context_properties prop[] = {CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platforms[USE_PLATFORM]), 0};
//create context with certain property.
cl_context context = clCreateContextFromType(prop, CL_DEVICE_TYPE_ALL, NULL, NULL, &err);
if (err != CL_SUCCESS)
{
printf("Error at: clCreateContextFromType(get context failed):\n");
exit(-1);
}
//create command queue using selected device and context.
command = clCreateCommandQueue(context, devices[USE_DEVICE], 0, NULL);
//create program with specified kernel source.
const char *kernelSource = getKernelSource(USE_KERNEL_PATH);
cl_program program = clCreateProgramWithSource(context, 1, &kernelSource, 0, &err);
if (err != CL_SUCCESS)
{
printf("Error at: clCreateProgramWithSource(get program failed):\n");
exit(-1);
}
//since OpenCL compiles kernels at run time, we need to build the program.
err = clBuildProgram(program, 0, 0, 0, 0, 0);
if (err != CL_SUCCESS)
{
cout << err << endl;
size_t len;
char buffer[2048];
printf("Error: Failed to build program executable!\n");
clGetProgramBuildInfo(program, devices[USE_DEVICE], CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, &len);
printf("%s\n", buffer);
exit(1);
}
//In OpenCL, a kernel is the abstraction of the code and arguments that execute on a single compute item (the smallest granularity of execution).
//create the kernel function using the built program.
cl_kernel adder = clCreateKernel(program, "adder", &err);
if (err != CL_SUCCESS)
{
printf("Error at: clCreateKernel(get kernel function failed):\n");
exit(-1);
}
//create the vector of input random data.
vector<float> inA(DATA_SIZE), inB(DATA_SIZE);
for(int i = 0; i < DATA_SIZE; i++) {
inA[i] = (float)(random() % DATA_SIZE) / 1000;
inB[i] = (float)(random() % DATA_SIZE) / 1000;
}
//create the read-only device mem using specified context, that is to copy the host mem to the device mem.
cl_mem cl_a = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &inA[0], NULL);
cl_mem cl_b = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(cl_float) * DATA_SIZE, &inB[0], NULL);
//create the result mem.
cl_mem cl_res = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(cl_float) * DATA_SIZE, NULL, NULL);
//setting up the kernel's memory arguments
clSetKernelArg(adder, 0, sizeof(cl_mem), &cl_a);
clSetKernelArg(adder, 1, sizeof(cl_mem), &cl_b);
clSetKernelArg(adder, 2, sizeof(cl_mem), &cl_res);
START_CHECK_RUNNING_TIME
//enqueue the kernel into the specified command queue (#TODO: come back later to check the remaining arguments)
global = DATA_SIZE;
err = clEnqueueNDRangeKernel(command, adder, 1, 0, &global, 0, 0, 0, 0);
if (err != CL_SUCCESS)
{
printf("Error at: clEnqueueNDRangeKernel(enqueue kernel failed):\n");
exit(-1);
}
printf("*****************FLAG***************");
//copy the results from the kernel into the host(CPU).
vector<float> res(DATA_SIZE);
err = clEnqueueReadBuffer(command, cl_res, CL_TRUE, 0, sizeof(float) * DATA_SIZE, &res[0], 0, 0, 0);
END_CHECK_RUNNING_TIME
//check how many results are correct.
int cnt = 0;
for (int i = 0; i < res.size(); i++) {
cnt += (res[i] == inA[i] + inB[i] ? 1 : 0);
}
cout << "Computed " << res.size() << " values\n";
cout << "Correct values:(" << cnt << "/" << res.size() << "),correct rate:" << (float)cnt / res.size() * 100 << "%" << endl;
gettimeofday(&sTime, NULL);
for (int i = 0; i < res.size(); i++) {
for (int j = 0; j < 10000; j++)
res[i] = inA[i] + inB[i];
}
gettimeofday(&eTime, NULL);
timeuse = 1000000 * ( eTime.tv_sec - sTime.tv_sec ) + eTime.tv_usec - sTime.tv_usec;
printf("Running time: %fs\n", (double)timeuse/(1000000));
//cleaning up the variables.
clReleaseKernel(adder);
clReleaseProgram(program);
clReleaseMemObject(cl_a);
clReleaseMemObject(cl_b);
clReleaseMemObject(cl_res);
clReleaseCommandQueue(command);
clReleaseContext(context);
return 0;
}
The code is a bit long, but it really does simple stuff. This is my kernel code:
kernel void adder(global const float* a, global const float* b, global float* result)
{
size_t idx = get_global_id(0);
for (int i = 0; i < 10000; i++)
result[idx] = a[idx] +b[idx];
}
And I got the following result:
Device selected: GeForce GT 650M
-11
Error: Failed to build program executable!
No kernels or only kernel prototypes found.
I don't quite understand what "No kernels or only kernel prototypes found." means, and it's really strange that if I use the first device (CPU) or my second device (HD Graphics 4000), the same code runs perfectly.
I want to know what is wrong and why it happens.
I was running this code in Xcode on Mac OS X 10.10.
As the comments say, it is good practice to use:
__kernel void adder(__global const float* a, __global const float* b, __global float* result)
That way you clearly mark them as special CL qualifiers. Typically all CL kernels follow that rule, even though the spec allows both spellings.
But your problem is probably due to running clBuildProgram() without any device in the device list, and therefore not compiling anything at all!
In CL every device has a specific compiler (CPUs don't have the same compiler as GPUs, sometimes not even the same instruction set), so you should give the API the list of devices for which the kernels have to be compiled.
The proper way would be this:
err = clBuildProgram(program, 1, &devices[USE_DEVICE], "", 0, 0);
Note: I added "" because you will probably want to add some build parameters in the future; better to have it ready :)
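For example, here is a sketch of the build step with an options string and a per-device build log on failure (the option shown is just a standard example, not something the asker necessarily needs):
// Build only for the selected device, with explicit build options.
const char *options = "-cl-fast-relaxed-math";
err = clBuildProgram(program, 1, &devices[USE_DEVICE], options, 0, 0);
if (err != CL_SUCCESS)
{
    size_t logSize = 0;
    clGetProgramBuildInfo(program, devices[USE_DEVICE], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize);
    vector<char> log(logSize + 1, 0);
    clGetProgramBuildInfo(program, devices[USE_DEVICE], CL_PROGRAM_BUILD_LOG, logSize, &log[0], NULL);
    printf("Build failed (error %d):\n%s\n", err, &log[0]);
    exit(1);
}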
I want to map a file into memory in chunks whose size equals the system allocation granularity. The first chunk is read without error, but all of the others fail with error 5 (ERROR_ACCESS_DENIED). I tried running the program with administrator privileges.
My code:
#include <windows.h>
#include <stdio.h>
int main() {
HANDLE hFile = CreateFile( TEXT("db.txt"),
GENERIC_READ,
FILE_SHARE_READ,
NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (hFile == INVALID_HANDLE_VALUE) {
printf("[ERROR] File opening error %d\n", GetLastError());
return 1;
}
printf("[DONE] File opened successfully.\n");
HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
if (hMap == NULL) {
printf("[ERROR] Create mapping error %d\n", GetLastError());
return 2;
}
printf("[DONE] Create mapping successfully.\n");
LARGE_INTEGER file_size = { };
if (!GetFileSizeEx(hFile, &file_size)) {
printf("[ERROR] Getiing filesize error %d\n", GetLastError());
return 3;
}
printf("[DONE] Getting file size.\n");
SYSTEM_INFO info = { };
GetSystemInfo(&info);
printf("[DONE] Getting system memory granularity %d.\n", info.dwAllocationGranularity);
DWORD offset = 0;
int size = 0;
do {
char* ENTRY = (char*)MapViewOfFile(hMap, FILE_MAP_READ, HIWORD(offset), LOWORD(offset), info.dwAllocationGranularity);
if (ENTRY == NULL) {
printf("[ERROR] Map entry error %d\n", GetLastError());
} else {
printf("[DONE] MAPPING PART WITH OFFSET %d\n", offset);
//printf("%s\n", ENTRY);
}
if (offset + info.dwAllocationGranularity < file_size.QuadPart) {
offset += info.dwAllocationGranularity;
} else {
offset = file_size.QuadPart;
}
//offset += size;
UnmapViewOfFile(ENTRY);
} while (offset < file_size.QuadPart);
CloseHandle(hMap);
CloseHandle(hFile);
system("pause");
return 0;
}
How do I fix it?
You're using HIWORD and LOWORD for the offset in the call to MapViewOfFile, but these only take a 32-bit value and split it into two 16-bit halves - what you want is a 64-bit value split into two 32-bit halves.
Instead you need HIDWORD and LODWORD, which are defined in <intsafe.h>:
#define LODWORD(_qw) ((DWORD)(_qw))
#define HIDWORD(_qw) ((DWORD)(((_qw) >> 32) & 0xffffffff))
Like so:
char* ENTRY = (char*)MapViewOfFile(hMap, FILE_MAP_READ, HIDWORD(offset), LODWORD(offset), info.dwAllocationGranularity);
You need this even if your offset variable is only 32 bits wide, but it is cleaner to declare offset as a 64-bit type (for example ULONGLONG): then the shift inside HIDWORD is well defined, it yields 0 for offsets below 4 GB, and the full value of offset is passed as the low-order DWORD.
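Here is a sketch of the mapping loop with a 64-bit offset (my own variation on the question's code, assuming hMap, file_size and info are set up as in the question and HIDWORD/LODWORD come from <intsafe.h>); it also clamps the size of the final chunk so the view never extends past the end of the file:
ULONGLONG offset = 0;   // 64-bit, so HIDWORD/LODWORD are well defined
while (offset < (ULONGLONG)file_size.QuadPart) {
    ULONGLONG remaining = file_size.QuadPart - offset;
    SIZE_T chunk = remaining < info.dwAllocationGranularity
                 ? (SIZE_T)remaining : info.dwAllocationGranularity;
    char* view = (char*)MapViewOfFile(hMap, FILE_MAP_READ,
                                      HIDWORD(offset), LODWORD(offset), chunk);
    if (view == NULL) {
        printf("[ERROR] Map entry error %d\n", GetLastError());
        break;
    }
    printf("[DONE] Mapped %u bytes at offset %llu\n", (unsigned)chunk, offset);
    UnmapViewOfFile(view);
    offset += chunk;
}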
Working on WinXP SP3.
Visual Studio 2005.
Trying to read memory of another process.
std::cout<<"Reading Process Memory\n";
const DWORD pid = 3476;
HANDLE handle = OpenProcess(PROCESS_VM_READ,FALSE,pid);
if(handle == NULL) {std::cout<<"Failed to open process\n";return 0;}
char* buffer1 = new char[256];
char* buffer2 = new char[256];
memset(buffer1,0,256*sizeof(char));
memset(buffer2,0,256*sizeof(char));
DWORD nbr = 0;
int address = 0x400000;
BOOL result = ReadProcessMemory(handle,&address,buffer1,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
address = 0x400000+0x1000;
result = ReadProcessMemory(handle,&address,buffer2,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
int i = 0;
while(i++<10)
{
if(buffer1[i]!=buffer2[i]) {std::cout<<"Buffers are different\n";break;}
}
delete[] buffer1;
delete[] buffer2;
CloseHandle(handle);
std::cin>>i;
return 0;
The problem is that both buffers end up with the same values. ReadProcessMemory returns 1 and the number of bytes read is the same as requested.
Your calls to ReadProcessMemory are incorrect. You should be using address directly, not &address. You may need to cast it to a const void *.
result = ReadProcessMemory(handle, reinterpret_cast<const void *>(address), buffer, 32, &nbr);
And you should probably declare address as a type large enough to hold a pointer, like INT_PTR or intptr_t.
INT_PTR address = 0x400000;
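Putting both corrections together, a sketch of just the affected lines (the rest of the code stays the same):
INT_PTR address = 0x400000;
BOOL result = ReadProcessMemory(handle,
                                reinterpret_cast<const void *>(address),  // pass the address value itself, not &address
                                buffer1, 32, &nbr);
if (!result) std::cout << "Failed to read memory\n";

address = 0x400000 + 0x1000;
result = ReadProcessMemory(handle,
                           reinterpret_cast<const void *>(address),
                           buffer2, 32, &nbr);
if (!result) std::cout << "Failed to read memory\n";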
The buffer can't be a char here; it has to be an int. Here's a working example:
#include <windows.h>
#include <iostream>
#include <string.h>
using namespace std;
int main()
{
int point1=0;
int i=0;
int d=0;
char* value[4];
SIZE_T stBytes = 0;
HWND hwnd;
HANDLE phandle;
DWORD pid;
hwnd = FindWindow(NULL, "calc"); // "calc" is the window title FindWindow searches for (not the process name)
if (hwnd != 0) {
GetWindowThreadProcessId(hwnd, &pid);
phandle = OpenProcess(PROCESS_ALL_ACCESS, 0, pid);
} else {
cout << "process is not executing";
cin.get();
return 0;
}
if (phandle != 0) {
for(i=0;i<4;i++) // 4 or whatever
{
cout << "The pointer is 0x1001000" << endl; //Print the pointer
ReadProcessMemory(phandle, (LPVOID)(0x1001000 + i), &point1, 4, &stBytes); //Get the content from 0x1001000 and store it in point1
cout << "decimal content point1 " << point1 << " (DEC)" << endl; //Print the decimal content of point1
printf("%x \n",point1); // print hexadecimal content of point1
char *p=(char*)&point1; // point point1 buffer
for(d=0;d<4;d++)
printf("%x",(unsigned int)(unsigned char) *(p+d)); // print backwards (because the buffer is like a LIFO) and see the dbg debugger
}
ReadProcessMemory(phandle, (LPVOID)point1, &value, 6, &stBytes); //Get the value that is in the address pointed by the pointer
cout << "The value in the non-static address is " << (char*)value << endl << endl; //Print the value
cout << "Press ENTER to exit." << endl;
cin.get();
} else {
cout << "Couldn't get a handle";
cin.get();
// address 0x1001000 content hex 5278DA77
}
}
Is there any way to enumerate a process with a given PID on Windows and get a list of all its open handles (locked files, etc.)?
EDIT: I don't care about the language. If it is in .NET, I'd be glad; if it's in WinAPI (C), it won't hurt. If it's in something else, I think I can rewrite it :-)
I did some deep googling and found this article.
The article gives a link to download its source code.
I tried the following method from NtSystemInfoTest.cpp (in the downloaded source code) and it worked superbly:
void ListHandles( DWORD processID, LPCTSTR lpFilter )
The code has the following disclaimer:
// Written by Zoltan Csizmadia, zoltan_csizmadia#yahoo.com
// For companies(Austin,TX): If you would like to get my resume, send an email.
//
// The source is free, but if you want to use it, mention my name and e-mail address
//
//////////////////////////////////////////////////////////////////////////////////////
//
I hope this helps you.
The command-line 'Handle' tool from Sysinternals does this, if you just want a tool. This won't help you if you're looking for a code solution, though.
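For example, something like handle.exe -p 1234 lists the handles owned by the process with PID 1234 (run handle.exe /? to confirm the exact options for the version you have).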
Here is an example using ZwQuerySystemInformation from the DDK. The DDK is now known as the "WDK" and is available with MSDN. If you don't have MSDN, apparently you can also get it from here.
I haven't tried it; I just googled your question.
#include "ntdll.h"
#include <stdlib.h>
#include <stdio.h>
#include "ntddk.h"
#define DUPLICATE_SAME_ATTRIBUTES 0x00000004
#pragma comment(lib,"ntdll.lib")
BOOL EnablePrivilege(PCSTR name)
{
TOKEN_PRIVILEGES priv = {1, {0, 0, SE_PRIVILEGE_ENABLED}};
LookupPrivilegeValue(0, name, &priv.Privileges[0].Luid);
HANDLE hToken;
OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken);
AdjustTokenPrivileges(hToken, FALSE, &priv, sizeof priv, 0, 0);
BOOL rv = GetLastError() == ERROR_SUCCESS;
CloseHandle(hToken);
return rv;
}
int main(int argc, char *argv[])
{
if (argc == 1) return 0;
ULONG pid = strtoul(argv[1], 0, 0);
EnablePrivilege(SE_DEBUG_NAME);
HANDLE hProcess = OpenProcess(PROCESS_DUP_HANDLE, FALSE, pid);
ULONG n = 0x1000;
PULONG p = new ULONG[n];
while (NT::ZwQuerySystemInformation(NT::SystemHandleInformation, p, n * sizeof *p, 0)
== STATUS_INFO_LENGTH_MISMATCH)
delete [] p, p = new ULONG[n *= 2];
NT::PSYSTEM_HANDLE_INFORMATION h = NT::PSYSTEM_HANDLE_INFORMATION(p + 1);
for (ULONG i = 0; i < *p; i++) {
if (h[i].ProcessId == pid) {
HANDLE hObject;
if (NT::ZwDuplicateObject(hProcess, HANDLE(h[i].Handle), NtCurrentProcess(), &hObject,
0, 0, DUPLICATE_SAME_ATTRIBUTES)
!= STATUS_SUCCESS) continue;
NT::OBJECT_BASIC_INFORMATION obi;
NT::ZwQueryObject(hObject, NT::ObjectBasicInformation, &obi, sizeof obi, &n);
printf("%p %04hx %6lx %2x %3lx %3ld %4ld ",
h[i].Object, h[i].Handle, h[i].GrantedAccess,
int(h[i].Flags), obi.Attributes,
obi.HandleCount - 1, obi.PointerCount - 2);
n = obi.TypeInformationLength + 2;
NT::POBJECT_TYPE_INFORMATION oti = NT::POBJECT_TYPE_INFORMATION(new CHAR[n]);
NT::ZwQueryObject(hObject, NT::ObjectTypeInformation, oti, n, &n);
printf("%-14.*ws ", oti[0].Name.Length / 2, oti[0].Name.Buffer);
n = obi.NameInformationLength == 0
? MAX_PATH * sizeof (WCHAR) : obi.NameInformationLength;
NT::POBJECT_NAME_INFORMATION oni = NT::POBJECT_NAME_INFORMATION(new CHAR[n]);
NTSTATUS rv = NT::ZwQueryObject(hObject, NT::ObjectNameInformation, oni, n, &n);
if (NT_SUCCESS(rv))
printf("%.*ws", oni[0].Name.Length / 2, oni[0].Name.Buffer);
printf("\n");
CloseHandle(hObject);
}
}
delete [] p;
CloseHandle(hProcess);
return 0;
}