Windows GetPixel typedef C++ - winapi

How do I define the type for GetPixel, and/or what else am I missing to use GetPixel?
#include <windows.h>
class PollPixelArray
{
public:
PollPixelArray(HDC hdcMonitor, LPRECT lprcMonitor);
unsigned long createHex(int r, int g, int b);
private:
PollPixelArray();
};
PollPixelArray::PollPixelArray(HDC hdcMonitor, LPRECT lprcMonitor)
{
GetPixel(hdcMonitor, 50, 100);
}
unsigned long PollPixelArray::createHex(int r, int g, int b){
return (((r & 0xff) << 16) + ((g & 0xff) << 8) + (b & 0xff));
}
Always returns the same unsigned long / DWORD / COLORREF no matter the X or Y co-ordinates.
while (tempX<40){
COLORREF tempREF = GetPixel(hdcMonitor, tempX, tempY); // COLORREF is an unsigned long (DWORD)
unsigned int dummy = GetRValue(tempREF);
std::cout << "RGB: " << dummy;
dummy = GetGValue(tempREF);
std::cout << "," << dummy;
dummy = GetBValue(tempREF);
std::cout << "," << dummy;
std::cout << " at " << tempX << ", " << tempY << std::endl;
tempX++;
tempY++;
}
The loop always returns 255,255,255 for the RGB values.
The HDC Callback function:
#include <windows.h>
#include <iostream> // needed for std::cin
#include "pollpixelarray.h"
BOOL CALLBACK MonitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData)
{
PollPixelArray poller(hdcMonitor, lprcMonitor); // construct an instance; "PollPixelArray::PollPixelArray(...)" is not a valid explicit constructor call
return true;
}
int main()
{
EnumDisplayMonitors(NULL, NULL, MonitorEnumProc, 0);
std::cin.get();
}

This has nothing at all to do with typedefs and GetPixel works correctly. There are a couple of plausible explanations for the behaviour you observe:
The device context is not valid, or
The coordinates that you pass are outside the bounds of the device.
Looking at your code, both are likely to be the case.
The documentation for the hdc parameter of EnumDisplayMonitors says:
If this parameter is NULL, the hdcMonitor parameter passed to the callback function will be NULL, and the visible region of interest is the virtual screen that encompasses all the displays on the desktop.
The documentation for GetPixel says:
If the pixel is outside of the current clipping region, the return value is CLR_INVALID (0xFFFFFFFF defined in Wingdi.h).
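To illustrate just the GetPixel part, here is a minimal sketch (not the asker's code) that uses a screen DC obtained with GetDC(NULL) and checks for CLR_INVALID before trusting the result:
#include <windows.h>
#include <iostream>
int main()
{
HDC hdcScreen = GetDC(NULL); // device context for the whole virtual screen
COLORREF ref = GetPixel(hdcScreen, 50, 100);
if (ref == CLR_INVALID)
std::cout << "Invalid DC or coordinates outside the clipping region\n";
else
std::cout << "RGB: " << (int)GetRValue(ref) << "," << (int)GetGValue(ref) << "," << (int)GetBValue(ref) << "\n";
ReleaseDC(NULL, hdcScreen);
return 0;
}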

Where did torch::jit::load go?

Namespace "torch::jit" is missing member "load"
The official reference says it is there, but I can't use it.
It's not just an IntelliSense problem: when I run it, it throws an error saying that there is no "jit::load".
Why?
source code
#include <torch/torch.h>
#include <iostream>
#include <Windows.h>
#include <gdiplus.h>
#include <gdipluspixelformats.h> // PixelFormat24bppRGB
#include <vector>
#include <cstdlib>
#pragma comment(lib, "gdiplus.lib")
int main()
{
Gdiplus::GdiplusStartupInput input;
ULONG_PTR token;
Gdiplus::GdiplusStartup(&token, &input, NULL);
std::vector<uint8_t> pixels;
Gdiplus::BitmapData bmpData;
LARGE_INTEGER freq, start, end;
QueryPerformanceFrequency(&freq);
std::wstring path = L"C:\\Users\\baiji\\Documents\\triggerBot\\data2\\0.jpg";
std::wstring path2 = L"D:\\screenshot\\result\\output.bmp";
auto image = Gdiplus::Bitmap::FromFile(path2.c_str());
QueryPerformanceCounter(&start);
int bWidth = image->GetWidth();
int bHeight = image->GetHeight();
std::cout << bWidth << std::endl;
std::cout << bHeight << std::endl;
auto stride = 3 * bWidth;
pixels.resize(stride * bHeight);
Gdiplus::Rect rect(0, 0, bWidth, bHeight);
image->LockBits(&rect, Gdiplus::ImageLockModeRead, PixelFormat24bppRGB, &bmpData);
for (int y = 0; y < bHeight; ++y) {
memcpy(pixels.data() + y * stride, (byte*)bmpData.Scan0 + y * bmpData.Stride, stride);
}
image->UnlockBits(&bmpData);
Gdiplus::GdiplusShutdown(token);
// GDI+ returns 24bpp data as BGR; swap the B and R bytes to get RGB
for (size_t i = 2; i < pixels.size(); i += 3) {
uint8_t tmp = pixels[i - 2];
pixels[i - 2] = pixels[i];
pixels[i] = tmp;
}
std::cout << "要素数: " << pixels.size() << "\n";
torch::Tensor tsr = torch::tensor(torch::ArrayRef<uint8_t>(pixels)).to(torch::kFloat64) / 256;
torch::Tensor input = torch::reshape(tsr, { bHeight, bWidth, 3 }); // rows come first in memory: H x W x C
torch::jit::script::Module module;
module = torch::jit::load("model to path/traced_model.pt");
QueryPerformanceCounter(&end);
double time = static_cast<double>(end.QuadPart - start.QuadPart) * 1000.0 / freq.QuadPart;
std::cout << time << "ms\n";
//system("PAUSE");
return 1;
}
How can I run torch::jit::load?
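For what it's worth, a likely fix, assuming a standard LibTorch setup: torch::jit::load is declared in torch/script.h, which torch/torch.h does not necessarily pull in, so neither IntelliSense nor the build can find it until that header is included:
#include <torch/script.h> // declares torch::jit::load and torch::jit::script::Module

torch::jit::script::Module module = torch::jit::load("model to path/traced_model.pt");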

TCP buffer parameters not being honoured on Win7 machine

Note: I have tagged this with both programming and windows networking tags, so please don't shout, I'm just trying to expose this to as many people as may be able to help!
I am trying to set the receive and send buffers for a small client and server I have written, so that when I perform a network capture, I see the window size I have set in the TCP handshake.
For the programmers, please consider the following very simple code for a client and server.
For the non-programmers, please skip past this section to my image.
Client:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <thread>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main(int argc, char** argv)
{
if (argc != 3) { cout << "not enough args!\n"; return 0; }
const char* pszHost = argv[1];
const int nPort = atoi(argv[2]);
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(nPort);
if (!InetPtonA(AF_INET, pszHost, &sockaddr_IPv4.sin_addr)) { return 0; }
SOCKET clientSock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); // Create active socket: one which is passed to connect().
if (!SetWindowSizeVal(clientSock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on client socket as: RECV" << OutputWindowSize(clientSock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(clientSock, SO_SNDBUF) << endl;
int nRet = connect(clientSock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_in));
if (nRet != 0) { return 0; }
char buf[100] = { 0 };
nRet = recv(clientSock, buf, 100, 0);
cout << "Received " << buf << " from the server!" << endl;
nRet = send(clientSock, "Hello from the client!\n", strlen("Hello from the client!\n"), 0);
closesocket(clientSock);
return 0;
}
Server:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main()
{
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(19982);
int y = InetPton(AF_INET, L"127.0.0.1", &sockaddr_IPv4.sin_addr);
if (y != 1) return 0;
socklen_t addrlen = sizeof(sockaddr_IPv4);
SOCKET sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (!SetWindowSizeVal(sock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on listen socket as: RECV" << OutputWindowSize(sock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sock, SO_SNDBUF) << endl;
if (bind(sock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_IPv4)) != 0) { /* error */ }
if (listen(sock, SOMAXCONN) != 0) { return 0; }
while (1)
{
SOCKET sockAccept = accept(sock, (struct sockaddr *) &sockaddr_IPv4, &addrlen);
if (sockAccept == INVALID_SOCKET) return 0; // check the socket before using it
if (!SetWindowSizeVal(sockAccept, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on accepted socket as: RECV" << OutputWindowSize(sockAccept, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sockAccept, SO_SNDBUF) << endl;
int nRet = send(sockAccept, "Hello from the server!\n", strlen("Hello from the server!\n"), 0);
if (!nRet) return 0;
char buf[100] = { 0 };
nRet = recv(sockAccept, buf, 100, 0);
cout << "Received " << buf << " from the client!" << endl;
if (nRet == 0) { cout << "client disconnected!" << endl; }
closesocket(sockAccept);
}
return 0;
}
The output from my program states that the window sizes have been set successfully:
Set window size on listen socket as: RECV12345 SEND: 12345
Set window size on accepted socket as: RECV12345 SEND: 12345
for the server, and for the client:
Set window size on client socket as: RECV12345 SEND: 12345
However, when I capture the traffic using RawCap, I see that the client window size is set fine, but the server's window size is not what I set it to be; it is 8192.
Now, I have read this MS link and it says to add a registry value; I did this, adding the value 0x00001234, but it still made no difference.
The interesting thing is that the same code works fine on a Windows 10 machine, which makes me think it is Windows 7 specific. However, I'm not 100% sure of my code; there might be some errors in it.
Can anyone suggest how I can get Windows to honour my requested parameters please?
These are not 'window sizes'. They are send and receive buffer sizes.
There is no such thing as 'output window size'. There is a receive window and a congestion window, and the latter is not relevant to your question.
The send buffer size has exactly nothing to do with the receive window size, and the receive buffer size only determines the maximum receive window size.
The actual receive window size is adjusted dynamically by the protocol. It is the actual size that you are seeing in Wireshark.
The platform is entitled by the specification to adjust the supplied values for the send and receive buffers up or down, and the documentation advises you to get the corresponding values if you want to be sure what they really are.
There is no problem here to solve.
NB: You don't have to set the receive buffer size on an accepted socket if you already set it on the listening socket. It is inherited.
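As the documentation advises, read the values back to see what was actually granted; a minimal sketch, reusing the question's own pattern (s is any SOCKET):
int requested = 12345;
setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&requested, sizeof(requested));
int granted = 0;
int len = sizeof(granted);
getsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&granted, &len);
// 'granted' caps the maximum receive window; the window actually advertised
// on the wire is still managed dynamically by the protocol.
cout << "requested " << requested << ", granted " << granted << endl;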

Generate functions at compile time

I have an image. Every pixel contains information about RGB intensity. Now I want to sum the intensity of these channels, but I also want to choose which channels to sum. A straightforward implementation would look like this:
int intensity(const unsigned char* pixel, bool red, bool green, bool blue){
return 0 + (red ? pixel[0] : 0) + (green ? pixel[1] : 0) + (blue ? pixel[2] : 0);
}
Because I will call this function for every pixel in the image, I want to discard all the conditions if I can. So I guess I have to have a function for every case:
std::function<int(const unsigned char* pixel)> generateIntensityAccumulator(
const bool& accumulateRChannel,
const bool& accumulateGChannel,
const bool& accumulateBChannel)
{
if (accumulateRChannel && accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[1]) + static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[1]) + static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && !accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[2]);
};
}
if (!accumulateRChannel && !accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return 0;
};
}
if (accumulateRChannel && !accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]);
};
}
if (!accumulateRChannel && accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[1]);
};
}
if (accumulateRChannel && !accumulateGChannel && accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[2]);
};
}
if (accumulateRChannel && accumulateGChannel && !accumulateBChannel){
return [](const unsigned char* pixel){
return static_cast<int>(pixel[0]) + static_cast<int>(pixel[1]);
};
}
}
Now I can use this generator before entering image loop and use function without any conditions:
...
auto accumulator = generateIntensityAccumulator(true, false, true);
for(auto pixel : pixels){
auto intensity = accumulator(pixel);
}
...
But that is a lot of writing for such a simple task, and I have a feeling that there is a better way to accomplish this: for example, making the compiler do the dirty work for me and generate all the above cases. Can someone point me in the right direction?
Using a std::function like this will cost you dearly, because you don't give the compiler a chance to optimize by inlining.
What you are trying to do is a good job for templates. And since you use integral numbers, the expression itself may be optimized away, sparing you the need to write a specialization for each version. Look at this example:
#include <array>
#include <chrono>
#include <iostream>
#include <random>
#include <vector>
template <bool AccumulateR, bool AccumulateG, bool AccumulateB>
inline int accumulate(const unsigned char *pixel) {
static constexpr int enableR = static_cast<int>(AccumulateR);
static constexpr int enableG = static_cast<int>(AccumulateG);
static constexpr int enableB = static_cast<int>(AccumulateB);
return enableR * static_cast<int>(pixel[0]) +
enableG * static_cast<int>(pixel[1]) +
enableB * static_cast<int>(pixel[2]);
}
int main(void) {
std::vector<std::array<unsigned char, 3>> pixels(
1e7, std::array<unsigned char, 3>{0, 0, 0});
// Fill up with randomness
std::random_device rd;
std::uniform_int_distribution<int> dist(0, 255); // char types are not valid for uniform_int_distribution
for (auto &pixel : pixels) {
pixel[0] = static_cast<unsigned char>(dist(rd));
pixel[1] = static_cast<unsigned char>(dist(rd));
pixel[2] = static_cast<unsigned char>(dist(rd));
}
// Measure perf
using namespace std::chrono;
auto t1 = high_resolution_clock::now();
int sum1 = 0;
for (auto const &pixel : pixels)
sum1 += accumulate<true, true, true>(pixel.data());
auto t2 = high_resolution_clock::now();
int sum2 = 0;
for (auto const &pixel : pixels)
sum2 += accumulate<false, true, false>(pixel.data());
auto t3 = high_resolution_clock::now();
std::cout << "Sum 1 " << sum1 << " in "
<< duration_cast<milliseconds>(t2 - t1).count() << "ms\n";
std::cout << "Sum 2 " << sum2 << " in "
<< duration_cast<milliseconds>(t3 - t2).count() << "ms\n";
}
Compiled with Clang 3.9 with -O2, yields this result on my CPU:
Sum 1 -470682949 in 7ms
Sum 2 1275037960 in 2ms
Notice that we have an overflow here; you may need to use something bigger than an int. A uint64_t might do. If you inspect the assembly, you will see that the two versions of the function are inlined and optimized differently.
First things first. Don't write a std::function that takes a single pixel; write one that takes a contiguous range of pixels (a scanline of pixels).
Second, you want to write a template version of intensity:
template<bool red, bool green, bool blue>
int intensity(const unsigned char* pixel){
return (red ? pixel[0] : 0) + (green ? pixel[1] : 0) + (blue ? pixel[2] : 0);
}
pretty simple, eh? That will optimize down to your hand-crafted version.
template<std::size_t index>
int intensity(const unsigned char* pixel){
return intensity< index&1, index&2, index&4 >(pixel);
}
this one maps from the bits of index to which of the intensity<bool, bool, bool> to call. Now for the scanline version:
template<std::size_t index, std::size_t pixel_stride=3>
int sum_intensity(const unsigned char* pixel, std::size_t count){
int value = 0;
while(count--) {
value += intensity<index>(pixel);
pixel += pixel_stride;
}
return value;
}
We can now generate our scanline intensity calculator:
using scanline_fn = int(*)(const unsigned char* pel, std::size_t pixels);
scanline_fn scanline_intensity(bool red, bool green, bool blue) {
static const scanline_fn table[] = {
sum_intensity<0b000>, sum_intensity<0b001>,
sum_intensity<0b010>, sum_intensity<0b011>,
sum_intensity<0b100>, sum_intensity<0b101>,
sum_intensity<0b110>, sum_intensity<0b111>,
};
std::size_t index = red + green*2 + blue*4;
return table[index];
}
and done.
These techniques can be made generic, but you don't need the generic ones.
If your pixel stride is not 3 (say there is an alpha channel), sum_intensity needs to be passed it (as a template parameter ideally).
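For instance, a hypothetical RGBA layout (the stride of 4 below is an assumption for illustration, not something from the question) would just forward a different stride:
// Hypothetical RGBA variant: pixels are 4 bytes apart, alpha ignored.
template<std::size_t index>
int sum_intensity_rgba(const unsigned char* pixel, std::size_t count){
return sum_intensity<index, 4>(pixel, count);
}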

Passing lambda as argument is not working properly

I am learning about lambdas and I don't understand why passing the lambda as a Predicate below is not working.
#include <iostream>
#include <vector>

class Foo2{
public:
bool operator()(const int& n) const {return n%2 == 0;}
};
template<typename Container, typename Predicate>
unsigned int my_count_if(const Container& c, Predicate p)
{
unsigned int cnt = 0;
for(const auto& x : c){
cnt += p(x) ? 1 : 0;
std::cout << "x = " << x << std::endl; // for debug
std::cout << "p(x) = " << p(x) << std::endl; // for debug
}
return cnt;
}
bool test_func(const int& n)
{
return n%2 == 0;
}
int main()
{
std::vector<int> v {1,2,3,4,5,6};
std::cout << my_count_if(v, [] (const int& n) -> bool {n%2 == 0;}); // not working
std::cout << my_count_if(v, test_func) << std::endl; // works
std::cout << my_count_if(v, Foo2()); // works
return 0;
}
The line in which the lambda is used as the predicate does not work: p(x) = 248 for all values of x. The other two versions, with the free function and the function object, do work. Am I missing something concerning the passing of lambdas as function arguments? In this post (in the accepted answer), Pass lambda expression to lambda argument c++11, I read that
where there is no state being captured, the language allows for a conversion from the lambda type to a pointer to function with the signature of the operator() (minus the this part), so the lambda above can be implicitly converted to a pointer
Thank you for any help!
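Note that the lambda in question never returns a value: its body is { n%2 == 0; }, which evaluates the comparison and discards the result. Flowing off the end of a value-returning lambda is undefined behaviour, which is why p(x) comes back as garbage such as 248. A corrected call (the only change is the added return) would be:
std::cout << my_count_if(v, [] (const int& n) -> bool { return n%2 == 0; }); // now counts the even numbers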

ReadProcessMemory returns the same data for any address

Working on WinXP SP3.
Visual Studio 2005.
Trying to read memory of another process.
std::cout<<"Reading Process Memory\n";
const DWORD pid = 3476;
HANDLE handle = OpenProcess(PROCESS_VM_READ,FALSE,pid);
if(handle == NULL) {std::cout<<"Failed to open process\n";return 0;}
char* buffer1 = new char[256];
char* buffer2 = new char[256];
memset(buffer1,0,256*sizeof(char));
memset(buffer2,0,256*sizeof(char));
DWORD nbr = 0;
int address = 0x400000;
BOOL result = ReadProcessMemory(handle,&address,buffer1,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
address = 0x400000+0x1000;
result = ReadProcessMemory(handle,&address,buffer2,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
int i = 0;
while(i++<10)
{
if(buffer1[i]!=buffer2[i]) {std::cout<<"Buffers are different\n";break;}
}
delete[] buffer1;
delete[] buffer2;
CloseHandle(handle);
std::cin>>i;
return 0;
}
The problem is that both buffers are getting the same values. ReadProcessMemory returns 1 and the number of bytes read is the same as requested.
Your calls to ReadProcessMemory are incorrect. You should be using address directly, not &address. You may need to cast it to a const void *.
result = ReadProcessMemory(handle, reinterpret_cast<const void *>(address), buffer, 32, &nbr);
And you should probably declare address as a type large enough to hold a pointer, such as std::uintptr_t or INT_PTR.
INT_PTR address = 0x400000;
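Putting both corrections together, the two reads from the question become:
INT_PTR address = 0x400000;
BOOL result = ReadProcessMemory(handle, reinterpret_cast<const void *>(address), buffer1, 32, &nbr);
if (!result) std::cout << "Failed to read memory\n";
address = 0x400000 + 0x1000;
result = ReadProcessMemory(handle, reinterpret_cast<const void *>(address), buffer2, 32, &nbr);
if (!result) std::cout << "Failed to read memory\n";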
The buffer couldn't be a char; it has to be an int. Here's a working example:
#include <windows.h>
#include <iostream>
#include <string.h>
using namespace std;
int main()
{
int point1=0;
int i=0;
int d=0;
char* value[4];
SIZE_T stBytes = 0;
HWND hwnd;
HANDLE phandle;
DWORD pid;
hwnd = FindWindow(NULL, "calc"); // "calc" must match the window title of the target window
if (hwnd != 0) {
GetWindowThreadProcessId(hwnd, &pid);
phandle = OpenProcess(PROCESS_ALL_ACCESS, 0, pid);
} else {
cout << "process is not executing";
cin.get();
return 0;
}
if (phandle != 0) {
for(i=0;i<4;i++) // 4 or whatever
{
cout << "The pointer is 0x1001000" << endl; //Print the pointer
ReadProcessMemory(phandle, (LPVOID)(0x1001000 + i), &point1, 4, &stBytes); //Get the content from 0x1001000 and store it in point1
cout << "decimal content point1 " << point1 << " (DEC)" << endl; //Print the decimal content of point1
printf("%x \n",point1); // print hexadecimal content of point1
char *p=(char*)&point1; // view the individual bytes of point1
for(d=0;d<4;d++)
printf("%x",(unsigned int)(unsigned char) *(p+d)); // print the bytes one at a time; x86 is little-endian, so they appear reversed compared to the debugger view
}
ReadProcessMemory(phandle, (LPVOID)point1, &value, 6, &stBytes); //Get the value that is in the address pointed by the pointer
cout << "The value in the non-static address is " << (char*)value << endl << endl; //Print the value
cout << "Press ENTER to exit." << endl;
cin.get();
} else {
cout << "Couldn't get a handle";
cin.get();
// address 0x1001000 content hex 5278DA77
}
}
