After I have defined and filled the buffer from binary .exe data --
unsigned char *buffer ; /*buffer*/
buffer = malloc(300) ; /*allocate space on heap*/
fread(buffer, 300, 1, file) ;
How do I then get the bytes at positions 121-124 of buffer as a long value?
I have tried
long Hint = 0;
memcpy(Hint, buffer[121], 4);
printf("Hint=x%x\n", Hint);
but all I get is an abend on memcpy
Here is a simple way to do that (I put numbers in buffer for the example):
unsigned char *buffer ; /*buffer*/
buffer = (unsigned char*) malloc (300) ; /*allocate space on heap*/
for(int i=0;i<300;i++) /*initialize buffer with numbers for the demo*/
buffer[i] = i;
long Hint = 0;
unsigned int *h = (unsigned int *)&buffer[121]; /* 4-byte view into the buffer; assumes the address is suitably aligned */
Hint = *h;
printf("Hint=0x%lx\n", Hint);
The output for this will be:
Hint=0x7c7b7a79
which is the byte values 121-124 in hex, least-significant byte first (little-endian).
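As for the original attempt: memcpy takes addresses, not values, which is why memcpy(Hint, buffer[121], 4) abends. A minimal alternative sketch that avoids the pointer cast (and any alignment concerns) is to copy the four bytes into the long directly; on a little-endian machine it prints the same value:
long Hint = 0;
memcpy(&Hint, &buffer[121], 4); /* copy 4 bytes into the low-order part of Hint */
printf("Hint=0x%lx\n", Hint);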
I am working on C++ code to read and write a .bmp image.
Below is my code.
However, I encountered some problems that I couldn't fix.
I have googled a lot, but none of the results solve my problem.
Sorry if my coding style doesn't look good to you; I'm new to Xcode and C++.
Please help me; I would really appreciate it.
#include <iostream>
#include <stdio.h>
#pragma pack(2)
typedef struct // BMP file header structure
{
unsigned short bfType ; // Magic number for file
unsigned int bfSize ; // Size of file
unsigned short bfReserved1 ; // Reserved, usually set to 0
unsigned short bfReserved2 ; // Reserved, usually set to 0
unsigned int bfoffBits ; // Offset to bitmap data
}BITMAPFILEHEADER;
#pragma pack()
typedef struct
{
unsigned int biSize ; // Size of info header
int biWidth ; // Width of image
int biHeight ; // Height of image
unsigned short biPlanes ; // Number of color planes
unsigned short biBitCount ; // Number of bits per pixel
unsigned int biCompression ; // Type of compression to use, 0 if there is no compression
unsigned int biSizeImage ; // Size of image data
int biXPelsPerMeter ; // X pixels per meter
int biYPelsPerMeter ; // Y pixels per meter
unsigned int biClrUsed ; // Number of color used
unsigned int biClrImportant ; // Number of important color
}BITMAPINFOHEADER;
unsigned char *ReadBitmapFile(const char *filename, BITMAPINFOHEADER *bitmapInfoHeader)
{
FILE* file ; //file pointer
BITMAPFILEHEADER bitmapFileHeader ; //bitmap file header
unsigned char *bitmapimage ; //store image data
int imageIdx = 0 ;
unsigned char tempRGB ; //swap
// open file in read binary mode
file = fopen(filename, "rb");
if (file == NULL)
return NULL;
// read the bitmap file header
fread(&bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, file);
// read the bitmap info header
fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,file);
//move the file pointer to the beginning of the bitmap data
fseek(file, bitmapFileHeader.bfoffBits, SEEK_SET);
//allocate enough memory for the bitmap image data
bitmapimage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
//verify memory allocation
if(!bitmapimage)
{
fclose(file);
return NULL;
}
// read in the bitmap image data
fread(bitmapimage, bitmapInfoHeader->biSizeImage, 1, file);
//swap the r and b value to get RGB (bitmap is BGR)
for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage; imageIdx+=3){
tempRGB = bitmapimage[imageIdx];
bitmapimage[imageIdx] = bitmapimage[imageIdx + 2];
bitmapimage[imageIdx + 2] = tempRGB;
}
//close file and return bitmap data
fclose(file);
return bitmapimage;
}
BITMAPINFOHEADER bitmapInfoHeader;
unsigned char *bitmapData = ReadBitmapFile("input1.bmp", &bitmapInfoHeader);
By the way, I am using Xcode 8.3.3.
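Since the post doesn't say which step fails, one hedged first check (a sketch I'm adding, reusing the packed structs above) is to print the file header fields right after reading them and confirm the magic number and offsets look sane:
BITMAPFILEHEADER fh;
FILE *f = fopen("input1.bmp", "rb");
if (f && fread(&fh, sizeof(fh), 1, f) == 1)
{
    printf("bfType=0x%04x (expect 0x4d42, i.e. \"BM\"), bfSize=%u, bfoffBits=%u\n",
           fh.bfType, fh.bfSize, fh.bfoffBits);
    fclose(f);
}
With #pragma pack(2), sizeof(BITMAPFILEHEADER) should be 14; if it isn't, the info header will be read from the wrong offset.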
I am trying to speed up a single program by using prefetches. The program is purely a test. Here is what it does:
It uses two int buffers of the same size
It reads one-by-one all the values of the first buffer
It reads the value at the index in the second buffer
It sums all the values taken from the second buffer
It does all the previous steps for bigger and bigger
At the end, I print the number of voluntary and involuntary CPU context switches
In the first version, each value in the first buffer is simply its own index (cf. the createIndexBuffer function in the code just below).
This will be clearer in the code of my program:
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <sys/time.h>
#define BUFFER_SIZE ((unsigned long) 4096 * 100000)
unsigned int randomUint()
{
int value = rand() % UINT_MAX;
return value;
}
unsigned int * createValueBuffer()
{
unsigned int * valueBuffer = (unsigned int *) malloc(BUFFER_SIZE * sizeof(unsigned int));
for (unsigned long i = 0 ; i < BUFFER_SIZE ; i++)
{
valueBuffer[i] = randomUint();
}
return (valueBuffer);
}
unsigned int * createIndexBuffer()
{
unsigned int * indexBuffer = (unsigned int *) malloc(BUFFER_SIZE * sizeof(unsigned int));
for (unsigned long i = 0 ; i < BUFFER_SIZE ; i++)
{
indexBuffer[i] = i;
}
return (indexBuffer);
}
unsigned long long computeSum(unsigned int * indexBuffer, unsigned int * valueBuffer)
{
unsigned long long sum = 0;
for (unsigned int i = 0 ; i < BUFFER_SIZE ; i++)
{
unsigned int index = indexBuffer[i];
sum += valueBuffer[index];
}
return (sum);
}
unsigned int computeTimeInMicroSeconds()
{
unsigned int * valueBuffer = createValueBuffer();
unsigned int * indexBuffer = createIndexBuffer();
struct timeval startTime, endTime;
gettimeofday(&startTime, NULL);
unsigned long long sum = computeSum(indexBuffer, valueBuffer);
gettimeofday(&endTime, NULL);
printf("Sum = %llu\n", sum);
free(indexBuffer);
free(valueBuffer);
return ((endTime.tv_sec - startTime.tv_sec) * 1000 * 1000) + (endTime.tv_usec - startTime.tv_usec);
}
int main()
{
printf("sizeof buffers = %ldMb\n", BUFFER_SIZE * sizeof(unsigned int) / (1024 * 1024));
unsigned int timeInMicroSeconds = computeTimeInMicroSeconds();
printf("Time: %u micro-seconds = %.3f seconds\n", timeInMicroSeconds, (double) timeInMicroSeconds / (1000 * 1000));
}
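The description above mentions printing voluntary and involuntary CPU counters (presumably context switches), which the code as shown does not do; a sketch of how they could be obtained at the end of main (my addition, using POSIX getrusage) is:
#include <sys/resource.h>
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) == 0)
    printf("Context switches: %ld voluntary, %ld involuntary\n",
           usage.ru_nvcsw, usage.ru_nivcsw);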
If I launch it, I get the following output:
$ gcc TestPrefetch.c -O3 -o TestPrefetch && ./TestPrefetch
sizeof buffers = 1562Mb
Sum = 439813150288855829
Time: 201172 micro-seconds = 0.201 seconds
Quick and fast!!!
According to my knowledge (I may be wrong), one of the reasons for having such a fast program is that, as I access my two buffers sequentially, data can be prefetched into the CPU cache.
We can make it more complex so that the data can (almost) never be prefetched into the CPU cache ahead of time. For example, we can just change the createIndexBuffer function to:
unsigned int * createIndexBuffer()
{
unsigned int * indexBuffer = (unsigned int *) malloc(BUFFER_SIZE * sizeof(unsigned int));
for (unsigned long i = 0 ; i < BUFFER_SIZE ; i++)
{
indexBuffer[i] = rand() % BUFFER_SIZE;
}
return (indexBuffer);
}
Let's try the program once again:
$ gcc TestPrefetch.c -O3 -o TestPrefetch && ./TestPrefetch
sizeof buffers = 1562Mb
Sum = 439835307963131237
Time: 3730387 micro-seconds = 3.730 seconds
More than 18 times slower!!!
We now arrive at my problem. Given the new createIndexBuffer function, I would like to speed up the computeSum function using prefetching:
unsigned long long computeSum(unsigned int * indexBuffer, unsigned int * valueBuffer)
{
unsigned long long sum = 0;
for (unsigned int i = 0 ; i < BUFFER_SIZE ; i++)
{
__builtin_prefetch((char *) &indexBuffer[i + 1], 0, 0);
unsigned int index = indexBuffer[i];
sum += valueBuffer[index];
}
return (sum);
}
Of course, I also have to change createIndexBuffer so that it allocates a buffer with one more element.
I relaunch my program: no better! Since the prefetch may take longer than one for-loop iteration, I can prefetch not one element ahead but two elements ahead:
__builtin_prefetch((char *) &indexBuffer[i + 2], 0, 0);
No better! Two loop iterations ahead? No better. Three? I tried everything up to 50 (!!!) but I cannot improve the performance of my computeSum function.
I would like help understanding why.
Thank you very much for your help.
I believe that the above code is already optimized automatically by the CPU, with no further room for manual optimization.
1. The main problem is that indexBuffer is accessed sequentially. The hardware prefetcher senses this and prefetches further values automatically, with no need to call prefetch manually. So, during iteration #i, the values indexBuffer[i+1], indexBuffer[i+2], ... are already in cache. (By the way, there is no need to add an artificial element to the end of the array: memory access errors are silently ignored by prefetch instructions.)
What you really need to do is to prefetch valueBuffer instead:
__builtin_prefetch((char *) &valueBuffer[indexBuffer[i + 1]], 0, 0);
2. But adding the above line of code won't help either in such a simple scenario. The cost of accessing memory is hundreds of cycles, while an add instruction is ~1 cycle. Your code already spends 99% of its time in memory accesses, so a manual prefetch would make it at most one cycle faster, which is no better.
A manual prefetch would really work well if your math were much heavier (try it), for example an expression with a large number of divisions that cannot be optimized out (20-30 cycles each) or a call to some math function (log, sin).
3. But even this is not guaranteed to help. The dependency between loop iterations is very weak; it only goes through the sum variable. This allows the CPU to execute instructions speculatively: it may start fetching the value for iteration i+1 concurrently while still executing the math for valueBuffer[indexBuffer[i]].
A prefetch normally fetches a full cache line, typically 64 bytes. So the random example always fetches 64 bytes to obtain a 4-byte int: 16 times the data you actually need, which fits very well with the slowdown by a factor of about 18. The code is simply limited by memory throughput, not latency.
Sorry. What I gave you was not the correct version of my code. The correct version is what you said:
__builtin_prefetch((char *) &valueBuffer[indexBuffer[i + prefetchStep]], 0, 0);
However, even with the right version, it is unfortunately no better.
Then I adapted my program to try your suggestion using the sin function.
My adapted program is the following:
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <sys/time.h>
#include <math.h>
#define BUFFER_SIZE ((unsigned long) 4096 * 50000)
unsigned int randomUint()
{
int value = rand() % UINT_MAX;
return value;
}
unsigned int * createValueBuffer()
{
unsigned int * valueBuffer = (unsigned int *) malloc(BUFFER_SIZE * sizeof(unsigned int));
for (unsigned long i = 0 ; i < BUFFER_SIZE ; i++)
{
valueBuffer[i] = randomUint();
}
return (valueBuffer);
}
unsigned int * createIndexBuffer(unsigned short prefetchStep)
{
unsigned int * indexBuffer = (unsigned int *) malloc((BUFFER_SIZE + prefetchStep) * sizeof(unsigned int));
for (unsigned long i = 0 ; i < BUFFER_SIZE ; i++)
{
indexBuffer[i] = rand() % BUFFER_SIZE;
}
return (indexBuffer);
}
double computeSum(unsigned int * indexBuffer, unsigned int * valueBuffer, unsigned short prefetchStep)
{
double sum = 0;
for (unsigned int i = 0 ; i < BUFFER_SIZE ; i++)
{
__builtin_prefetch((char *) &valueBuffer[indexBuffer[i + prefetchStep]], 0, 0);
unsigned int index = indexBuffer[i];
sum += sin(valueBuffer[index]);
}
return (sum);
}
unsigned int computeTimeInMicroSeconds(unsigned short prefetchStep)
{
unsigned int * valueBuffer = createValueBuffer();
unsigned int * indexBuffer = createIndexBuffer(prefetchStep);
struct timeval startTime, endTime;
gettimeofday(&startTime, NULL);
double sum = computeSum(indexBuffer, valueBuffer, prefetchStep);
gettimeofday(&endTime, NULL);
printf("prefetchStep = %d, Sum = %f - ", prefetchStep, sum);
free(indexBuffer);
free(valueBuffer);
return ((endTime.tv_sec - startTime.tv_sec) * 1000 * 1000) + (endTime.tv_usec - startTime.tv_usec);
}
int main()
{
printf("sizeof buffers = %ldMb\n", BUFFER_SIZE * sizeof(unsigned int) / (1024 * 1024));
for (unsigned short prefetchStep = 0 ; prefetchStep < 250 ; prefetchStep++)
{
unsigned int timeInMicroSeconds = computeTimeInMicroSeconds(prefetchStep);
printf("Time: %u micro-seconds = %.3f seconds\n", timeInMicroSeconds, (double) timeInMicroSeconds / (1000 * 1000));
}
}
The output is:
$ gcc TestPrefetch.c -O3 -o TestPrefetch -lm && taskset -c 7 ./TestPrefetch
sizeof buffers = 781Mb
prefetchStep = 0, Sum = -1107.523504 - Time: 20895326 micro-seconds = 20.895 seconds
prefetchStep = 1, Sum = 13456.262424 - Time: 12706720 micro-seconds = 12.707 seconds
prefetchStep = 2, Sum = -20179.289469 - Time: 12136174 micro-seconds = 12.136 seconds
prefetchStep = 3, Sum = 12068.302534 - Time: 11233803 micro-seconds = 11.234 seconds
prefetchStep = 4, Sum = 21071.238160 - Time: 10855348 micro-seconds = 10.855 seconds
prefetchStep = 5, Sum = -22648.280105 - Time: 10517861 micro-seconds = 10.518 seconds
prefetchStep = 6, Sum = 22665.381676 - Time: 9205809 micro-seconds = 9.206 seconds
prefetchStep = 7, Sum = 2461.741268 - Time: 11391088 micro-seconds = 11.391 seconds
...
So here, it works better! Honestly, I was almost sure it would not help, because the cost of the math function is high compared to the memory access.
If anyone could give me more information about why it is better now, I would appreciate it.
Thank you very much.
To convert a byte array from another machine which is big-endian, we can use:
long long convert(unsigned char data[]) {
long long res;
res = 0;
for( int i=0;i < DATA_SIZE; ++i)
res = (res << 8) + data[i];
return res;
}
If the other machine is little-endian, we can use:
long long convert(unsigned char data[]) {
long long res;
res = 0;
for( int i=DATA_SIZE-1;i >=0 ; --i)
res = (res << 8) + data[i];
return res;
}
Why do we need the above functions? Shouldn't we use hton* at the sender and ntoh* when receiving? Is it because hton/ntoh convert integers while this convert() works on a char array?
The hton/ntoh functions convert between network order and host order. If these two are the same (i.e., on big-endian machines), these functions do nothing, so they cannot be portably relied upon to swap endianness. Also, as you pointed out, they are only defined for 16-bit (htons) and 32-bit (htonl) integers; your code can handle up to sizeof(long long) bytes, depending on how DATA_SIZE is set.
Through the network you always receive a series of bytes (octets), which you can't directly pass to ntohs or ntohl. Supposing the incoming bytes are buffered in the (unsigned) char array buf, you could do
short x = ntohs(*(short *)(buf+offset));
but this is not portable unless buf+offset is always even, so that you read with correct alignment. Similarly, to do
long y = ntohl(*(long *)(buf+offset));
you have to make sure that 4 divides buf+offset. Your convert() functions, though, don't have this limitation; they can process a byte series at an arbitrary (unaligned) memory address.
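If you do want to reuse ntohl, a portable sketch (my addition; it assumes a 4-byte big-endian value at buf+offset and POSIX headers) is to memcpy the bytes into a properly aligned integer first:
#include <arpa/inet.h> /* ntohl */
#include <string.h>    /* memcpy */
#include <stdint.h>
uint32_t read_be32(const unsigned char *buf, size_t offset)
{
    uint32_t tmp;
    memcpy(&tmp, buf + offset, sizeof tmp); /* alignment-safe, no pointer casting */
    return ntohl(tmp);                      /* big-endian (network) order to host order */
}
Compilers typically turn this memcpy into a single load, so it costs nothing compared to the cast.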
I need to convert rgba8 to rgba5551 manually. I found some helpful code in another post and want to modify it to convert from rgba8 to rgba5551. I don't really have experience with bitwise stuff and haven't had any luck messing with the code myself.
void* rgba8888_to_rgba4444( void* src, int src_bytes)
{
// compute the actual number of pixel elements in the buffer.
int num_pixels = src_bytes / 4;
unsigned int* psrc = (unsigned int*)src; // 32-bit pixels (unsigned long may be 8 bytes, e.g. on LP64 systems)
unsigned short* pdst = (unsigned short*)src;
// convert every pixel
for(int i = 0; i < num_pixels; i++){
// read a source pixel
unsigned px = psrc[i];
// keep the top 4 bits of each 8-bit channel, shifted into their 4444 positions
unsigned r = (px << 8) & 0xf000;
unsigned g = (px >> 4) & 0x0f00;
unsigned b = (px >> 16) & 0x00f0;
unsigned a = (px >> 28) & 0x000f;
// and store
pdst[i] = r | g | b | a;
}
return pdst;
}
The value of RGBA5551 is that it has color info condensed into 16 bits - or two bytes, with only one bit for the alpha channel (on or off). RGBA8888, on the other hand, uses a byte for each channel. (If you don't need an alpha channel, I hear RGB565 is better - as humans are more sensitive to green). Now, with 5 bits, you get the numbers 0 through 31, so r, g, and b each need to be converted to some number between 0 and 31, and since they are originally a byte each (0-255), we multiply each by 31/255. Here is a function that takes RGBA bytes as input and outputs RGBA5551 as a short:
unsigned short RGBA8888_to_RGBA5551(unsigned char r, unsigned char g, unsigned char b, unsigned char a){
unsigned char r5 = r*31/255; // All arithmetic is integer arithmetic, so the result is truncated. If you want to round to the nearest integer, adjust this code accordingly.
unsigned char g5 = g*31/255;
unsigned char b5 = b*31/255;
unsigned char a1 = (a > 0) ? 1 : 0; // 1 if a is positive, 0 otherwise. You must decide what is sensible.
// Now that we have our 5-bit r, g and b and our 1-bit a, we need to shift them into place before combining.
// unsigned short is used so that r5 << 11 cannot overflow a signed 16-bit value.
unsigned short rShift = (unsigned short)(r5 << 11); // r5 << 11 looks like rrrrr00000000000
unsigned short gShift = (unsigned short)(g5 << 6);
unsigned short bShift = (unsigned short)(b5 << 1);
// Combine and return
return rShift | gShift | bShift | a1;
}
You can, of course, condense this code.
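Tying this back to the buffer-style function in the question, a sketch of an in-place whole-buffer converter (my addition; it assumes the source pixels are stored as R, G, B, A bytes and that src_bytes is a multiple of 4) could reuse the function above:
// Convert an RGBA8888 buffer to RGBA5551 in place, writing 16-bit pixels over the front of the buffer.
void* rgba8888_to_rgba5551(void* src, int src_bytes)
{
    int num_pixels = src_bytes / 4;
    unsigned char* psrc = (unsigned char*)src;
    unsigned short* pdst = (unsigned short*)src;
    for(int i = 0; i < num_pixels; i++){
        unsigned char r = psrc[i*4 + 0];
        unsigned char g = psrc[i*4 + 1];
        unsigned char b = psrc[i*4 + 2];
        unsigned char a = psrc[i*4 + 3];
        pdst[i] = RGBA8888_to_RGBA5551(r, g, b, a); // each 16-bit result lands before the bytes still to be read
    }
    return pdst;
}
As in the rgba8888_to_rgba4444 example, this works in place because the 2-byte output for pixel i never overtakes the 4-byte input that has yet to be read.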
#include <iostream>
#include <Windows.h>
#include <string>
using namespace std;
HANDLE hPort = CreateFile("COM2",
GENERIC_WRITE|GENERIC_READ,0,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
DCB dcb;
bool writebyte(char*data)
{
DWORD byteswritten;
if (!GetCommState(hPort,&dcb))
{
printf("\nSerial port can't be open\n");
return false;
}
dcb.BaudRate = CBR_9600;
dcb.ByteSize = 8;
dcb.Parity = NOPARITY;
dcb.StopBits = ONESTOPBIT;
if (!SetCommState(hPort,&dcb))
return false;
bool retVal = WriteFile(hPort,data,1,&byteswritten,NULL);
return retVal;
}
int ReadByte()
{
int Val;
BYTE Byte;
DWORD dwBytesTransferred;
DWORD dwCommModemStatus;
if (!GetCommState(hPort,&dcb))
return 0;
SetCommMask(hPort,EV_RXCHAR | EV_ERR);
WaitCommEvent (hPort,&dwCommModemStatus,0);
if (dwCommModemStatus & EV_RXCHAR)
ReadFile (hPort,&Byte,1,&dwBytesTransferred,0);
Val = Byte;
return Val;
}
int main() {
POINT p;
int x;
int y;
int z;
while(0==0){
GetCursorPos(&p);
x = p.x;
y = p.y;
HDC hDC;
hDC = GetDC(NULL);
cin >> z;
cout << GetPixel(hDC, x, y) << endl;
Sleep(z);
ReleaseDC(NULL, hDC);
char data = GetPixel(hDC, x, y);
if (writebyte(&data))
cout <<" DATA SENT.. " << (int)data<< "\n";
}
}
In the part that sends data through serial communication, instead of sending the value from GetPixel(hDC, x, y), it only sends the value "-1". I was thinking it is because char is only for small integers and the value I am getting is a very large number. I tried changing it to long int but I still get the same result: it only sends "-1". I thought the solution might be converting char to long int, or long int to char, before sending the data, but I don't know how. Can someone help me?
Why do you use hDC after releasing it?
ReleaseDC(NULL, hDC);
char data = GetPixel(hDC, x, y);
GetPixel will return -1 (CLR_INVALID) in case of an error (see MSDN).
And, by the way, a COLORREF is not a char, so you lose information when storing the return value of GetPixel in char data. You should store the complete COLORREF and send/receive all of its bytes (so send/receive sizeof(COLORREF) bytes).
Also be aware of byte order. If you are transferring multi-byte data between two machines, then you must ensure that both agree on the order of the bytes. If, for example, one machine is little-endian and the other big-endian, then they store a COLORREF with different byte order in memory: one stores the COLORREF 0x00BBGGRR in memory as { 0xRR, 0xGG, 0xBB, 0 }, whereas the other stores it as { 0, 0xBB, 0xGG, 0xRR }. So you need to define a transmit byte order which both sides use, independent of their host byte order. If you don't want to reinvent the wheel, you can take a look at network byte order and reuse that. The socket API gives you functions like ntohl and htonl which help you convert from host byte order to network byte order and vice versa.
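A minimal sketch of that idea (my addition; it reuses the hPort handle from the code above, picks a fixed big-endian wire order, and uses sendColorref as a hypothetical helper name):
// Send a COLORREF as 4 bytes in a fixed (big-endian) order, independent of host endianness.
bool sendColorref(COLORREF c)
{
    unsigned char bytes[4];
    bytes[0] = (unsigned char)((c >> 24) & 0xFF); // always 0 for a 0x00BBGGRR value
    bytes[1] = (unsigned char)((c >> 16) & 0xFF); // blue
    bytes[2] = (unsigned char)((c >> 8) & 0xFF);  // green
    bytes[3] = (unsigned char)(c & 0xFF);         // red
    DWORD written = 0;
    return WriteFile(hPort, bytes, sizeof(bytes), &written, NULL) && written == sizeof(bytes);
}
The receiver would read 4 bytes and reassemble them with shifts in the same order, so both sides agree regardless of their own endianness.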