I wrote the following program to record sound through the sound card on Windows and print the PCM data from the WAVEHDR buffer, but it only gives values in the range 32600-32700. Is the problem with my sound card? I have used WAVE_MAPPER, which automatically selects the source. Please help me.
#include <Windows.h>
#include <MMSystem.h>
#include <iostream>
using namespace std;
int main(){
HWAVEIN microHandle;
WAVEHDR waveHeader;
MIXERCAPS mixerCaps;
WAVEFORMATEX format;
//HWAVEOUT hwo; // play
while (1){
const int NUMPTS = 44100 * 0.01; // 0.01 seconds' worth of samples (441)
int sampleRate = 44100; //can get frequency from here
short int waveIn[NUMPTS]; // 'short int' is a 16-bit type; I request 16-bit samples below
// for 8-bit capture, you'd use 'unsigned char' or 'BYTE' 8-bit types
MMRESULT result = 0;
format.wFormatTag = WAVE_FORMAT_PCM; // simple, uncompressed format
format.wBitsPerSample = 8; // 16 for high quality, 8 for telephone-grade
format.nChannels = 1; // 1=mono, 2=stereo
format.nSamplesPerSec = sampleRate; // 22050
format.nAvgBytesPerSec = format.nSamplesPerSec*format.nChannels*format.wBitsPerSample / 8;
// = nSamplesPerSec * n.Channels * wBitsPerSample/8
format.nBlockAlign = format.nChannels*format.wBitsPerSample / 8;
// = n.Channels * wBitsPerSample/8
format.cbSize = 0;
result = waveInOpen(&microHandle, WAVE_MAPPER, &format, 0L, 0L, WAVE_FORMAT_DIRECT);
cout << "checking step 1" << endl;
if (result)
{
cout << "Fail step 1" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
// Set up and prepare header for input
waveHeader.lpData = (LPSTR)waveIn;
waveHeader.dwBufferLength = NUMPTS;// *2;//why *2
waveHeader.dwBytesRecorded = 0;
waveHeader.dwUser = 0L;
waveHeader.dwFlags = 0L;
waveHeader.dwLoops = 0L;
waveInPrepareHeader(microHandle, &waveHeader, sizeof(WAVEHDR));
// Insert a wave input buffer
result = waveInAddBuffer(microHandle, &waveHeader, sizeof(WAVEHDR));
int NumOfMixers = mixerGetNumDevs();
cout << NumOfMixers << endl;
//cout<<(char*)mixerCaps.szPname<<endl;
//system("pause");
cout << "checking step 2" << endl;
if (result)
{
cout << "Fail step 2" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
//system("pause");
result = waveInStart(microHandle);
cout << "checking step 3......started recording..." << endl;
if (result)
{
cout << "Fail step 3" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
// Wait until finished recording
do { cout << "still"; } while (waveInUnprepareHeader(microHandle, &waveHeader, sizeof(WAVEHDR)) == WAVERR_STILLPLAYING);
waveInStop(microHandle);
waveInReset(microHandle);
//waveInUnprepareHeader(hwi, lpWaveHdr, sizeof(WAVEHDR));
waveInClose(microHandle);
//printing the buffer
for (int i = 0; i < waveHeader.dwBufferLength; i++)
{
if (waveIn[i] > 0)
cout << i << "\t" << waveIn[i] << endl;
}
}
system("pause");
//waveInClose(microHandle);
return 0;
}
I would be very grateful if someone could help me...
One obvious problem is that you're mixing 16-bit and 8-bit usage. Your buffer is defined as a 16-bit short. Notice your own comment:
short int waveIn[NUMPTS]; // 'short int' is a 16-bit type; I request 16-bit samples below
// for 8-bit capture, you'd use 'unsigned char' or 'BYTE' 8-bit types
Yet, when defining the audio format you are specifying 8 bits:
format.wBitsPerSample = 8; // 16 for high quality, 8 for telephone-grade
Either change to format.wBitsPerSample = 16, or, if you want 8-bit audio, do something like this:
unsigned char waveIn[NUMPTS];
...
format.wBitsPerSample = 8; // 16 for high quality, 8 for telephone-grade
//printing the buffer
for (int i = 0; i < waveHeader.dwBufferLength; i++)
{
cout << i << "\t" << (int)waveIn[i] << endl;
}
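If you go the 16-bit route instead, here is a minimal sketch (my own, assuming the rest of the question's code stays as is). Note that WAVEHDR::dwBufferLength is a byte count, not a sample count, which is why many examples multiply by 2 for 16-bit mono audio:
short int waveIn[NUMPTS];                           // 16-bit signed samples
...
format.wBitsPerSample = 16;                         // must match the buffer's element size
...
waveHeader.lpData = (LPSTR)waveIn;
waveHeader.dwBufferLength = NUMPTS * sizeof(short); // byte count, i.e. NUMPTS * 2
...
//printing the buffer
for (int i = 0; i < NUMPTS; i++)
{
    cout << i << "\t" << waveIn[i] << endl;         // each element is one 16-bit sample
}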
I'm attempting to decode frames, but memory usage grows with every frame (more specifically, with every call to avcodec_send_packet) until finally the code crashes with a bad_alloc. Here's the basic decode loop:
int rfret = 0;
while((rfret = av_read_frame(inctx.get(), &packet)) >= 0){
if (packet.stream_index == vstrm_idx) {
//std::cout << "Sending Packet" << std::endl;
int ret = avcodec_send_packet(ctx.get(), &packet);
if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
std::cout << "avcodec_send_packet: " << ret << std::endl;
break;
}
while (ret >= 0) {
//std::cout << "Receiving Frame" << std::endl;
ret = avcodec_receive_frame(ctx.get(), fr);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
//std::cout << "avcodec_receive_frame: " << ret << std::endl;
av_frame_unref(fr);
// av_frame_free(&fr);
break;
}
std::cout << "frame: " << ctx->frame_number << std::endl;
// eventually do something with the frame here...
av_frame_unref(fr);
// av_frame_free(&fr);
}
}
else {
//std::cout << "Not Video" << std::endl;
}
av_packet_unref(&packet);
}
Memory usage/leakage seems to scale with the resolution of the video I'm decoding. For example, for a 3840x2160 video, the memory usage in Windows Task Manager consistently jumps up by about 8 MB (1 byte per pixel??) for each received frame. Do I need to do something besides call av_frame_unref to release the memory?
(more) complete code below
void AVFormatContextDeleter(AVFormatContext* ptr)
{
if (ptr) {
avformat_close_input(&ptr);
}
}
void AVCodecContextDeleter(AVCodecContext* ptr)
{
if (ptr) {
avcodec_free_context(&ptr);
}
}
typedef std::unique_ptr<AVFormatContext, void (*)(AVFormatContext *)> AVFormatContextPtr;
typedef std::unique_ptr<AVCodecContext, void (*)(AVCodecContext *)> AVCodecContextPtr;
AVCodecContextPtr createAvCodecContext(AVCodec *vcodec)
{
AVCodecContextPtr ctx(avcodec_alloc_context3(vcodec), AVCodecContextDeleter);
return ctx;
}
AVFormatContextPtr createFormatContext(const std::string& filename)
{
AVFormatContext* inctxPtr = nullptr;
int ret = avformat_open_input(&inctxPtr, filename.c_str(), nullptr, nullptr);
// int ret = avformat_open_input(&inctx, "D:/Videos/test.mp4", nullptr, nullptr);
if (ret != 0) {
inctxPtr = nullptr;
}
return AVFormatContextPtr(inctxPtr, AVFormatContextDeleter);
}
int testDecode()
{
// open input file context
AVFormatContextPtr inctx = createFormatContext("D:/Videos/Matt Chapman Hi Greg.MOV");
if (!inctx) {
// std::cerr << "fail to avforamt_open_input(\"" << infile << "\"): ret=" << ret;
return 1;
}
// retrieve input stream information
int ret = avformat_find_stream_info(inctx.get(), nullptr);
if (ret < 0) {
//std::cerr << "fail to avformat_find_stream_info: ret=" << ret;
return 2;
}
// find primary video stream
AVCodec* vcodec = nullptr;
const int vstrm_idx = av_find_best_stream(inctx.get(), AVMEDIA_TYPE_VIDEO, -1, -1, &vcodec, 0);
if (vstrm_idx < 0) {
//std::cerr << "fail to av_find_best_stream: vstrm_idx=" << vstrm_idx;
return 3;
}
AVCodecParameters* origin_par = inctx->streams[vstrm_idx]->codecpar;
if (vcodec == nullptr) { // is this even necessary?
vcodec = avcodec_find_decoder(origin_par->codec_id);
if (!vcodec) {
// Can't find decoder
return 4;
}
}
AVCodecContextPtr ctx = createAvCodecContext(vcodec);
if (!ctx) {
return 5;
}
ret = avcodec_parameters_to_context(ctx.get(), origin_par);
if (ret) {
return 6;
}
ret = avcodec_open2(ctx.get(), vcodec, nullptr);
if (ret < 0) {
return 7;
}
//print input video stream information
std::cout
//<< "infile: " << infile << "\n"
<< "format: " << inctx->iformat->name << "\n"
<< "vcodec: " << vcodec->name << "\n"
<< "size: " << origin_par->width << 'x' << origin_par->height << "\n"
<< "fps: " << av_q2d(ctx->framerate) << " [fps]\n"
<< "length: " << av_rescale_q(inctx->duration, ctx->time_base, {1,1000}) / 1000. << " [sec]\n"
<< "pixfmt: " << av_get_pix_fmt_name(ctx->pix_fmt) << "\n"
<< "frame: " << inctx->streams[vstrm_idx]->nb_frames << "\n"
<< std::flush;
AVPacket packet;
av_init_packet(&packet);
packet.data = nullptr;
packet.size = 0;
AVFrame *fr = av_frame_alloc();
if (!fr) {
return 8;
}
int rfret = 0;
while((rfret = av_read_frame(inctx.get(), &packet)) >= 0){
if (packet.stream_index == vstrm_idx) {
//std::cout << "Sending Packet" << std::endl;
int ret = avcodec_send_packet(ctx.get(), &packet);
if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
std::cout << "avcodec_send_packet: " << ret << std::endl;
break;
}
while (ret >= 0) {
//std::cout << "Receiving Frame" << std::endl;
ret = avcodec_receive_frame(ctx.get(), fr);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
//std::cout << "avcodec_receive_frame: " << ret << std::endl;
av_frame_unref(fr);
// av_frame_free(&fr);
break;
}
std::cout << "frame: " << ctx->frame_number << std::endl;
// do something with the frame here...
av_frame_unref(fr);
// av_frame_free(&fr);
}
}
else {
//std::cout << "Not Video" << std::endl;
}
av_packet_unref(&packet);
}
std::cout << "RFRET = " << rfret << std::endl;
return 0;
}
Update 1: (1/21/2019) Compiling on a different machine and running with different video files I am not seeing the memory usage growing without bound. I'll try to narrow down where the difference lies (compiler?, ffmpeg version?, or video encoding?)
Update 2: (1/21/2019) Ok, it looks like there is some interaction occurring between ffmpeg and Qt's QCamera. In my application, I'm using Qt to manage the webcam, but decided to use ffmpeg libraries to handle decoding/encoding since Qt doesn't have as comprehensive support for different codecs. If I have the camera turned on (through Qt), ffmpeg decoding memory consumption grows without bound. If the camera is off, ffmpeg behaves fine. I've tried this both with a physical camera (Logitech C920) and with a virtual camera using OBS-Virtualcam, with the same result. So far I'm baffled as to how the two systems are interacting...
I had the same problem.
Before you call av_frame_unref, call av_freep(&frame->data[0]).
In my case av_frame_unref did not release the raw image data in the frame.
example:
av_freep(&pFrame->data[0]);
av_frame_unref(pFrame);
//av_free(pFrame);
EDIT:
Sorry, my English is limited.
When you decode the video, the image data sits in the frame's buffer.
That buffer is not freed until you release it, and the decoder will allocate memory again for the next frame, so once you have finished using the image data you should release the buffer.
Are you using it like this?
while (Framecheck = av_read_frame(pFormatCtx, &packet) == NULL ) {
if (d_end == true)
break;
if (packet.stream_index == VSI) {
if (bool res = avcodec_send_packet(pVideoCodecCtx, &packet)) {
printf("avcodec_send_packet failed %d %d %d\n", res, AVERROR(EINVAL), AVERROR(ENOMEM));
}
if (bool res = avcodec_receive_frame(pVideoCodecCtx, pVFrame) == 0) {
printf("avcodec_receive failed %d %d %d\n", res, AVERROR(EINVAL), AVERROR(ENOMEM));
}
if (pVFrame->data[0] == NULL && pVFrame->data[1] == NULL && pVFrame->data[2] == NULL)
continue;
else {
YUV_frame = Con_yuv_RGB(pVFrame);
QFrame->push(YUV_frame);
PushCount++;
}
}
Sleep(5);
}
if (Framecheck != true){
av_packet_unref(&packet);
d_end = true;
return true;
release:
if (FrameQueue->size()) {
while (FrameQueue->size() > 0) {
av_freep(&FrameQueue->front());
//av_frame_unref(FrameQueue->front());
av_free(FrameQueue->front());
FrameQueue->pop();
}
}
Try calling av_frame_free when you're done with the frame (outside your while loop), and don't call av_frame_unref.
See example here:
https://ffmpeg.org/doxygen/4.0/decode__video_8c_source.html
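For reference, a minimal sketch of that pattern, reusing the question's own names (inctx, ctx, fr, packet and vstrm_idx are assumed to be set up exactly as in the question):
AVFrame *fr = av_frame_alloc();
while (av_read_frame(inctx.get(), &packet) >= 0) {
    if (packet.stream_index == vstrm_idx) {
        if (avcodec_send_packet(ctx.get(), &packet) >= 0) {
            while (avcodec_receive_frame(ctx.get(), fr) >= 0) {
                // use fr here; avcodec_receive_frame calls av_frame_unref(fr)
                // itself before writing new data into it
            }
        }
    }
    av_packet_unref(&packet);
}
av_frame_free(&fr);   // a single free after the loop, as suggested above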
Note: I have tagged this with both programming and windows networking tags, so please don't shout, I'm just trying to expose this to as many people as may be able to help!
I am trying to set the receive and send buffers for a small client and server I have written, so that when I perform a network capture, I see the window size I have set in the TCP handshake.
For the programmers, please consider the following very simple code for a client and server.
For the non-programmers, please skip past this section to my image.
Client:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <thread>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main(int argc, char** argv)
{
if (argc != 3) { cout << "not enough args!\n"; return 0; }
const char* pszHost = argv[1];
const int nPort = atoi(argv[2]);
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(nPort);
if (!InetPtonA(AF_INET, pszHost, &sockaddr_IPv4.sin_addr)) { return 0; }
SOCKET clientSock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); // Create active socket: one which is passed to connect().
if (!SetWindowSizeVal(clientSock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on client socket as: RECV" << OutputWindowSize(clientSock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(clientSock, SO_SNDBUF) << endl;
int nRet = connect(clientSock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_in));
if (nRet != 0) { return 0; }
char buf[100] = { 0 };
nRet = recv(clientSock, buf, 100, 0);
cout << "Received " << buf << " from the server!" << endl;
nRet = send(clientSock, "Hello from the client!\n", strlen("Hello from the client!\n"), 0);
closesocket(clientSock);
return 0;
}
Server:
#include <WinSock2.h>
#include <mstcpip.h>
#include <Ws2tcpip.h>
#include <iostream>
using namespace std;
int OutputWindowSize(SOCKET s, unsigned int nType)
{
int buflen = 0;
int nSize = sizeof(buflen);
if (getsockopt(s, SOL_SOCKET, nType, (char *)&buflen, &nSize) == 0)
return buflen;
return -1;
}
bool SetWindowSizeVal(SOCKET s, unsigned int nSize)
{
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *)&nSize, sizeof(nSize)) == 0)
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *)&nSize, sizeof(nSize)) == 0)
return true;
return false;
}
int main()
{
WSADATA wsaData;
DWORD Ret = 0;
if ((Ret = WSAStartup(MAKEWORD(2, 2), &wsaData)) != 0)
{
printf("WSAStartup() failed with error %d\n", Ret);
return 1;
}
struct sockaddr_in sockaddr_IPv4;
memset(&sockaddr_IPv4, 0, sizeof(struct sockaddr_in));
sockaddr_IPv4.sin_family = AF_INET;
sockaddr_IPv4.sin_port = htons(19982);
int y = InetPton(AF_INET, L"127.0.0.1", &sockaddr_IPv4.sin_addr);
if (y != 1) return 0;
socklen_t addrlen = sizeof(sockaddr_IPv4);
SOCKET sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (!SetWindowSizeVal(sock, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size on listen socket as: RECV" << OutputWindowSize(sock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sock, SO_SNDBUF) << endl;
if (bind(sock, (sockaddr*)&sockaddr_IPv4, sizeof(sockaddr_IPv4)) != 0) { /* error */ }
if (listen(sock, SOMAXCONN) != 0) { return 0; }
while (1)
{
SOCKET sockAccept = accept(sock, (struct sockaddr *) &sockaddr_IPv4, &addrlen);
if (!SetWindowSizeVal(sockAccept, 12345))
{
cout << "Failed to set window size " << endl;
return -1;
}
cout << "Set window size as on accepted socket as: RECV" << OutputWindowSize(sock, SO_RCVBUF) <<
" SEND: " << OutputWindowSize(sock, SO_SNDBUF) << endl;
if (sockAccept == -1) return 0;
int nRet = send(sockAccept, "Hello from the server!\n", strlen("Hello from the server!\n"), 0);
if (!nRet) return 0;
char buf[100] = { 0 };
nRet = recv(sockAccept, buf, 100, 0);
cout << "Received " << buf << " from the client!" << endl;
if (nRet == 0) { cout << "client disconnected!" << endl; }
closesocket(sockAccept);
}
return 0;
}
The output from my program states that the window sizes have been set successfully:
Set window size on listen socket as: RECV12345 SEND: 12345
Set window size as on accepted socket as: RECV12345 SEND: 12345
for the server, and for the client:
Set window size on listen socket as: RECV12345 SEND: 12345
However, when I capture the traffic using RawCap, I see that the client window size is set fine, but the server's window size is not what I set it to be; it is 8192:
Now, I have read this MS link and it says to add a registry value; I did this, adding the value 0x00001234, but it still made no difference.
The interesting thing is, the same code works fine on a Windows 10 machine, which makes me think it is Windows 7 specific. However, I'm not 100% sure on my code, there might be some errors in it.
Can anyone suggest how I can get Windows to honour my requested parameters please?
These are not 'window sizes'. They are send and receive buffer sizes.
There is no such thing as 'output window size'. There is a receive window and a congestion window, and the latter is not relevant to your question.
The send buffer size has exactly nothing to do with the receive window size, and the receive buffer size only determines the maximum receive window size.
The actual receive window size is adjusted dynamically by the protocol. It is the actual size that you are seeing in Wireshark.
The platform is entitled by the specification to adjust the supplied values for the send and receive buffers up or down, and the documentation advises you to get the corresponding values if you want to be sure what they really are.
There is no problem here to solve.
NB You don't have to set the receive window size on an accepted socket if you already set it on the listening socket. It is inherited.
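To illustrate the last two points with the question's own helpers (SetWindowSizeVal and OutputWindowSize as defined above), a minimal check might look like this:
// Set the buffer sizes once, on the listening socket, before listen().
if (!SetWindowSizeVal(sock, 12345)) { /* handle error */ }
// Read back what the platform actually granted; it may have adjusted the value.
cout << "listening socket: RECV " << OutputWindowSize(sock, SO_RCVBUF)
     << " SEND " << OutputWindowSize(sock, SO_SNDBUF) << endl;
// ...
SOCKET sockAccept = accept(sock, (struct sockaddr*)&sockaddr_IPv4, &addrlen);
// No further setsockopt is needed here: the accepted socket inherits the listener's buffer sizes.
cout << "accepted socket:  RECV " << OutputWindowSize(sockAccept, SO_RCVBUF)
     << " SEND " << OutputWindowSize(sockAccept, SO_SNDBUF) << endl;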
I have encountered this exercise, which asks which of the following two pieces of code is faster.
First code.
int sum = 0;
for(int i = 0; i < n; i++) {
sum += array[i*n + thread_id];
}
Second code.
int sum = 0;
for(int i = 0; i < n; i++) {
sum += array[n*thread_id + i];
}
I would try the code myself, but I will not have an Nvidia GPU in the coming days.
I think that the first code takes advantage of memory coalescing (see here), while the second one would take advantage of caching.
Many thanks to @RobertCrovella for clarifying the issues regarding memory coalescing. This is my attempt to benchmark the two codes, as asked. The output (run on an NVS5400M laptop GPU) clearly shows that the first code is about twice as fast as the second one, because of the memory coalescing taking place in the first one (kernel1).
#include <cuda.h>
#include <ctime>
#include <iostream>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE 1024
#define GRID_SIZE 1024
// Error Handling
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//kernel1<<<8,8>>>(d_array,d_sum1,n);
__global__ void kernel1(int *array, long *sum, int n) {
long result=0;
int thread_id=threadIdx.x+blockIdx.x*blockDim.x;
for(int i=0;i<n;i++) {
result += array[i*n + thread_id];
}
//__syncthreads();
sum[thread_id]=result;
}
__global__ void kernel2(int *array, long *sum, int n) {
long result=0;
int thread_id=threadIdx.x+blockIdx.x*blockDim.x;
for(int i=0;i<n;i++) {
result += array[n*thread_id+i];
}
__syncthreads();
sum[thread_id]=result;
}
int main() {
srand((unsigned)time(0));
long *h_sum1,*d_sum1;
long *h_sum2,*d_sum2;
int n=10;
int size1=n*BLOCK_SIZE*GRID_SIZE+n;
int *h_array;
h_array=new int[size1];
h_sum1=new long[size1];
h_sum2=new long[size1];
//random number range
int min =1, max =10;
for(int i=0;i<size1;i++) {
h_array[i]= min + (rand() % static_cast<int>(max - min + 1));
h_sum1[i]=0;
h_sum2[i]=0;
}
int *d_array;
gpuErrchk(cudaMalloc((void**)&d_array,size1*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&d_sum1,size1*sizeof(long)));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
gpuErrchk(cudaMemcpy(d_array,h_array,size1*sizeof(int),cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_sum1,h_sum1,size1*sizeof(long),cudaMemcpyHostToDevice));
cudaEventRecord(start);
kernel1<<<GRID_SIZE,BLOCK_SIZE>>>(d_array,d_sum1,n);
cudaEventRecord(stop);
gpuErrchk(cudaMemcpy(h_sum1,d_sum1,size1*sizeof(long),cudaMemcpyDeviceToHost));
float milliSeconds1=0;
cudaEventElapsedTime(&milliSeconds1,start,stop);
gpuErrchk(cudaMalloc((void**)&d_sum2,size1*sizeof(long)));
gpuErrchk(cudaMemcpy(d_sum2,h_sum2,size1*sizeof(long),cudaMemcpyHostToDevice));
cudaEventRecord(start);
kernel2<<<GRID_SIZE,BLOCK_SIZE>>>(d_array,d_sum2,10);
cudaEventRecord(stop);
gpuErrchk(cudaMemcpy(h_sum2,d_sum2,size1*sizeof(long),cudaMemcpyDeviceToHost));
float milliSeconds2=0;
cudaEventElapsedTime(&milliSeconds2,start,stop);
long result_device1=0,result_host1=0;
long result_device2=0,result_host2=0;
for(int i=0;i<size1;i++) {
result_device1 += h_sum1[i];
result_device2 += h_sum2[i];
}
for(int thread_id=0;thread_id<GRID_SIZE*BLOCK_SIZE;thread_id++)
for(int i=0;i<10;i++) {
result_host1 += h_array[i*10+thread_id];
result_host2 += h_array[10*thread_id+i];
}
cout << "Device result1 = " << result_device1 << endl;
cout << "Host result1 = " << result_host1 << endl;
cout << "Time1 (ms) = " << milliSeconds1 << endl;
cout << "Device result2 = " << result_device2 << endl;
cout << "Host result2 = " << result_host2 << endl;
cout << "Time2 (ms) = " << milliSeconds2 << endl;
gpuErrchk(cudaFree(d_array));
gpuErrchk(cudaFree(d_sum1));
gpuErrchk(cudaFree(d_sum2));
return 0;
}
The Cuda Event timer output is as under:
Device result1 = 57659226
Host result1 = 57659226
Time1 (ms) = 5.21952
Device result2 = 57674257
Host result2 = 57674257
Time2 (ms) = 11.8356
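To see where the factor of two comes from, here is a small host-side sketch (illustrative only, not part of the benchmark) that prints the byte offsets consecutive threads would touch at a fixed iteration i under the two indexing schemes:
#include <cstdio>
int main() {
    const int n = 10;   // same n as in the benchmark
    const int i = 0;    // one fixed loop iteration
    for (int t = 0; t < 8; t++) {
        // kernel1: array[i*n + t] -> consecutive threads hit consecutive ints (4 bytes apart),
        //          so one coalesced transaction can serve the warp
        // kernel2: array[n*t + i] -> consecutive threads are n ints (40 bytes) apart,
        //          so the warp's loads are scattered across many transactions
        printf("thread %d: kernel1 offset %3d bytes, kernel2 offset %3d bytes\n",
               t, (i * n + t) * 4, (n * t + i) * 4);
    }
    return 0;
}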
Working on WinXP SP3.
Visual Studio 2005.
Trying to read memory of another process.
std::cout<<"Reading Process Memory\n";
const DWORD pid = 3476;
HANDLE handle = OpenProcess(PROCESS_VM_READ,FALSE,pid);
if(handle == NULL) {std::cout<<"Failed to open process\n";return 0;}
char* buffer1 = new char[256];
char* buffer2 = new char[256];
memset(buffer1,0,256*sizeof(char));
memset(buffer2,0,256*sizeof(char));
DWORD nbr = 0;
int address = 0x400000;
BOOL result = ReadProcessMemory(handle,&address,buffer1,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
address = 0x400000+0x1000;
result = ReadProcessMemory(handle,&address,buffer2,32,&nbr);
if(result!=1) std::cout<<"Failed to read memory\n";
int i = 0;
while(i++<10)
{
if(buffer1[i]!=buffer2[i]) {std::cout<<"Buffers are different\n";break;}
}
delete[] buffer1;
delete[] buffer2;
CloseHandle(handle);
std::cin>>i;
return 0;
The problem is that both buffers are getting the same values. ReadProcessMemory returns 1 and the number of bytes read is the same as requested.
Your calls to ReadProcessMemory are incorrect. You should be using address directly, not &address. You may need to cast it to a const void *.
result = ReadProcessMemory(handle, reinterpret_cast<const void *>(address), buffer, 32, &nbr);
And you should probably declare address as a type large enough to hold a pointer, like std::intptr_t or INT_PTR.
INT_PTR address = 0x400000;
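Putting both fixes together, a corrected version of the question's snippet might look like this (same PID, addresses and sizes as in the question; error handling kept minimal):
#include <windows.h>
#include <iostream>
int main()
{
    std::cout << "Reading Process Memory\n";
    const DWORD pid = 3476;
    HANDLE handle = OpenProcess(PROCESS_VM_READ, FALSE, pid);
    if (handle == NULL) { std::cout << "Failed to open process\n"; return 0; }
    char buffer1[256] = {};
    char buffer2[256] = {};
    SIZE_T nbr = 0;                        // ReadProcessMemory reports bytes read as SIZE_T
    INT_PTR address = 0x400000;
    BOOL result = ReadProcessMemory(handle, reinterpret_cast<const void*>(address), buffer1, 32, &nbr);
    if (!result) std::cout << "Failed to read memory\n";
    address = 0x400000 + 0x1000;
    result = ReadProcessMemory(handle, reinterpret_cast<const void*>(address), buffer2, 32, &nbr);
    if (!result) std::cout << "Failed to read memory\n";
    for (int i = 0; i < 10; i++)
        if (buffer1[i] != buffer2[i]) { std::cout << "Buffers are different\n"; break; }
    CloseHandle(handle);
    return 0;
}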
The buffer can't be a char; it has to be an int. Here's a working example:
#include <windows.h>
#include <iostream>
#include <string.h>
using namespace std;
int main()
{
int point1=0;
int i=0;
int d=0;
char* value[4];
SIZE_T stBytes = 0;
HWND hwnd;
HANDLE phandle;
DWORD pid;
hwnd = FindWindow(NULL, "calc"); // calc is the name of the windows process
if (hwnd != 0) {
GetWindowThreadProcessId(hwnd, &pid);
phandle = OpenProcess(PROCESS_ALL_ACCESS, 0, pid);
} else {
cout << "process is not executing";
cin.get();
return 0;
}
if (phandle != 0) {
for(i=0;i<4;i++) // 4 or wathever
{
cout << "The pointer is 0x1001000" << endl; //Print the pointer
ReadProcessMemory(phandle, (LPVOID)(0x1001000 + i), &point1, 4, &stBytes); //Read 4 bytes at 0x1001000+i and store them in point1
cout << "decimal content point1 " << point1 << " (DEC)" << endl; //Print the decimal content of point1
printf("%x \n",point1); // print hexadecimal content of point1
char *p=(char*)&point1; // point point1 buffer
for(d=0;d<4;d++)
printf("%x",(unsigned int)(unsigned char) *(p+d)); // print backwards (because the buffer is like a LIFO) and see the dbg debugger
}
ReadProcessMemory(phandle, (LPVOID)point1, &value, 6, &stBytes); //Get the value that is in the address pointed by the pointer
cout << "The value in the non-static address is " << (char*)value << endl << endl; //Print the value
cout << "Press ENTER to exit." << endl;
cin.get();
} else {
cout << "Couldn't get a handle";
cin.get();
// address 0x1001000 content hex 5278DA77
}
}
I am trying to record audio on Windows; here is my code. It works well for 8-bit, but it does not work for 16-bit. Can anyone help me?
#include <Windows.h>
#include <MMSystem.h>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#pragma comment(lib,"winmm.lib")
using namespace std;
int test(){
HWAVEIN microHandle;
WAVEHDR waveHeader;
MMRESULT result = 0;
WAVEFORMATEX waveformat;
waveformat.wFormatTag = WAVE_FORMAT_PCM;
waveformat.wBitsPerSample=8;
waveformat.nSamplesPerSec=16000;//8000;
waveformat.nAvgBytesPerSec=waveformat.nSamplesPerSec*waveformat.nSamplesPerSec/8;
waveformat.nChannels=1;
waveformat.nBlockAlign=waveformat.nChannels*waveformat.wBitsPerSample/8;
waveformat.cbSize=0;
result = waveInOpen(&microHandle, WAVE_MAPPER, &waveformat, 0L, 0L, CALLBACK_EVENT);
if (result)
{
cout << "Fail step 1" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
const int BUFSIZE = 16000*4;
char * buf = (char *)malloc(BUFSIZE);
// Set up and prepare header for input
waveHeader.lpData = (LPSTR)buf;
waveHeader.dwBufferLength = BUFSIZE;
waveHeader.dwBytesRecorded=0;
waveHeader.dwUser = 0L;
waveHeader.dwFlags = 0L;
waveHeader.dwLoops = 0L;
waveInPrepareHeader(microHandle, &waveHeader, sizeof(WAVEHDR));
// Insert a wave input buffer
result = waveInAddBuffer(microHandle, &waveHeader, sizeof(WAVEHDR));
if (result)
{
cout << "Fail step 2" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
result = waveInStart(microHandle);
if (result)
{
cout << "Fail step 3" << endl;
cout << result << endl;
Sleep(10000);
return 0;
}
// Wait until finished recording
do {} while (waveInUnprepareHeader(microHandle, &waveHeader, sizeof(WAVEHDR))==WAVERR_STILLPLAYING);
FILE *fp = fopen("output.pcm","w");
fwrite(buf,1,BUFSIZE,fp);
fclose(fp);
waveInClose(microHandle);
return 0;
}
void main()
{
test();
}
If I set waveformat.wBitsPerSample = 8, it records the audio correctly,
but if I set waveformat.wBitsPerSample = 16, it records only noise!
Can anyone help me?
Thanks.
It should be FILE *fp = fopen("output.pcm","wb");, NOT FILE *fp = fopen("output.pcm","w");. In text mode, Windows expands every 0x0A byte to 0x0D 0x0A on write, which shifts the alignment of the 16-bit samples that follow and turns the recording into noise.
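A minimal corrected write, as a sketch (using dwBytesRecorded instead of the full buffer size is my own additional suggestion, not part of the answer above):
FILE *fp = fopen("output.pcm", "wb");   // "b": write the raw bytes untranslated
if (fp) {
    // dwBytesRecorded holds how many bytes the driver actually filled in;
    // writing BUFSIZE would also dump whatever sits after the recorded data.
    fwrite(buf, 1, waveHeader.dwBytesRecorded, fp);
    fclose(fp);
}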