I am working in Visual Studio 12 (on Windows 10) and trying to display an image buffer using StretchDIBits(), but the call is failing and returns 0.
I don't have any header for that buffer, only the raw pixel data. I don't know how to draw the client area using only this buffer data. If anyone knows, please share your thoughts.
I have added a sample snippet below:
void DShow::setFramebuffer(HDC hDC){
BITMAPINFO m_bi;
DWORD result = 0;
m_bi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
m_bi.bmiHeader.biWidth = m_curResolutionWidth;
m_bi.bmiHeader.biHeight = m_curResolutionHeight;
m_bi.bmiHeader.biPlanes = (WORD)1;
m_bi.bmiHeader.biBitCount = (WORD)24;
m_bi.bmiHeader.biCompression = 0;
m_bi.bmiHeader.biSizeImage = (m_curResolutionWidth*m_curResolutionHeight)*(24/8);
m_bi.bmiHeader.biXPelsPerMeter = 0;
m_bi.bmiHeader.biYPelsPerMeter = 0;
m_bi.bmiHeader.biClrUsed = 0;
m_bi.bmiHeader.biClrImportant = 0;
result = StretchDIBits(hDC,
m_TaniaDestRect.left, m_TaniaDestRect.top,
m_TaniaDestRect.right - m_TaniaDestRect.left, m_TaniaDestRect.bottom - m_TaniaDestRect.top,
0, 0,
m_curResolutionWidth, abs(m_curResolutionHeight),
g_pBuffer, &m_bi,
DIB_RGB_COLORS, SRCCOPY);
}
Previously, the swap chain was set to an RGB32 format, which worked fine.
Now I want to receive YV12 data and render it.
So I changed the swap chain format to DXGI_FORMAT_420_OPAQUE.
DXGI_SWAP_CHAIN_DESC1 swapChainDesc = { 0 };
swapChainDesc.Width = 0;
swapChainDesc.Height = 0;
swapChainDesc.Format = DXGI_FORMAT_420_OPAQUE;// DXGI_FORMAT_B8G8R8A8_UNORM
swapChainDesc.Stereo = false;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 2;
swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDesc.Flags = 0;
dxgiFactory->CreateSwapChainForHwnd(_direct3DDevice, _hwnd, &swapChainDesc, nullptr, nullptr, &_DXGISwapChain);
IDXGISurface *dxgiBackBuffer;
_DXGISwapChain->GetBuffer(0, IID_PPV_ARGS(&dxgiBackBuffer));
Then dxgiBackBuffer comes back null and I get a null-reference exception when I use it.
I think that you cannot create a back buffer in a YV12 format.
Do you know if I'm wrong?
Supported swap chain formats are those listed as 'display out' in the DXGI hardware feature level format tables. No Direct3D hardware feature level supports a YUV format for display out, so you have to perform a YUV-to-RGB colorspace conversion in a shader somewhere.
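To make that last point concrete, the conversion itself is only a few multiply-adds per pixel. The sketch below is plain C++ rather than shader code and assumes limited-range BT.601 YV12 input; the function name and clamp helper are just illustrative, not part of any D3D API. In a real renderer the same math would live in a pixel shader sampling the Y and chroma planes of the YV12 texture.

#include <algorithm>
#include <cstdint>

// Clamp a float to the 0..255 byte range.
static inline uint8_t Clamp8(float v)
{
    return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, v)));
}

// BT.601 limited-range YUV -> RGB for a single pixel (u = Cb, v = Cr).
void Yuv601ToRgb(uint8_t y, uint8_t u, uint8_t v,
                 uint8_t& r, uint8_t& g, uint8_t& b)
{
    const float c = y - 16.0f;
    const float d = u - 128.0f;
    const float e = v - 128.0f;

    r = Clamp8(1.164f * c + 1.596f * e);
    g = Clamp8(1.164f * c - 0.391f * d - 0.813f * e);
    b = Clamp8(1.164f * c + 2.018f * d);
}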
I am using the vkCmdCopyImageToBuffer function and getting a memory access violation, and I don't understand why.
Here is the code:
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferRowLength = width;
region.bufferImageHeight = height;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = { 0, 0, 0 };
region.imageExtent = {
width,
height,
1
};
vkCmdCopyImageToBuffer(m_drawCmdBuffers[i], m_swapChain.buffers[i].image,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_renderImage, 1, &region);
The swapchain images are created here in the initialization code:
// Get the swap chain images
images.resize(imageCount);
VK_CHECK_RESULT(fpGetSwapchainImagesKHR(device, swapChain, &imageCount, images.data()));
// Get the swap chain buffers containing the image and imageview
buffers.resize(imageCount);
for (uint32_t i = 0; i < imageCount; i++)
{
VkImageViewCreateInfo colorAttachmentView = {};
colorAttachmentView.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
colorAttachmentView.pNext = NULL;
colorAttachmentView.format = colorFormat;
colorAttachmentView.components = {
VK_COMPONENT_SWIZZLE_R,
VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_A
};
colorAttachmentView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
colorAttachmentView.subresourceRange.baseMipLevel = 0;
colorAttachmentView.subresourceRange.levelCount = 1;
colorAttachmentView.subresourceRange.baseArrayLayer = 0;
colorAttachmentView.subresourceRange.layerCount = 1;
colorAttachmentView.viewType = VK_IMAGE_VIEW_TYPE_2D;
colorAttachmentView.flags = 0;
buffers[i].image = images[i];
colorAttachmentView.image = buffers[i].image;
VK_CHECK_RESULT(vkCreateImageView(device, &colorAttachmentView, nullptr, &buffers[i].view));
}
And my destination buffer is created here:
VkBufferCreateInfo createinfo = {};
createinfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
createinfo.size = width * height * 4 * sizeof(int8_t);
createinfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
createinfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//create the image copy buffer
vkCreateBuffer(m_device, &createinfo, NULL, &m_renderImage);
I have tried different pixel formats and different createinfo.usage settings but none help.
VkSurfaceCapabilitiesKHR::supportedUsageFlags defines the limitations on the ways in which you can use the VkImages created by the swap chain. The only usage that is guaranteed to be supported is color attachment; all of the others, including transfer src, are optional.
Therefore, you should not assume that you can copy from a presentable image. If you find yourself with a need to do that, you must first query that value. If it does not allow copies, then you must render to your own image, which you copy from. You can render from that image into the presentable one when you intend to present it.
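A minimal sketch of that query, assuming you have the VkPhysicalDevice and VkSurfaceKHR handles the swapchain was created from (and reusing the VK_CHECK_RESULT macro from the code above):

VkSurfaceCapabilitiesKHR caps = {};
VK_CHECK_RESULT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, &caps));

if (caps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
{
    // The presentable images may be used as a transfer source, so request
    // that usage at swapchain creation and vkCmdCopyImageToBuffer is legal:
    // swapchainCreateInfo.imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
else
{
    // Render to an image you own (created with TRANSFER_SRC usage), copy
    // from that, and render/blit it into the presentable image before present.
}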
I'm developing a music application for iOS using AVAudioPlayer, in which I want to implement an equalizer.
I searched the internet for a good solution and ended up with an AUGraph configuration like this:
// multichannel mixer unit
AudioComponentDescription mixer_desc;
mixer_desc.componentType = kAudioUnitType_Mixer;
mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
mixer_desc.componentFlags = 0;
mixer_desc.componentFlagsMask = 0;
// iPodEQ unit
AudioComponentDescription eq_desc;
eq_desc.componentType = kAudioUnitType_Effect;
eq_desc.componentSubType = kAudioUnitSubType_AUiPodEQ;
eq_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
eq_desc.componentFlags = 0;
eq_desc.componentFlagsMask = 0;
// output unit
AudioComponentDescription output_desc;
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_GenericOutput;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;
// create a new AUGraph
OSStatus result = NewAUGraph(&mGraph);
// Add Audio Nodes to graph
AUNode outputNode;
AUNode eqNode;
AUNode mixerNode;
AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
AUGraphAddNode(mGraph, &eq_desc, &eqNode);
AUGraphAddNode(mGraph, &output_desc, &outputNode);
// open the graph AudioUnits (but not initialized)
result = AUGraphOpen(mGraph);
// grab the audio unit instances from the nodes
AudioUnit mEQ;
AudioUnit mMixer;
result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
result = AUGraphNodeInfo(mGraph, eqNode, NULL, &mEQ);
// set number of input buses for the mixer Audio Unit
UInt32 numbuses = 0;
AudioUnitSetProperty ( mMixer, kAudioUnitProperty_ElementCount,
kAudioUnitScope_Input, 0, &numbuses, sizeof(numbuses));
// get the equalizer factory presets list
CFArrayRef mEQPresetsArray;
UInt32 sizeof1 = sizeof(mEQPresetsArray);
AudioUnitGetProperty(mEQ, kAudioUnitProperty_FactoryPresets,
kAudioUnitScope_Global, 0, &mEQPresetsArray, &sizeof1);
result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, eqNode, 0);
result = AUGraphConnectNodeInput(mGraph, eqNode, 0, outputNode, 0);
AudioUnitSetParameter(mMixer, kMultiChannelMixerParam_Enable, kAudioUnitScope_Input, 0, 1, 0);
AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, 7);
AudioUnitSetProperty (mEQ, kAudioUnitProperty_PresentPreset,
kAudioUnitScope_Global, 0, aPreset, sizeof(AUPreset));
AUGraphInitialize(mGraph);
AUGraphStart(mGraph);
The AUGraph is running, but the EQ isn't applied. The argument '7' in AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, 7); is the index of the equalizer preset that should be applied (Electronic).
I got that index from logging the values of the mEQPresetsArray-Array:
for (int i = 0; i < CFArrayGetCount(mEQPresetsArray); i++) {
AUPreset *aPreset = (AUPreset*)CFArrayGetValueAtIndex(mEQPresetsArray, i);
NSLog(@"%d: %@", (int)aPreset->presetNumber, aPreset->presetName);
}
How can I solve my problem? I've already tried NVDSP, but it didn't seem to work either. I didn't find any other solution on the internet.
Thanks in advance, Fabian.
If this is for iOS then you need to use kAudioUnitSubType_RemoteIO instead of kAudioUnitSubType_GenericOutput.
You cannot use AVAudioPlayer to do your EQ; you need AVPlayer.
See here for a sample project using the audio tap:
https://developer.apple.com/library/ios/samplecode/AudioTapProcessor/Introduction/Intro.html
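To illustrate the first point, only the output unit description needs to change; a sketch of the replacement block (everything else in the graph setup stays as in the question):

// output unit: use RemoteIO on iOS so the graph renders to the hardware output
AudioComponentDescription output_desc;
output_desc.componentType = kAudioUnitType_Output;
output_desc.componentSubType = kAudioUnitSubType_RemoteIO;
output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
output_desc.componentFlags = 0;
output_desc.componentFlagsMask = 0;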
I am writing a Mac OS X application to capture some audio from the microphone with echo cancellation. I am creating an AudioUnit of type VoiceProcessingIO. I want to output the audio as signed-integer linear PCM. However, when I specify a signed-integer output sample format, I get an "Unsupported Format" error.
How can I configure the AudioUnit to output data in the signed integer format? Here is how I am configuring it right now. If I try replacing kAudioFormatFlagIsFloat with kAudioFormatFlagIsSignedInteger, then I get an error :(
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status = AudioComponentInstanceNew(comp, &_audioUnit);
...
const int sampleSize = 2;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = 16000;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamFormat.mBytesPerPacket = sampleSize;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = sampleSize;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = sampleSize * eight_bits_per_byte;
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &streamFormat, sizeof(streamFormat));
// status = UnsupportedFormatError
I decided that I can't do it directly. I used the AudioConverter API to convert the floating-point output to signed integer. On the bright side, AudioConverter is surprisingly easy to use!
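For reference, a rough sketch of what that conversion can look like, assuming a mono 16 kHz 32-bit float source converted to 16-bit signed integer; numFrames, inList, and outList are placeholders for AudioBufferLists you manage yourself, and error handling is omitted:

AudioStreamBasicDescription floatFormat = {0};
floatFormat.mSampleRate = 16000;
floatFormat.mFormatID = kAudioFormatLinearPCM;
floatFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
floatFormat.mBitsPerChannel = 32;
floatFormat.mChannelsPerFrame = 1;
floatFormat.mFramesPerPacket = 1;
floatFormat.mBytesPerFrame = 4;
floatFormat.mBytesPerPacket = 4;

AudioStreamBasicDescription intFormat = {0};
intFormat.mSampleRate = 16000;
intFormat.mFormatID = kAudioFormatLinearPCM;
intFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
intFormat.mBitsPerChannel = 16;
intFormat.mChannelsPerFrame = 1;
intFormat.mFramesPerPacket = 1;
intFormat.mBytesPerFrame = 2;
intFormat.mBytesPerPacket = 2;

AudioConverterRef converter = NULL;
AudioConverterNew(&floatFormat, &intFormat, &converter);

// Same sample rate and frame count on both sides, so the simple
// buffer-to-buffer call is sufficient here.
AudioConverterConvertComplexBuffer(converter, numFrames, &inList, &outList);

AudioConverterDispose(converter);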
I'm using D3DXSaveSurfaceToFile to save windowed Direct3D 9 surfaces to PNG, BMP and JPG files. There are no errors returned from the D3DXSaveSurfaceToFile call and all files open fine in Windows Photo Viewer and Paint. But they will not open in a higher end image editing program such as Paint Shop Pro or Photoshop. The error messages from these programs basically say that the file is corrupted. If I open the files in Paint and then save them in the same file format with a different file name, then they'll open fine in the other programs.
This leads me to believe that D3DXSaveSurfaceToFile is writing out non-standard versions of these file formats. Is there some way I can get this function to write out files that can be opened in programs like Photoshop without the intermediate step of resaving the files in Paint? Or is there another function I should be using that does a better job of saving a Direct3D surface to an image?
Take a look at the file in an image metadata viewer. What does it tell you?
Unfortunately D3DXSaveSurfaceToFile() isn't the most stable (it's also exceptionally slow). Personally I do something like the code below. It works even on anti-aliased displays by doing an offscreen render to take the screenshot and then copying it into a buffer, and it supports only the most common pixel formats. Sorry for any errors in it; I pulled it out of an app I used to work on.
You can then, in your own code and probably on another thread, convert that bitmap to anything you like.
void HandleScreenshot(IDirect3DDevice9* device)
{
DWORD tcHandleScreenshot = GetTickCount();
LPDIRECT3DSURFACE9 pd3dsBack = NULL;
LPDIRECT3DSURFACE9 pd3dsTemp = NULL;
// Grab the back buffer into a surface
if ( SUCCEEDED ( device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &pd3dsBack) ))
{
D3DSURFACE_DESC desc;
pd3dsBack->GetDesc(&desc);
LPDIRECT3DSURFACE9 pd3dsCopy = NULL;
if (desc.MultiSampleType != D3DMULTISAMPLE_NONE)
{
if (SUCCEEDED(device->CreateRenderTarget(desc.Width, desc.Height, desc.Format, D3DMULTISAMPLE_NONE, 0, FALSE, &pd3dsCopy, NULL)))
{
if (SUCCEEDED(device->StretchRect(pd3dsBack, NULL, pd3dsCopy, NULL, D3DTEXF_NONE)))
{
pd3dsBack->Release();
pd3dsBack = pd3dsCopy;
}
else
{
pd3dsCopy->Release();
}
}
}
if (SUCCEEDED(device->CreateOffscreenPlainSurface(desc.Width, desc.Height, desc.Format, D3DPOOL_SYSTEMMEM, &pd3dsTemp, NULL)))
{
DWORD tmpTimeGRTD = GetTickCount();
if (SUCCEEDED(device->GetRenderTargetData(pd3dsBack, pd3dsTemp)))
{
D3DLOCKED_RECT lockedSrcRect;
if (SUCCEEDED(pd3dsTemp->LockRect(&lockedSrcRect, NULL, D3DLOCK_READONLY | D3DLOCK_NOSYSLOCK | D3DLOCK_NO_DIRTY_UPDATE)))
{
int nSize = desc.Width * desc.Height * 3; // assumes desc.Width * 3 is a multiple of 4 (BMP rows must be DWORD-aligned)
BYTE* pixels = new BYTE[nSize +1];
int iSrcPitch = lockedSrcRect.Pitch;
BYTE* pSrcRow = (BYTE*)lockedSrcRect.pBits;
LPBYTE lpDest = pixels;
LPDWORD lpSrc;
switch (desc.Format)
{
case D3DFMT_A8R8G8B8:
case D3DFMT_X8R8G8B8:
for (int y = desc.Height - 1; y >= 0; y--)
{
lpSrc = reinterpret_cast<LPDWORD>(lockedSrcRect.pBits) + y * desc.Width;
for (unsigned int x = 0; x < desc.Width; x++)
{
*reinterpret_cast<LPDWORD>(lpDest) = *lpSrc;
lpSrc++; // increment source pointer by 1 DWORD
lpDest += 3; // increment destination pointer by 3 bytes
}
}
break;
default:
ZeroMemory(pixels, nSize);
}
pd3dsTemp->UnlockRect();
BITMAPINFOHEADER header;
header.biWidth = desc.Width;
header.biHeight = desc.Height;
header.biSizeImage = nSize;
header.biSize = sizeof(BITMAPINFOHEADER);
header.biPlanes = 1;
header.biBitCount = 3 * 8; // RGB
header.biCompression = 0;
header.biXPelsPerMeter = 0;
header.biYPelsPerMeter = 0;
header.biClrUsed = 0;
header.biClrImportant = 0;
BITMAPFILEHEADER bfh = {0};
bfh.bfType = 0x4d42;
bfh.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
bfh.bfSize = bfh.bfOffBits + nSize;
unsigned int rough_size = sizeof(BITMAPINFOHEADER) + sizeof(BITMAPFILEHEADER) + nSize;
unsigned char* p = new unsigned char[rough_size];
unsigned char* cursor = p;
memcpy(cursor, &bfh, sizeof(BITMAPFILEHEADER));
cursor += sizeof(BITMAPFILEHEADER);
memcpy(cursor, &header, sizeof(BITMAPINFOHEADER));
cursor += sizeof(BITMAPINFOHEADER);
memcpy(cursor, pixels, nSize);
delete [] pixels;
/**********************************************/
// p now has a full BMP file, write it out here
}
}
pd3dsTemp->Release();
}
pd3dsBack->Release();
}
}
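For completeness, a minimal way to write the buffer out at the point marked "p now has a full BMP file, write it out here" (assuming <cstdio> is included; the file name is just an example):

// Dump the in-memory BMP to disk and free the buffer afterwards.
FILE* f = fopen("screenshot.bmp", "wb");
if (f)
{
    fwrite(p, 1, rough_size, f);
    fclose(f);
}
delete [] p;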
Turns out that it was a combination of a bug in my code and Paint being more forgiving than Photoshop when it comes to reading files. The bug in my code caused the files to be saved with the wrong extension (i.e. Image.bmp was actually saved using D3DXIFF_JPG). When opening a file that contained a JPG image but had a BMP extension, Photoshop just rejected the file. I guess Paint worked because it ignored the file extension and just decoded the file contents.
Looking at the file in an image metadata viewer helped me to see the problem.