How to control the Microphone Boost in Windows 7?

I am trying to control the Microphone Boost (level and mute state) in Windows 7 from a C/C++ application using the Mixer API, but the API does not expose those controls. Can it be done using WASAPI? Can somebody suggest any other API to control the Microphone Boost in Windows 7?
This is what I have written so far ...
const IID IID_IDeviceTopology = __uuidof(IDeviceTopology);
const IID IID_IPart = __uuidof(IPart);
const IID IID_IAudioAutoGainControl = __uuidof(IAudioAutoGainControl);
HRESULT hr = S_OK;
CoInitialize(NULL);
IMMDeviceEnumerator *deviceEnumerator = NULL;
hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER, __uuidof(IMMDeviceEnumerator), (LPVOID *)&deviceEnumerator);
IMMDevice *pEndptDev = NULL;
hr = deviceEnumerator->GetDefaultAudioEndpoint(eCapture, eConsole, &pEndptDev);
deviceEnumerator->Release();
deviceEnumerator = NULL;
IDeviceTopology *pDevTopoEndpt = NULL;
IConnector *pConnEndpt = NULL;
IConnector *pConnHWDev = NULL;
IPart *pPartConn = NULL;
IAudioAutoGainControl *pAGC = NULL;
IControlInterface *pControl = NULL;
UINT pCount = 0;
GUID guidControl = { 0 };
// Get the endpoint device's IDeviceTopology interface.
hr = pEndptDev->Activate(IID_IDeviceTopology, CLSCTX_ALL, NULL, (void**)&pDevTopoEndpt);
// The device topology for an endpoint device always
// contains just one connector (connector number 0).
hr = pDevTopoEndpt->GetConnector(0, &pConnEndpt);
// Use the connector in the endpoint device to get the
// connector in the adapter device.
hr = pConnEndpt->GetConnectedTo(&pConnHWDev);
// Query the connector in the adapter device for
// its IPart interface.
hr = pConnHWDev->QueryInterface(IID_IPart, (void**)&pPartConn);
// Try to activate the automatic gain control (AGC) interface
// on the connector part, if the driver exposes one.
hr = pPartConn->Activate(CLSCTX_ALL, IID_IAudioAutoGainControl, (void**)&pAGC);
hr = pPartConn->GetControlInterfaceCount(&pCount);
hr = pPartConn->GetControlInterface(pCount - 1, &pControl);
hr = pControl->GetIID(&guidControl);
//BOOL bEnabled = false;
hr = pAGC->SetEnabled(TRUE, &guidControl);

WASAPI is the way to do this.
http://msdn.microsoft.com/en-us/library/windows/desktop/dd316531%28v=vs.85%29.aspx
http://msdn.microsoft.com/en-us/library/windows/desktop/dd370853%28v=vs.85%29.aspx
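For what it's worth, here is a minimal sketch of that route (error handling trimmed): starting from the adapter-side connector part (pPartConn in the code above), walk the parts upstream toward the microphone and probe each one for IAudioVolumeLevel. Note that exposing the boost as a part named "Microphone Boost" is driver-specific, so the name check and the +10 dB value below are assumptions, not guarantees:
#include <devicetopology.h>

void FindBoostControl(IPart *pPart)
{
    // Probe this part for a hardware volume control; on many drivers the
    // boost shows up as an IAudioVolumeLevel on a part named "Microphone Boost".
    IAudioVolumeLevel *pVolume = NULL;
    if (SUCCEEDED(pPart->Activate(CLSCTX_ALL, __uuidof(IAudioVolumeLevel), (void**)&pVolume)))
    {
        LPWSTR pszName = NULL;
        if (SUCCEEDED(pPart->GetName(&pszName)) && pszName != NULL)
        {
            if (wcscmp(pszName, L"Microphone Boost") == 0)
                pVolume->SetLevelUniform(10.0f, NULL); // assumed +10 dB boost
            CoTaskMemFree(pszName);
        }
        pVolume->Release();
    }
    // Recurse upstream (against the data flow) toward the microphone jack.
    IPartsList *pParts = NULL;
    if (SUCCEEDED(pPart->EnumPartsIncoming(&pParts)))
    {
        UINT cParts = 0;
        pParts->GetCount(&cParts);
        for (UINT i = 0; i < cParts; i++)
        {
            IPart *pNext = NULL;
            if (SUCCEEDED(pParts->GetPart(i, &pNext)))
            {
                FindBoostControl(pNext);
                pNext->Release();
            }
        }
        pParts->Release();
    }
}
Called as FindBoostControl(pPartConn); after the topology code in the question.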

Related

How to get extended file-attributes in Windows in Python/Go/C/Batch?

I have searched a lot and found some approaches for reading extended file attributes, but they are in C# using that language's built-in APIs. I am trying to find the author name for a file in Windows, but my requirement is Go/Python/C/Batch (in order of priority).
In Python, the third-party packages (exifread and hachoir_metadata) are not working (they give no result for a sample doc/xlsx file; maybe the package I am installing via pip is broken).
Is there any other way, or any user-level MSDN API available?
Please let me know if you have any experience with this. Thanks.
In C, C++ or another language, you can get file properties with the IPropertyStore interface.
For example, for a .jpg file (tested on Windows 10, VS 2015) I get for Author:
System.Author(Auteurs) = Test Auteur
PIDLIST_ABSOLUTE pidl = ILCreateFromPath(L"E:\\icon_rose.jpg");
if (pidl != NULL)
{
    IPropertyStore *pps;
    HRESULT hr = SHGetPropertyStoreFromIDList(pidl, GPS_DEFAULT, IID_PPV_ARGS(&pps));
    if (SUCCEEDED(hr))
    {
        DWORD dwCount;
        hr = pps->GetCount(&dwCount);
        PROPERTYKEY propKey;
        for (DWORD i = 0; i < dwCount; ++i)
        {
            hr = pps->GetAt(i, &propKey);
            if (SUCCEEDED(hr))
            {
                PWSTR pszCanonicalName = NULL;
                hr = PSGetNameFromPropertyKey(propKey, &pszCanonicalName);
                PWSTR pszDescriptionName = NULL;
                IPropertyDescription *ppd;
                hr = PSGetPropertyDescription(propKey, IID_PPV_ARGS(&ppd));
                if (SUCCEEDED(hr))
                {
                    hr = ppd->GetDisplayName(&pszDescriptionName);
                    ppd->Release();
                }
                PROPVARIANT propvarValue = { 0 };
                hr = pps->GetValue(propKey, &propvarValue);
                if (SUCCEEDED(hr))
                {
                    PWSTR pszDisplayValue = NULL;
                    hr = PSFormatForDisplayAlloc(propKey, propvarValue, PDFF_DEFAULT, &pszDisplayValue);
                    if (SUCCEEDED(hr))
                    {
                        WCHAR wsBuffer[255];
                        wsprintf(wsBuffer, L"%s(%s) = %s\n", pszCanonicalName, (pszDescriptionName == NULL ? L"Unknown" : pszDescriptionName), pszDisplayValue);
                        OutputDebugString(wsBuffer);
                        CoTaskMemFree(pszDisplayValue);
                    }
                    PropVariantClear(&propvarValue);
                }
                if (pszCanonicalName != NULL)
                    CoTaskMemFree(pszCanonicalName);
                if (pszDescriptionName != NULL)
                    CoTaskMemFree(pszDescriptionName);
            }
        }
        pps->Release();
    }
    ILFree(pidl);
}
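Note: the snippet assumes COM is already initialized (CoInitialize/CoInitializeEx) and links against shell32.lib (for ILCreateFromPath and SHGetPropertyStoreFromIDList) and propsys.lib (for PSGetNameFromPropertyKey, PSGetPropertyDescription and PSFormatForDisplayAlloc).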

Add audio capability to Capture Filter

I'm trying to add audio capability to a capture source filter in order to make a virtual cam with audio. Starting from TMH's and rdp's code, I extended it with another pin, called "Audio":
CUnknown * WINAPI CVCam::CreateInstance(LPUNKNOWN lpunk, HRESULT *phr)
{
    ASSERT(phr);
    CUnknown *punk = new CVCam(lpunk, phr);
    return punk;
}

CVCam::CVCam(LPUNKNOWN lpunk, HRESULT *phr) : CSource(LPCSTR(FILTER_NAME), lpunk, CLSID_VirtualCam)
{
    ASSERT(phr);
    CAutoLock cAutoLock(&m_cStateLock);
    m_paStreams = (CSourceStream **) new CVCamStream*[2];
    m_paStreams[0] = new CVCamStream(phr, this, L"Video");
    m_paStreams[1] = new CVAudioStream(phr, this, L"Audio");
}

HRESULT CVCam::QueryInterface(REFIID riid, void **ppv)
{
    if (riid == __uuidof(IAMStreamConfig) || riid == __uuidof(IKsPropertySet))
    {
        HRESULT hr;
        hr = m_paStreams[0]->QueryInterface(riid, ppv);
        if (hr != S_OK) return hr;
        hr = m_paStreams[1]->QueryInterface(riid, ppv);
        if (hr != S_OK) return hr;
    }
    else return CSource::QueryInterface(riid, ppv);
    return S_OK;
}

CVAudioStream::CVAudioStream(HRESULT *phr, CVCam *pParent, LPCWSTR pPinName) : CSourceStream(LPCSTR(pPinName), phr, pParent, pPinName), m_pParent(pParent)
{
    GetMediaType(0, &m_mt);
}

CVAudioStream::~CVAudioStream()
{
}

HRESULT CVAudioStream::QueryInterface(REFIID riid, void **ppv)
{
    if (riid == __uuidof(IAMStreamConfig)) *ppv = (IAMStreamConfig*)this;
    else if (riid == __uuidof(IKsPropertySet)) *ppv = (IKsPropertySet*)this;
    else if (riid == __uuidof(IAMBufferNegotiation)) *ppv = (IAMBufferNegotiation*)this;
    else return CSourceStream::QueryInterface(riid, ppv);
    AddRef();
    return S_OK;
}
HRESULT CVAudioStream::FillBuffer(IMediaSample *pms)
{
    // Placeholder: deliver silence; real Windows audio samples would be copied in here.
    BYTE *pData = NULL;
    HRESULT hr = pms->GetPointer(&pData);
    if (FAILED(hr)) return hr;
    ZeroMemory(pData, pms->GetSize());
    pms->SetActualDataLength(pms->GetSize());
    return NOERROR;
}
STDMETHODIMP CVAudioStream::Notify(IBaseFilter * pSender, Quality q)
{
    return E_NOTIMPL;
}

HRESULT CVAudioStream::SetMediaType(const CMediaType *pmt)
{
    HRESULT hr = CSourceStream::SetMediaType(pmt);
    return hr;
}

HRESULT setupPwfex(WAVEFORMATEX *pwfex, AM_MEDIA_TYPE *pmt)
{
    pwfex->wFormatTag = WAVE_FORMAT_PCM;
    pwfex->cbSize = 0;
    pwfex->nChannels = 2;
    HRESULT hr;
    pwfex->nSamplesPerSec = 11025;
    pwfex->wBitsPerSample = 16;
    pwfex->nBlockAlign = (WORD)((pwfex->wBitsPerSample * pwfex->nChannels) / 8);
    pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nBlockAlign;
    hr = ::CreateAudioMediaType(pwfex, pmt, FALSE);
    return hr;
}

/*HRESULT CVAudioStream::setAsNormal(CMediaType *pmt)
{
    WAVEFORMATEX *pwfex;
    pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
    ZeroMemory(pwfex, sizeof(WAVEFORMATEX));
    if (NULL == pwfex) return E_OUTOFMEMORY;
    return setupPwfex(pwfex, pmt);
}*/

HRESULT CVAudioStream::GetMediaType(int iPosition, CMediaType *pmt)
{
    if (iPosition < 0) return E_INVALIDARG;
    if (iPosition > 0) return VFW_S_NO_MORE_ITEMS;
    if (iPosition == 0)
    {
        *pmt = m_mt;
        return S_OK;
    }
    WAVEFORMATEX *pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
    setupPwfex(pwfex, pmt);
    return S_OK;
}

HRESULT CVAudioStream::CheckMediaType(const CMediaType *pMediaType)
{
    int cbFormat = pMediaType->cbFormat;
    if (*pMediaType != m_mt) return E_INVALIDARG;
    return S_OK;
}
const int WaveBufferChunkSize = 16 * 1024;

HRESULT CVAudioStream::DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProperties)
{
    CheckPointer(pAlloc, E_POINTER);
    CheckPointer(pProperties, E_POINTER);
    WAVEFORMATEX *pwfexCurrent = (WAVEFORMATEX*)m_mt.Format();
    pProperties->cBuffers = 1;
    pProperties->cbBuffer = expectedMaxBufferSize;
    ALLOCATOR_PROPERTIES Actual;
    HRESULT hr = pAlloc->SetProperties(pProperties, &Actual);
    if (FAILED(hr)) return hr;
    if (Actual.cbBuffer < pProperties->cbBuffer) return E_FAIL;
    return NOERROR;
}

HRESULT CVAudioStream::OnThreadCreate()
{
    //GetMediaType(0, &m_mt);
    //HRESULT hr = LoopbackCaptureSetup();
    //if (FAILED(hr)) return hr;
    return NOERROR;
}
HRESULT STDMETHODCALLTYPE CVAudioStream::SetFormat(AM_MEDIA_TYPE *pmt)
{
    if (!pmt) return S_OK;
    if (CheckMediaType((CMediaType *)pmt) != S_OK) return E_FAIL;
    m_mt = *pmt;
    IPin* pin;
    ConnectedTo(&pin);
    if (pin)
    {
        IFilterGraph *pGraph = m_pParent->GetGraph();
        pGraph->Reconnect(this);
    }
    return S_OK;
}

HRESULT STDMETHODCALLTYPE CVAudioStream::GetFormat(AM_MEDIA_TYPE **ppmt)
{
    *ppmt = CreateMediaType(&m_mt);
    return S_OK;
}

HRESULT STDMETHODCALLTYPE CVAudioStream::GetNumberOfCapabilities(int *piCount, int *piSize)
{
    *piCount = 1;
    *piSize = sizeof(AUDIO_STREAM_CONFIG_CAPS);
    return S_OK;
}

HRESULT STDMETHODCALLTYPE CVAudioStream::GetStreamCaps(int iIndex, AM_MEDIA_TYPE **pmt, BYTE *pSCC)
{
    if (iIndex < 0) return E_INVALIDARG;
    if (iIndex > 0) return S_FALSE;
    if (pSCC == NULL) return E_POINTER;
    *pmt = CreateMediaType(&m_mt);
    if (*pmt == NULL) return E_OUTOFMEMORY;
    DECLARE_PTR(WAVEFORMATEX, pAudioFormat, (*pmt)->pbFormat);
    AM_MEDIA_TYPE * pm = *pmt;
    setupPwfex(pAudioFormat, pm);
    AUDIO_STREAM_CONFIG_CAPS* pASCC = (AUDIO_STREAM_CONFIG_CAPS*)pSCC;
    ZeroMemory(pSCC, sizeof(AUDIO_STREAM_CONFIG_CAPS));
    pASCC->guid = MEDIATYPE_Audio;
    pASCC->MaximumChannels = pAudioFormat->nChannels;
    pASCC->MinimumChannels = pAudioFormat->nChannels;
    pASCC->ChannelsGranularity = 1; // doesn't matter
    pASCC->MaximumSampleFrequency = pAudioFormat->nSamplesPerSec;
    pASCC->MinimumSampleFrequency = pAudioFormat->nSamplesPerSec;
    pASCC->SampleFrequencyGranularity = 11025; // doesn't matter
    pASCC->MaximumBitsPerSample = pAudioFormat->wBitsPerSample;
    pASCC->MinimumBitsPerSample = pAudioFormat->wBitsPerSample;
    pASCC->BitsPerSampleGranularity = 16; // doesn't matter
    return S_OK;
}

HRESULT CVAudioStream::Set(REFGUID guidPropSet, DWORD dwID, void *pInstanceData, DWORD cbInstanceData, void *pPropData, DWORD cbPropData)
{
    return E_NOTIMPL;
}

HRESULT CVAudioStream::Get(
    REFGUID guidPropSet,
    DWORD dwPropID,
    void *pInstanceData,
    DWORD cbInstanceData,
    void *pPropData,
    DWORD cbPropData,
    DWORD *pcbReturned
)
{
    if (guidPropSet != AMPROPSETID_Pin) return E_PROP_SET_UNSUPPORTED;
    if (dwPropID != AMPROPERTY_PIN_CATEGORY) return E_PROP_ID_UNSUPPORTED;
    if (pPropData == NULL && pcbReturned == NULL) return E_POINTER;
    if (pcbReturned) *pcbReturned = sizeof(GUID);
    if (pPropData == NULL) return S_OK;
    if (cbPropData < sizeof(GUID)) return E_UNEXPECTED;
    *(GUID *)pPropData = PIN_CATEGORY_CAPTURE;
    return S_OK;
}

HRESULT CVAudioStream::QuerySupported(REFGUID guidPropSet, DWORD dwPropID, DWORD *pTypeSupport)
{
    if (guidPropSet != AMPROPSETID_Pin) return E_PROP_SET_UNSUPPORTED;
    if (dwPropID != AMPROPERTY_PIN_CATEGORY) return E_PROP_ID_UNSUPPORTED;
    if (pTypeSupport) *pTypeSupport = KSPROPERTY_SUPPORT_GET;
    return S_OK;
}
My first issue is when I insert the filter in GraphStudioNext and open its properties page. The Audio pin shows the following (incorrect) information:
majorType = GUID_NULL
subType = GUID_NULL
formattype = GUID_NULL
Of course I cannot connect anything to that pin because it is not valid.
I was expecting something like MEDIATYPE_Audio, because I set it up:
DEFINE_GUID(CLSID_VirtualCam, 0x8e14549a, 0xdb61, 0x4309, 0xaf, 0xa1, 0x35, 0x78, 0xe9, 0x27, 0xe9, 0x33);

const AMOVIESETUP_MEDIATYPE AMSMediaTypesVideo =
{
    &MEDIATYPE_Video,
    &MEDIASUBTYPE_NULL
};

const AMOVIESETUP_MEDIATYPE AMSMediaTypesAudio =
{
    &MEDIATYPE_Audio,
    &MEDIASUBTYPE_NULL
};

const AMOVIESETUP_PIN AMSPinVCam[] =
{
    {
        L"Video",            // Pin string name
        FALSE,               // Is it rendered
        TRUE,                // Is it an output
        FALSE,               // Can we have none
        FALSE,               // Can we have many
        &CLSID_NULL,         // Connects to filter
        NULL,                // Connects to pin
        1,                   // Number of types
        &AMSMediaTypesVideo  // Pin Media types
    },
    {
        L"Audio",            // Pin string name
        FALSE,               // Is it rendered
        TRUE,                // Is it an output
        FALSE,               // Can we have none
        FALSE,               // Can we have many
        &CLSID_NULL,         // Connects to filter
        NULL,                // Connects to pin
        1,                   // Number of types
        &AMSMediaTypesAudio  // Pin Media types
    }
};

const AMOVIESETUP_FILTER AMSFilterVCam =
{
    &CLSID_VirtualCam,  // Filter CLSID
    FILTER_NAME,        // String name
    MERIT_DO_NOT_USE,   // Filter merit
    2,                  // Number pins
    AMSPinVCam          // Pin details
};
CFactoryTemplate g_Templates[] =
{
    {
        FILTER_NAME,
        &CLSID_VirtualCam,
        CVCam::CreateInstance,
        NULL,
        &AMSFilterVCam
    },
};
int g_cTemplates = sizeof(g_Templates) / sizeof(g_Templates[0]);

STDAPI RegisterFilters(BOOL bRegister)
{
    HRESULT hr = NOERROR;
    WCHAR achFileName[MAX_PATH];
    char achTemp[MAX_PATH];
    ASSERT(g_hInst != 0);
    if (0 == GetModuleFileNameA(g_hInst, achTemp, sizeof(achTemp))) return AmHresultFromWin32(GetLastError());
    MultiByteToWideChar(CP_ACP, 0L, achTemp, lstrlenA(achTemp) + 1, achFileName, NUMELMS(achFileName));
    hr = CoInitialize(0);
    if (bRegister)
    {
        hr = AMovieSetupRegisterServer(CLSID_VirtualCam, FILTER_NAME, achFileName, L"Both", L"InprocServer32");
    }
    if (SUCCEEDED(hr))
    {
        IFilterMapper2 *fm = 0;
        hr = CreateComObject(CLSID_FilterMapper2, IID_IFilterMapper2, fm);
        if (SUCCEEDED(hr))
        {
            if (bRegister)
            {
                IMoniker *pMoniker = 0;
                REGFILTER2 rf2;
                rf2.dwVersion = 1;
                rf2.dwMerit = MERIT_DO_NOT_USE;
                rf2.cPins = 2;
                rf2.rgPins = AMSPinVCam;
                hr = fm->RegisterFilter(CLSID_VirtualCam, FILTER_NAME, &pMoniker, &CLSID_VideoInputDeviceCategory, NULL, &rf2);
            }
            else
            {
                hr = fm->UnregisterFilter(&CLSID_VideoInputDeviceCategory, 0, CLSID_VirtualCam);
            }
        }
        if (fm) fm->Release();
    }
    if (SUCCEEDED(hr) && !bRegister) hr = AMovieSetupUnregisterServer(CLSID_VirtualCam);
    CoFreeUnusedLibraries();
    CoUninitialize();
    return hr;
}
Second issue: there's also a "Latency" tab, but when I click on it GraphStudioNext hangs forever, and the VS debugger (which is attached to that process) says nothing. What piece of code controls this tab?
UPDATE
Solved first issue:
HRESULT CVAudioStream::GetMediaType(int iPosition, CMediaType *pmt)
{
    if (iPosition < 0) return E_INVALIDARG;
    if (iPosition > 0) return VFW_S_NO_MORE_ITEMS;
    WAVEFORMATEX *pwfex = (WAVEFORMATEX *)pmt->AllocFormatBuffer(sizeof(WAVEFORMATEX));
    setupPwfex(pwfex, pmt);
    pmt->SetType(&MEDIATYPE_Audio);
    pmt->SetFormatType(&FORMAT_WaveFormatEx);
    pmt->SetTemporalCompression(FALSE);
    pmt->SetSubtype(&MEDIASUBTYPE_PCM);
    pmt->SetSampleSize(pwfex->nBlockAlign);
    return S_OK;
}
Short version: Microsoft does not really offer an API to supply a virtual audio device in a way that applications would accept it as if it were a real audio capture device.
While virtual video capture filters often work for historical reasons, the same is not true for audio. A kernel-level driver that implements an audio device is the way to add an audio device that applications will recognize.
The Latency tab shows up because you pretended to implement the IAMBufferNegotiation interface:
if (riid == __uuidof(IAMBufferNegotiation)) *ppv = (IAMBufferNegotiation*)this;
The implementation is likely incorrect, which results in unexpected behavior (freeze, crash etc.).
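If you do not actually implement buffer negotiation, a minimal fix (a sketch based on the QueryInterface from the question) is to stop answering for that IID, so the Latency page is never created:
HRESULT CVAudioStream::QueryInterface(REFIID riid, void **ppv)
{
    if (riid == __uuidof(IAMStreamConfig)) *ppv = (IAMStreamConfig*)this;
    else if (riid == __uuidof(IKsPropertySet)) *ppv = (IKsPropertySet*)this;
    // no IAMBufferNegotiation branch: the base class rejects the query
    else return CSourceStream::QueryInterface(riid, ppv);
    AddRef();
    return S_OK;
}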
Adding an audio pin on the same filter is possible but might not be the best idea if you expect the stream to be picked up as an artificial source. It makes sense in general, but real devices almost never expose audio streams like this.
Long story short, the only application that could utilize an audio stream like this is the one you develop yourself: no well-known application attempts to locate an audio pin on a video source filter. For this reason, implementing IAMStreamConfig and especially IKsPropertySet on such a pin is useless.
You will also not be able to register the filter under the Audio Capture Sources category, because you register a filter, and this filter exposes a video output pin first and only then some secondary audio. If you target an application that consumes audio via DirectShow (which is already pretty rare, for reasons beyond the scope of this question), you should rather develop a separate source filter. You can of course have the two filters talk to each other behind the scenes to deliver a certain feed collaboratively, but in DirectShow terms it is typical for the filters to appear independent.
...also, real webcams expose two different filters, and this is why in applications like Skype we have to select them separately under video and audio devices.
Would it be better to create two completely different projects and filters: one for video and one for audio?
Real and typical camera:
Since "real" physical cameras are typically provided with kernel level drivers, their presence in DirectShow takes place through WDM Video Capture Filter which acts as a proxy and enumerates "DirectShow wrappers" of camera drivers under the same category Video Capture Sources where you would register virtual cameras.
That is, such design enables you to mix real and virtual cameras in the list of available devices, which DirectShow based application use when it comes to video capture. This approach has its limitations, which I described earlier e.g. in this question and referenced post Applicability of Virtual DirectShow Sources.
As DirectShow's successor Media Foundation have not had good reception in general, and in addition Media Foundation offers neither good backward compatibility nor video capture extensibility, a multitude of applications including Microsoft's own are still consuming video capture via DirectShow. Vice versa those who look into video capture API for Windows are also often interested in DirectShow and not the "current" API because of availability of samples and related information, API extensibility, application integration options.
It is not the case with audio, however. DirectShow audio capture was not top-notch already at the time DirectShow development stopped. Windows Vista introduces new API for audio WASAPI and DirectShow did not receive a respective connection to the new API, neither for audio capture nor for playback. Audio is simpler itself, and WASAPI was powerful and developer friendly, so developers started switching to the new API for audio related tasks. Much fewer applications use DirectShow for audio capture and your implementing virtual audio source is likely to be a miss: your device will remain "invisible" for applications consuming audio capture via WASAPI. Even if an application has a fallback code patch for Windows XP to do audio capture via DirectShow, it will hardly be a relief for you in newer OSes.
Follow up reading on audio on StackOverflow:
Directshow.net don't detect all mics in Windows 7
Write an audio source filter for use as Lync microphone
Windows Audio and Video Capture Software Paradigm
Also, you don't have to have separate projects for the video and audio filters. You can mix them in the same project; they can just be independent filters registered separately, as in the sketch below.
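For instance, a single DLL can register both filters through two CFactoryTemplate entries; a rough sketch, where the audio-side names CLSID_VirtualMic, CVMic and AMSFilterVMic are hypothetical:
CFactoryTemplate g_Templates[] =
{
    // video source filter, as in the question
    { FILTER_NAME, &CLSID_VirtualCam, CVCam::CreateInstance, NULL, &AMSFilterVCam },
    // independent audio source filter, registered separately
    { L"Virtual Mic", &CLSID_VirtualMic, CVMic::CreateInstance, NULL, &AMSFilterVMic },
};
int g_cTemplates = sizeof(g_Templates) / sizeof(g_Templates[0]);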

Direct2d CreateSharedBitmap crashes

I'm trying to create a shared bitmap to share a D3D11 Texture2D with Direct2D rendering. After creating the texture and render target, I attempt to make the shared bitmap; however, the call crashes with a memory access error inside d2d1!GetParentTexture. My code is as follows:
UINT creationFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
D3D_FEATURE_LEVEL reqFeatureLevels[] =
{
    D3D_FEATURE_LEVEL_11_1,
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
};
CComPtr<ID3D11Device> device;
CComPtr<ID3D11DeviceContext> context;
HRESULT hr = D3D11CreateDevice(
    NULL,                        // specify null to use the default adapter
    D3D_DRIVER_TYPE_HARDWARE,
    0,
    creationFlags,               // optionally set debug and Direct2D compatibility flags
    reqFeatureLevels,            // list of feature levels this app can support
    ARRAYSIZE(reqFeatureLevels), // number of possible feature levels
    D3D11_SDK_VERSION,
    &device,                     // returns the Direct3D device created
    NULL,                        // returns feature level of device created
    &context                     // returns the device immediate context
);
if (FAILED(hr)) return -1;

CComPtr<ID3D11Texture2D> renderTexture;
CD3D11_TEXTURE2D_DESC textureDesc(DXGI_FORMAT_B8G8R8A8_UNORM, 100, 100, 1, 1, D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
textureDesc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
hr = device->CreateTexture2D(&textureDesc, NULL, &renderTexture);
if (FAILED(hr)) return -1;

CComPtr<IDXGISurface> dxgiSurf;
hr = renderTexture.QueryInterface<IDXGISurface>(&dxgiSurf);
OnHResult(hr, "QueryInterface<IDXGISurface>", return false);

DXGI_SURFACE_DESC desc;
dxgiSurf->GetDesc(&desc);

CComPtr<ID2D1Factory> factory;
hr = D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, __uuidof(ID2D1Factory), NULL, (void**)&factory);
if (FAILED(hr)) return -1;

CComPtr<ID2D1RenderTarget> renderTarget;
D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(D2D1_RENDER_TARGET_TYPE_DEFAULT, D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_IGNORE));
hr = factory->CreateDxgiSurfaceRenderTarget(dxgiSurf, &props, &renderTarget);
if (FAILED(hr)) return -1;

CComPtr<ID2D1Bitmap> sharedBitmap;
D2D1_BITMAP_PROPERTIES bitmapProperties = D2D1::BitmapProperties(D2D1::PixelFormat(desc.Format, D2D1_ALPHA_MODE_IGNORE));
hr = renderTarget->CreateSharedBitmap(IID_PPV_ARGS(&dxgiSurf), &bitmapProperties, &sharedBitmap);
if (FAILED(hr)) return -1;
Any ideas why this fails?
You pass IID_PPV_ARGS(&dxgiSurf) as the first two parameters of CreateSharedBitmap. This macro expands into
IID_IDXGISurface, reinterpret_cast<void**>(&dxgiSurf)
while the second parameter should be the interface pointer itself, i.e. reinterpret_cast<void*>(p_dxgi_surface), so an extra level of indirection sneaks in and the program crashes.
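A corrected call, keeping the names from the question (dxgiSurf is a CComPtr<IDXGISurface>, and .p is its raw pointer member), would look roughly like this:
// CreateSharedBitmap wants the IID and a void* interface pointer,
// so IID_PPV_ARGS (which yields a void**) does not fit here.
hr = renderTarget->CreateSharedBitmap(__uuidof(IDXGISurface), dxgiSurf.p, &bitmapProperties, &sharedBitmap);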

Transcoding video issue only in Win-8 not Win-7, this is MFCopy

This is from a 2009 Microsoft project, and I'm not sure why I'm getting an error specifically in Windows 8. Here is the link: MSDN MFCopy
This happens when using the '-v' option ('Set Video Format'); I get this error:
"Failed to negotiate a format between the source reader and sink writer".
Keep in mind, I am processing the same video files in Win-7 and Win-8.
Edit: Is this a WinAPI issue? Are there some differences in the calls that need to be made? If so, can that be repaired in the source...
Example Usage is:
MfCopy.exe -t -xa -v WVC1 -r 270 "g:\pd1.mov" "g:\pd3.wmv"
'-t' = Trim Black frames
'-xa' = Video only
'-r' (270) = Rotate 270 degrees
'-v' (WVC1) = (Video options, H264, WMV2, WMV3, WVC1... none work in Win-8)
I also have the source code, and I can debug. Again, it runs fine in Win-7. But in Win-8, I get an error specifically at this line:
hr = m_pSinkWriter->SetInputMediaType( streamInfo.dwOutputStreamIndex,
pFullMediaType,
NULL );
The error is: 'The data specified for the media type is invalid, inconsistent, or not supported by this object'.
That is in this code block:
/////////////////////////////////////////////////////////////////
HRESULT CMFCopy::_NegotiateStreamFormat(
    __in DWORD dwStreamIndex,
    __in REFGUID guidMajorType,
    __in DWORD cFormats,
    __in_ecount( cFormats ) const GUID **paFormats )
{
    HRESULT hr = S_OK;
    const StreamInfo& streamInfo = m_paStreamInfo[dwStreamIndex];
    IMFMediaType *pPartialMediaType = NULL;
    IMFMediaType *pFullMediaType = NULL;
    BOOL fConfigured = FALSE;

    CHECK_HR( hr = MFCreateMediaType( &pPartialMediaType ) );
    CHECK_HR( hr = pPartialMediaType->SetGUID( MF_MT_MAJOR_TYPE, guidMajorType ) );

    for( DWORD ii = 0; ii < cFormats; ii++ )
    {
        SAFE_RELEASE( pFullMediaType );
        CHECK_HR( hr = pPartialMediaType->SetGUID( MF_MT_SUBTYPE, *paFormats[ii] ) );

        // try to set the partial media type on the source reader
        hr = m_pSourceReader->SetCurrentMediaType( dwStreamIndex, NULL, pPartialMediaType );
        if( S_OK != hr )
        {
            // format is not supported by the source reader, try the next on the list
            hr = S_OK;
            continue;
        }

        // get the full media type from the source reader
        CHECK_HR( hr = m_pSourceReader->GetCurrentMediaType( dwStreamIndex, &pFullMediaType ) );

        if (MFMediaType_Video == guidMajorType)
        {
            _UpdateInputVideoStreamOverscanLineCount(pFullMediaType);
            if (RotateNone != m_Options.eRotation)
            {
                CHECK_HR( hr = GetDefaultStride(pFullMediaType, &m_srcDefaultStride)); // get the default stride for the source
                CComPtr<IMFMediaType> spRotatedMediaType;
                CHECK_HR( hr = CreateRotatedMediaType(pFullMediaType, spRotatedMediaType, _simpleOverScanLines));
                pFullMediaType->Release();
                pFullMediaType = spRotatedMediaType.Detach(); // transfer ownership
            }
        }

        // try to set the input media type on the sink writer
        hr = m_pSinkWriter->SetInputMediaType( streamInfo.dwOutputStreamIndex, pFullMediaType, NULL );
        if( S_OK != hr )
        {
            // format is not supported by the sink writer, try the next on the list
            hr = S_OK;
            continue;
        }

        if (MFMediaType_Video == guidMajorType)
        {
            CHECK_HR( hr = _UpdateRotationInfo(pFullMediaType)); // may need to update info about rotated image
        }

        fConfigured = TRUE;
        break;
    }

    if( !fConfigured )
    {
        hr = MF_E_INVALIDMEDIATYPE;
        _SetErrorDetails( hr, L"Failed to negotiate a format between the source reader and sink writer" );
        goto done;
    }

done:
    SAFE_RELEASE( pPartialMediaType );
    SAFE_RELEASE( pFullMediaType );
    return( hr );
}

Direct2D Create SwapChain

I am trying to program a Direct2D desktop app based on a Windows tutorial, but I am having problems creating the swap chain. In the code below everything gets initialized until the CreateSwapChainForHwnd call; the pointer m_pDXGISwapChain1 stays NULL. All the pointers except pOutput are ComPtrs.
D2D1_FACTORY_OPTIONS options;
ZeroMemory(&options, sizeof(D2D1_FACTORY_OPTIONS));
HRESULT hr = D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED,
    __uuidof(ID2D1Factory1), &options, &m_pD2DFactory1);
if(SUCCEEDED(hr))
{
    UINT creationFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
    D3D_FEATURE_LEVEL featureLevels[] = { D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0 };
    hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, 0, creationFlags,
        featureLevels, ARRAYSIZE(featureLevels), D3D11_SDK_VERSION, &m_pD3DDevice,
        &m_featureLevel, &m_pD3DDeviceContext);
}
if(SUCCEEDED(hr))
    hr = m_pD3DDevice.As(&m_pDXGIDevice1);
if(SUCCEEDED(hr))
    hr = m_pD2DFactory1->CreateDevice(m_pDXGIDevice1.Get(), &m_pD2DDevice);
if(SUCCEEDED(hr))
    hr = m_pD2DDevice->CreateDeviceContext(D2D1_DEVICE_CONTEXT_OPTIONS_NONE, &m_pD2DDeviceContext);
if(SUCCEEDED(hr))
    hr = m_pDXGIDevice1->GetAdapter(&m_pDXGIAdapter);
if(SUCCEEDED(hr))
    hr = m_pDXGIAdapter->GetParent(IID_PPV_ARGS(&m_pDXGIFactory2));

DXGI_SWAP_CHAIN_DESC1 swapChainDesc1 = {0};
swapChainDesc1.Width = 0;
swapChainDesc1.Height = 0;
swapChainDesc1.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
swapChainDesc1.Stereo = false;
swapChainDesc1.SampleDesc.Count = 1;
swapChainDesc1.SampleDesc.Quality = 0;
swapChainDesc1.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc1.BufferCount = 2;
swapChainDesc1.Scaling = DXGI_SCALING_NONE;
swapChainDesc1.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
swapChainDesc1.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
swapChainDesc1.Flags = 0;

IDXGIOutput *pOutput;
m_pDXGIAdapter->EnumOutputs(0, &pOutput);
if(SUCCEEDED(hr))
    hr = m_pDXGIFactory2->CreateSwapChainForHwnd(
        static_cast<IUnknown*>(m_pD3DDevice.Get()), m_hwnd, &swapChainDesc1,
        NULL, pOutput, &m_pDXGISwapChain1);
if(SUCCEEDED(hr))
    hr = m_pDXGIDevice1->SetMaximumFrameLatency(1);
if(SUCCEEDED(hr))
    hr = m_pDXGISwapChain1->GetBuffer(0, IID_PPV_ARGS(&m_pDXGIBackBuffer));
If all your pointers are ComPtr, then the call should look like this (note the nullptr for the restrict-to-output parameter, where you were passing pOutput):
ComPtr<ID3D11Device> d3dDevice;
ComPtr<IDXGIFactory2> dxgiFactory;
// assuming d3dDevice and dxgiFactory are initialized correctly:
ComPtr<IDXGISwapChain1> swapChain;
HRESULT hr = dxgiFactory->CreateSwapChainForHwnd(d3dDevice.Get(), hWnd, &swapChainDescription, nullptr, nullptr, swapChain.GetAddressOf());
As for your swap chain description, if you're making a non-Windows Store App, you should set
swapChainDescription.Scaling = DXGI_SCALING_STRETCH;
swapChainDescription.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
Both of those values are 0, so you can leave them out.
Here is the complete swap chain description that I use:
DXGI_SWAP_CHAIN_DESC1 swapChainDescription = {};
swapChainDescription.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
swapChainDescription.SampleDesc.Count = 1;
swapChainDescription.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDescription.BufferCount = 2;
See this article for a full walkthrough of how to set up Direct2D 1.1 properly, including the CreateSwapChainForHwnd call: http://msdn.microsoft.com/en-us/magazine/dn198239.aspx
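Once CreateSwapChainForHwnd succeeds, note that Direct2D still needs a target bitmap wrapping the swap chain's back buffer before you can draw. A minimal sketch using the member names from your code (this mirrors the pattern in the article above):
ComPtr<IDXGISurface> dxgiBackBuffer;
hr = m_pDXGISwapChain1->GetBuffer(0, IID_PPV_ARGS(&dxgiBackBuffer));
if(SUCCEEDED(hr))
{
    // Wrap the back buffer in a D2D bitmap and make it the render target.
    D2D1_BITMAP_PROPERTIES1 bitmapProps = D2D1::BitmapProperties1(
        D2D1_BITMAP_OPTIONS_TARGET | D2D1_BITMAP_OPTIONS_CANNOT_DRAW,
        D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_IGNORE));
    ComPtr<ID2D1Bitmap1> targetBitmap;
    hr = m_pD2DDeviceContext->CreateBitmapFromDxgiSurface(dxgiBackBuffer.Get(), &bitmapProps, &targetBitmap);
    if(SUCCEEDED(hr))
        m_pD2DDeviceContext->SetTarget(targetBitmap.Get());
}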
Direct2D doesn't have a swap chain at the user level; swap chains belong to Direct3D/DXGI. I see some DirectX 11 code in your post. Do you really want Direct2D, or Direct3D?
