What is the difference between SV_POSITION and POSITION? - directx-11

While looking at the direct3d11 tutorial sample code,
I saw code that I could not understand in the shader code.
// Vertex-shader input: POSITION is a plain user semantic that simply names
// the raw per-vertex data delivered by the input layout / vertex buffer.
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
// Pixel-shader input: SV_POSITION is a *system-value* semantic — the
// rasterizer requires the fully transformed clip-space position under this
// semantic, which is why plain POSITION cannot be used for this member.
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
VS_INPUT use POSITION and PS_INPUT use SV_POSITION.
What is the difference?
Why can't I use POSITION in PS_INPUT?

Related

DirectX pixel shows only at (0.0f, 0.0f)

I'm learning DirectX11 and WinAPI following a Youtube tutorial. I'm trying to show a pixel on the display at (0.0f, 0.0f) and it works, but if I change the coordinate (for example to (-0.5f, 0.0f)) it doesn't. And when I try to show more than one pixel, only the pixel at (0.0f, 0.0f) appears.
Here's the render method:
void Graphics::RenderFrame()
{
float bgcolor[] = { 0.0f, 0.0f, 0.0f, 1.0f };
this->deviceContext->ClearRenderTargetView(this->renderTargetView.Get(), bgcolor);
this->deviceContext->IASetInputLayout(this->vertexshader.GetInputLayout());
this->deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D10_PRIMITIVE_TOPOLOGY_POINTLIST);
this->deviceContext->VSSetShader(vertexshader.GetShader(), NULL, 0);
this->deviceContext->PSSetShader(pixelshader.GetShader(), NULL, 0);
UINT stride = sizeof(Vertex);
UINT offset = 0;
this->deviceContext->IASetVertexBuffers(0, 1, vertexBuffer.GetAddressOf(), &stride, &offset);
this->deviceContext->Draw(2,0);
this->swapchain->Present(1, NULL);
}
Vertex structure:
// Two test points in normalized device coordinates (x/y in [-1, 1]):
// one at the origin and one half-way toward the left edge.
Vertex v[] =
{
Vertex(0.0f, 0.0f),
Vertex(-0.5f, 0.0f),
};
Vertex header:
// Minimal vertex type: a 2D position only — matches the float2 POSITION
// input of the vertex shader below.
struct Vertex
{
Vertex()
{
}
Vertex(float x, float y)
: pos(x, y)
{
}
DirectX::XMFLOAT2 pos;
};
Vertex shader:
// Expands the 2D input position to a clip-space float4 (z = 0, w = 1).
float4 main(float2 inPos : POSITION) : SV_POSITION
{
return float4(inPos, 0, 1);
}
Pixel shader:
// Outputs opaque white for every covered pixel.
float4 main() : SV_TARGET
{
return float4(1.0f, 1.0f, 1.0f, 1.0f);
}
In my tests I didn't see any problem with the code you provided. The first time I tested with the same data as you, but because the pixels were so small I couldn't find their exact locations; after moving the points a little closer together I could clearly see that they are successfully drawn in the window.
I suggest your coordinates to be closer, and better to use D3D11_PRIMITIVE_TOPOLOGY_POINTLIST. here is my test pic. You can see three pixels of the output.
{ 0.0f, -0.0f },
{ 0.0f, -0.1f },
{ 0.0f, -0.2f },
The following is my test code, hope it will help you.
https://gist.github.com/msmshazan/dfd5362004be37ff5e016b6a42be5083
// Per-frame render function (C-style D3D11 COM macros): waits on the swap
// chain's frame-latency handle, binds render targets and pipeline state,
// clears, and draws the global `vertices` array as a point list.
static void RenderFrame()
{
if (!render_occluded)
{
if (render_frame_latency_wait)
{
// block until the swap chain is ready to accept a new frame
WaitForSingleObjectEx(render_frame_latency_wait, INFINITE, TRUE);
}
#if WINDOW_DEPTH || WINDOW_STENCIL
ID3D11DeviceContext_OMSetRenderTargets(render_context, 1, &render_window_rtview, render_window_dpview);
ID3D11DeviceContext_OMSetDepthStencilState(render_context, render_depthstencil_state, 0);
ID3D11DeviceContext_ClearDepthStencilView(render_context, render_window_dpview, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.f, 0);
#else
// no depth/stencil buffer configured — colour target only
ID3D11DeviceContext_OMSetRenderTargets(render_context, 1, &render_window_rtview, NULL);
#endif
// clear background (cornflower blue)
FLOAT clear_color[] = { 100.f / 255.f, 149.f / 255.f, 237.f / 255.f, 1.f };
ID3D11DeviceContext_ClearRenderTargetView(render_context, render_window_rtview, clear_color);
// draw the vertices as individual points (POINTLIST topology below)
const UINT stride = sizeof(struct Vertex);
const UINT offset = 0;
ID3D11DeviceContext_IASetInputLayout(render_context, render_input_layout);
ID3D11DeviceContext_IASetVertexBuffers(render_context, 0, 1, &render_vertex_buffer, &stride, &offset);
ID3D11DeviceContext_IASetPrimitiveTopology(render_context, D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
ID3D11DeviceContext_VSSetShader(render_context, render_vertex_shader, NULL, 0);
ID3D11DeviceContext_PSSetShader(render_context, render_pixel_shader, NULL, 0);
ID3D11DeviceContext_RSSetState(render_context, render_raster_state);
ID3D11DeviceContext_OMSetBlendState(render_context, render_blend_state, NULL, ~0U);
ID3D11DeviceContext_Draw(render_context, _countof(vertices), 0);
}
}
// Interleaved vertex layout: 2D position followed by an RGB colour.
struct Vertex
{
float x, y;
float r, g, b;
};
// Three coloured points near the window centre (NDC coordinates).
// NOTE(review): the prose above lists the last point at -0.2f, but this
// listing uses -0.3f — the snippets were apparently taken at different times.
static const struct Vertex vertices[] =
{
{ 0.0f, -0.0f, 1.f, 0.f, 0.f },
{ 0.0f, -0.1f, 1.f, 0.f, 0.f },
{ 0.0f, -0.3f, 0.f, 1.f, 1.f },
};
// You can compile shader to bytecode at build time, this will increase startup
// performance and also will avoid dependency on d3dcompiler dll file.
// To do so, put the shader source in shader.hlsl file and run these two commands:
// fxc.exe /nologo /T vs_4_0_level_9_0 /E vs /O3 /WX /Zpc /Ges /Fh d3d11_vshader.h /Vn d3d11_vshader /Qstrip_reflect /Qstrip_debug /Qstrip_priv shader.hlsl
// fxc.exe /nologo /T ps_4_0_level_9_0 /E ps /O3 /WX /Zpc /Ges /Fh d3d11_pshader.h /Vn d3d11_pshader /Qstrip_reflect /Qstrip_debug /Qstrip_priv shader.hlsl
// then set next setting to 1
#define USE_PRECOMPILED_SHADERS 0
#if USE_PRECOMPILED_SHADERS
#include "d3d11_vshader.h"
#include "d3d11_pshader.h"
#else
#pragma comment (lib, "d3dcompiler.lib")
// Combined HLSL source for both stages, compiled at runtime via
// d3dcompiler when USE_PRECOMPILED_SHADERS is 0. Entry points: vs() and
// ps(). The VS forwards the 2D position (expanded to clip space) and the
// vertex colour; the PS outputs that colour with alpha 1.
static const char d3d11_shader[] =
"struct VS_INPUT \n"
"{ \n"
" float2 pos : POSITION; \n"
" float3 col : COLOR0; \n"
"}; \n"
" \n"
"struct PS_INPUT \n"
"{ \n"
" float4 pos : SV_POSITION; \n"
" float3 col : COLOR0; \n"
"}; \n"
" \n"
"PS_INPUT vs(VS_INPUT input) \n"
"{ \n"
" PS_INPUT output; \n"
" output.pos = float4(input.pos.xy, 0.f, 1.f); \n"
" output.col = input.col; \n"
" return output; \n"
"} \n"
" \n"
"float4 ps(PS_INPUT input) : SV_Target \n"
"{ \n"
" return float4(input.col, 1.f); \n"
"} \n";
#endif

rotate part of texture around pivot

I would like to "mask" a area of the sampled texture and rotate that part in circular motion. the mask is also moving around the uv map.
https://ibb.co/JshCDFZ
new to this.. I know I miss a step somewhere..
any tipps appreciated!
// First (broken) attempt: blends a rotated sample into a circular mask of
// fixed radius 0.5 centred at `pos`, the unrotated texture elsewhere.
float4 PS_circleSampler(psInput In): SV_Target
{
float2 uv = In.TexCd.xy;
// NOTE(review): `col` is never used and can be removed.
float4 col;
float s = sin(phase);
float c = cos(phase);
float2x2 rotationMatrix = float2x2(c, -s,
s, c);
float4 colOriginal = texA.Sample(linearSampler, uv);
// NOTE(review): rotates `pos - uv` but never translates back to the
// pivot — this is the missing step the author later fixes in VS() below.
float4 colRotated = texA.Sample(linearSampler, mul( pos - uv, rotationMatrix ));
// positive inside the circle of radius 0.5 around `pos`, negative outside
float inCircle = 0.5 - distance (In.TexCd.xy, pos);
// ceil() turns the signed distance into a hard 0/1 mask
colOriginal *= 1- ceil(inCircle);
colRotated *= ceil(inCircle) ;
return colOriginal + colRotated;
}
found the solution myself..
rotating in vertex shader is easier because it uses a -1 to 1 coordinate system.
First translate the UV to the center, then rotate, then translate back to its original position.
// Effect parameters: circle centre (UV space), mask radius, rotation angle.
float2 pos;
float radius;
float phase;
Texture2D texA <string uiname="Texture A";>;
Texture2D texB <string uiname="Texture B";>;
// Per-draw / per-object transforms supplied by the host (vvvv-style semantics).
cbuffer cbPerDraw : register( b0 )
{
float4x4 tVP : LAYERVIEWPROJECTION;
};
cbuffer cbPerObj : register( b1 )
{
float4x4 tW : WORLD;
};
// Trilinear sampler clamped at the texture edges.
SamplerState linearSampler
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU = Clamp;
AddressV = Clamp;
};
// Vertex-shader input: object-space position and base texture coordinate.
struct vsInput
{
float4 PosO : POSITION;
float2 TexCd : TEXCOORD0;
};
// Interpolants passed to the pixel shader: clip-space position plus both
// the original and the pivot-rotated texture coordinates.
struct psInput
{
float4 PosWVP: SV_Position;
float2 TexCd: TEXCOORD0;
float2 TexCdRotated: TEXCOORD1;
};
// Vertex shader: computes the pivot-rotated UV (translate to pivot,
// rotate, translate back) alongside the untouched UV.
psInput VS(vsInput In)
{
psInput output;
float2 uv = In.TexCd;
float2 center = pos;
float s = sin(phase);
float c = cos(phase);
// 2D rotation by `phase` radians
float2x2 rotationMatrix = float2x2(c, -s,
s, c);
float2 uv2 = uv - center; // translate into center
uv2 = mul(uv2, rotationMatrix); // rotate the space
uv2 += center; // move it back to the original place
output.TexCdRotated = uv2;
// NOTE(review): position is passed through untransformed (no tVP/tW) —
// presumably the host draws a full-screen quad already in clip space.
output.PosWVP = In.PosO;
output.TexCd = In.TexCd;
return output;
}
// Final pixel shader: shows the rotated texture inside the circle of
// `radius` around `pos`, the unrotated texture everywhere else.
float4 PS_circleSampler(psInput In): SV_Target
{
float2 uv = In.TexCd.xy;
float2 uvRot = In.TexCdRotated.xy;
float4 colOriginal = texA.Sample(linearSampler, uv);
float4 colRotated = texA.Sample(linearSampler, uvRot );
// positive inside the circle, negative outside
float inCircle = radius - distance (In.TexCd.xy, pos);
// ceil() collapses the signed distance into a hard 0/1 mask
colOriginal *= 1- ceil(inCircle);
colRotated *= ceil(inCircle) ;
return colOriginal + colRotated;
}
// Single-pass technique wiring VS and PS_circleSampler together (SM 4.0).
technique10 circleSampler
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
SetPixelShader( CompileShader( ps_4_0, PS_circleSampler() ) );
}
}

Transparency in shader opengles2

I have the following shader with two passes and scene blend but I see those faces marked in black..
how would I maintain transparency and not see these faces in the picture ?
The material does two passes on cull faces anticlockwise and the other pass cull faces clockwise.
The shader does a dot product between the normal and the camera position and then modulate the transparency based on that result.
#version 100
// GLSL ES fragment shader: fades the fragment out as the surface turns away
// from the light (lightDiffuse computed in the vertex program), discarding
// back-facing fragments entirely.
precision highp int;
precision highp float;
uniform float time;
uniform float touchX;
uniform float touchY;
uniform float touchZ;
uniform float line;
varying float lightDiffuse;
void main()
{
float rampLight =lightDiffuse;
// invert: fragments facing away get brighter
float light = (1.0 - rampLight) * 1.0;
vec4 lightColor = vec4(1.0,1.0,1.0, 1.0);
vec4 diffuseColor = lightColor * light;
// NOTE(review): `c` is never used and can be removed.
vec4 c;
// drop fragments whose diffuse term is negative (facing away from light);
// see the answer below — discard is costly on mobile GPUs.
if(rampLight < 0.0 )
{
discard;
}
// soft ramp of the colour into [0,1]
diffuseColor = smoothstep(vec4(0.0, 0.0, 0.0, 0.0), vec4(0.8, 0.8, 0.8, 0.8), vec4(diffuseColor));
gl_FragColor = diffuseColor;
}
// Original two-pass material: renders back faces then front faces, both
// alpha-blended with depth write/test on — the source of the dark faces.
material Router
{
technique
{
// pass 1: back faces (anticlockwise culled away)
pass
{
scene_blend alpha_blend
depth_write on
depth_check on
cull_hardware anticlockwise
vertex_program_ref movingline_101_vs
{
}
fragment_program_ref movingline_101_fs
{
}
}
// pass 2: front faces (clockwise culled away)
pass
{
scene_blend alpha_blend
cull_hardware clockwise
depth_write on
depth_check on
vertex_program_ref movingline_101_vs
{
}
fragment_program_ref movingline_101_fs
{
}
}
}
}
Update:
// Updated material following the answer: pass 1 primes the depth buffer
// only, pass 2 renders the visible surface with an equal depth test.
material Router
{
technique
{
// depth-prime pass: writes depth, outputs fully transparent black
pass
{
depth_write on
vertex_program_ref pass_101_vs
{
}
fragment_program_ref pass_101_fs
{
}
}
// colour pass: only fragments matching the primed depth survive
pass
{
depth_write off
depth_fun equal
scene_blend add
vertex_program_ref movingline_101_vs
{
}
fragment_program_ref movingline_101_fs
{
}
}
}
}
pass shader
// Depth-prime shader: outputs a fully transparent fragment so only the
// depth buffer is effectively written (blending discards the colour).
void main()
{
gl_FragColor = vec4(0.0,0.0,0.0,0.0);
}
main shader:
#version 120
// Colour pass of the updated material: same lighting-based fade as before,
// but without discard (the equal depth test replaces it).
// NOTE(review): `precision` qualifiers are not part of desktop GLSL 1.20 —
// presumably the driver tolerates them; confirm on the target platform.
precision highp int;
precision highp float;
uniform float time;
uniform float touchX;
uniform float touchY;
uniform float touchZ;
uniform float line;
varying float lightDiffuse;
void main()
{
float rampLight =lightDiffuse;
float light = (1.0 - rampLight) * 1.0;
vec4 lightColor = vec4(1.0,1.0,1.0, 1.0);
vec4 diffuseColor = lightColor * light;
diffuseColor = smoothstep(vec4(0.0, 0.0, 0.0, 0.0), vec4(0.9, 0.9, 0.9, 0.9), vec4(diffuseColor));
gl_FragColor = diffuseColor;
}
If you're just trying to render something with transparency, but without being able to see the faces which would be occluded if it was opaque, then a common simple technique is to prime the depth buffer first.
Pass 1: Write to just the depth buffer. Easiest way to achieve this is by rendering with blending on and a shader that just outputs vec4(0.0, 0.0, 0.0, 0.0) for every fragment. There may be more efficient ways to prime the depth buffer (e.g. using glColorMask instead of blending), but this approach is simple and probably good enough.
Pass 2: Render with a depth test of GL_EQUAL. Use whatever shader you like.
Performance note: discard is often slower than just outputting vec4(0.0, 0.0, 0.0, 0.0) for pixels you want to be transparent on mobile hardware, so avoid discard unless you really need it - in this case you don't need it.

Problems with HLSL render to texture

Question:
How can I render to target through HLSL by returning a struct with semantic mappings?
Hi there. I'm new to DirectX11 and am trying to set up render to texture so I can implement deferred shading. I've spent over a day scouring the net for a solution to my problem, but have had no luck so far.
Currently, I can render my scene to a single texture through HLSL and I finish by rendering that texture to a full-screen quad without problems. However if I bind multiple render targets, all I ever see is the clear color for my targets, the pixel shader never seems to get to any bound textures. I have D3D11_CREATE_DEVICE_DEBUG set with no warnings/errors at runtime and I've tried using the Visual Studio 2013 graphics debugging tool, but am having a difficult time understanding what the issue is. Here is probably the relevant source code:
Shader Code: (compiled with vs_5_0, ps_5_0)
// vertex shader
// World/view/projection matrices supplied per object.
cbuffer mTransformationBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
// Raw vertex attributes from the input layout.
struct VertexInputType
{
float4 position : POSITION;
float4 color : COLOR;
float3 normal : NORMAL;
};
// Rasterizer interpolants; SV_POSITION must be the clip-space position.
struct PixelInputType
{
float4 position : SV_POSITION;
float4 color : COLOR;
float3 normal : NORMAL;
};
// this does not seem to work!
// MRT output: one colour value per bound render target (slots 0 and 1).
struct PixelOutputType
{
float4 position : SV_TARGET0;
float4 normal : SV_TARGET1;
};
// Vertex shader: transforms position to clip space and the normal to world
// space. FIX (per the author's own resolution below): SV_POSITION must be
// the *fully* transformed point — the original stopped after the world
// matrix, which is why the MRT pixel shader never ran where expected.
PixelInputType VertexEntry(VertexInputType input)
{
input.position.w = 1.0f;
PixelInputType output;
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
output.normal = mul(input.normal, (float3x3)worldMatrix);
output.normal = normalize(output.normal);
output.color = input.color;
return output;
}
// fragment shader
// Pixel shader: writes solid white to both bound targets as an MRT smoke test.
PixelOutputType FragmentEntry(PixelInputType input)
{
PixelOutputType output;
// render as white so I can see MRT working
output.position = float4(1.0f, 1.0f, 1.0f, 1.0f);
output.normal = float4(1.0f, 1.0f, 1.0f, 1.0f);
return output;
}
Render Target Creation:
void RenderTargetManager::createTarget(const std::string& name, unsigned int width, unsigned int height, DXGI_FORMAT format)
{
unsigned int hash = hashString(name);
RenderTarget target;
// create the texture to render to
target.mTexture = TextureUtils::createTexture2d(mDevice, width, height, format, D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE);
// create the render target view
D3D11_RENDER_TARGET_VIEW_DESC rtViewDesc;
memset(&rtViewDesc, 0, sizeof(rtViewDesc));
rtViewDesc.Format = format;
rtViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
HRESULT ret = S_OK;
ret = mDevice.CreateRenderTargetView(target.mTexture, &rtViewDesc, &target.mRenderTargetView);
ASSERT(ret == S_OK, "Failed to create a render target view.");
// create the shader resource view
D3D11_SHADER_RESOURCE_VIEW_DESC srViewDesc;
memset(&srViewDesc, 0, sizeof(srViewDesc));
srViewDesc.Format = format;
srViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srViewDesc.Texture2D.MipLevels = 1;
ret = mDevice.CreateShaderResourceView(target.mTexture, &srViewDesc, &target.mShaderResourceView);
ASSERT(ret == S_OK, "Failed to create a shader resource view.");
mRenderTargetMap[hash] = target;
}
Setting Render Targets:
// Binds up to numTargets named render targets (variadic char* names) plus
// the given depth-stencil view. Unknown names are logged and skipped; if
// any name was missing the ASSERT fires before binding.
void RenderTargetManager::setTargets(ID3D11DepthStencilView* depthStencilView, unsigned int numTargets, ...)
{
va_list args;
va_start(args, numTargets);
std::vector<ID3D11RenderTargetView*> renderTargetViews;
for (unsigned int i = 0; i < numTargets; ++i)
{
// each variadic argument must be a C string naming a created target
std::string renderTargetName = va_arg(args, char*);
unsigned int hash = hashString(renderTargetName);
auto it = mRenderTargetMap.find(hash);
if (it != mRenderTargetMap.end())
{
renderTargetViews.push_back(it->second.mRenderTargetView);
}
else
{
LOGW("Render target view '%s' not found.", renderTargetName.c_str());
}
}
va_end(args);
if (!renderTargetViews.empty())
{
ASSERT(renderTargetViews.size() == numTargets, "Failed to set render targets. Not all targets found.");
// NOTE(review): size() is size_t narrowed to UINT here — fine for the
// D3D11 limit of 8 simultaneous targets.
mDeviceContext.OMSetRenderTargets(renderTargetViews.size(), &renderTargetViews[0], depthStencilView);
}
}
My pixel shader will write to any one of these targets if it returns a float4, returning a struct never seems to work and results in only the clear color for the render target. I have also tried this as the return struct without success:
// Single-target variant the author also tried.
struct PixelOutputType
{
// FIX: HLSL member declarations require a trailing semicolon — the
// original omitted it and would not compile.
float4 position : SV_TARGET;
};
Thank you for taking the time to read this. I really appreciate the help.
-T
EDIT:
I have also tried using a static array of ID3D11RenderTargetView*, with the same issues.
EDIT2: I also enabled DirectX Control Panel output and see nothing out of the ordinary.
After a lot of tests and frustration, I finally discovered my issue ;__;
There were no problems with my textures, views, or states, but there was an issue with the shader and SV_POSITION.
I was mistakenly assuming my pixel shader input 'float4 position : SV_POSITION' could be only world-transformed in the vertex shader. After thinking about this for a while I realized that this MUST be the fully transformed (clip-space) point by the time it reaches the pixel shader, and sure enough, fully transforming it fixed the issue.
THIS:
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
instead of this:
output.position = mul(input.position, worldMatrix);
//output.position = mul(output.position, viewMatrix);
//output.position = mul(output.position, projectionMatrix);

How to achieve brightness + contrast in WebGL?

I am trying to apply window level similar to this example ,drag your mouse over the image to see the brightness + contrast effect.
But I want to achieve the same using WebGL, as the GPU will process it much faster than the CPU.
Here's what I have done:
Vertex Shader:
// Pass-through vertex shader: forwards the texture coordinate and expands
// the vertex position to a vec4.
attribute vec3 attrVertexPos;
attribute vec2 attrTextureCoord;
varying highp vec2 vTextureCoord;
void main(void) {
// NOTE(review): w = 0.81 scales x/y/z by 1/0.81 after the perspective
// divide — presumably this was meant to be 1.0; confirm.
gl_Position = vec4(attrVertexPos, 0.81);
vTextureCoord = attrTextureCoord;
}
Fragment Shader:
#ifdef GL_ES
precision highp float;
#endif
varying highp vec2 vTextureCoord;
uniform sampler2D uImage;
uniform float brightnessFactor;
uniform float contrastFactor;
void main(void) {
gl_FragColor = texture2D(uImage, vTextureCoord) *(brightnessFactor/contrastFactor);
}
But it doesn't work as expected according the link given above.
Here's the Javascript code:
// Mouse handlers: while the button is held, map cursor X to brightness and
// cursor Y to contrast and upload them as shader uniforms.
var isMouseDown = false;
document.getElementById('canvas').onmousedown = function() { isMouseDown = true };
document.getElementById('canvas').onmouseup = function() { isMouseDown = false };
document.getElementById('canvas').onmousemove = function(e) {
if (isMouseDown) {
var intWidth = $('canvas').innerWidth();
var intHeight = $('canvas').innerHeight();
var x = (e.clientX/intWidth)*100;
var y = (e.clientY/intHeight)*100;
console.log(x/10 + ' :: ' + y/10);
// FIX: the original uploaded the brightness value to the
// "contrastFactor" uniform and vice versa — the uniform names were
// crossed (as the answer below also points out).
brightnessVal = x/10;
gl.uniform1f(gl.getUniformLocation(program, "brightnessFactor"), brightnessVal);
contrastVal = y/10;
gl.uniform1f(gl.getUniformLocation(program, "contrastFactor"), contrastVal);
}
};
The difference is in the pixel manipulation. You're just multiplying by a coefficient (also, the names of the coefficients you use are incorrect), whereas in the linked example something a bit more complicated is happening. Some colour range (described by its center and width) from the source image gets expanded to the full [0,1] range:
newColor = (oldColor - rangeCenter) / rangeWidth + 0.5
Why is it doing this is beyond my knowledge (the page is an example for medical imaging library and I don't know anything about it). Nevertheless, I've managed to port the formula to your code. First, fragment shader changes:
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif
varying highp vec2 vTextureCoord;
uniform sampler2D uImage;
// New uniforms: centre and width of the colour range expanded to [0,1]
uniform float rangeCenter;
uniform float rangeWidth;
void main(void) {
vec3 c = texture2D(uImage, vTextureCoord).rgb;
gl_FragColor = vec4(
// The above formula, clamped to [0, 1].
// FIX: use the declared uniform names — the original referenced
// undeclared windowCenter/windowWidth and would not compile.
clamp((c - rangeCenter) / rangeWidth + 0.5, 0.0, 1.0),
// Also, let's not screw alpha
1
);
}
As for JavaScript, I've taken the liberty to make it a bit closer to linked example:
// Drag handlers: cursor deltas adjust the window/level parameters and
// re-upload them as uniforms (mirroring the linked example's interaction).
var isMouseDown = false,
// Initially we set "equality" colour mapping
rangeCenter = 0.5,
// FIX: was misspelled "ragneWidth", which left rangeWidth an implicit,
// uninitialized global on first use below
rangeWidth = 1,
lastX, lastY;
document.getElementById('canvas').onmousedown = function(e) {
lastX = e.clientX;
lastY = e.clientY;
isMouseDown = true
};
document.getElementById('canvas').onmouseup = function() {
isMouseDown = false
};
document.getElementById('canvas').onmousemove = function(e) {
if (isMouseDown) {
var intWidth = $('canvas').innerWidth();
var intHeight = $('canvas').innerHeight();
// Change params according to cursor coords delta
rangeWidth += (e.clientX - lastX) / intWidth;
rangeCenter += (e.clientY - lastY) / intHeight;
gl.uniform1f(
gl.getUniformLocation(program, "rangeWidth"),
rangeWidth
);
gl.uniform1f(
gl.getUniformLocation(program, "rangeCenter"),
rangeCenter
);
lastX = e.clientX;
lastY = e.clientY;
}
};
Old answer:
The problem is that, according to your code, you don't redraw the
picture after mousemove event. Just insert a draw call (or several
draw calls) with appropriate parameters after setting uniforms. For
example, it may look like this:
// Redraw immediately after updating the uniforms so the change is visible.
document.getElementById('canvas').onmousemove = function(e) {
if (isMouseDown) {
// your code
gl.drawArrays(/* params, e.g gl.TRIANGLES, 0 and vertex count */);
}
};
A better way would be using requestAnimationFrame callback to
redraw. To do that, define a draw function, which will make the draw
call for you and use it a requestAnimationFrame callback:
// Preferred variant: schedule the redraw with requestAnimationFrame so the
// browser batches updates to the display refresh rate.
function draw () {
/* your draw calls here */
}
document.getElementById('canvas').onmousemove = function(e) {
if (isMouseDown) {
// your code
requestAnimationFrame(draw);
}
};
P.S. Also, I'm interested: where does this #ifdef GL_ES thing come from? It's incorrect to select highp precision based on the GL_ES macro; the hardware doesn't have to support it according to the standard. The right way would be:
#ifdef GL_FRAGMENT_PRECISION_HIGH
precision highp float;
#else
precision mediump float;
#endif

Resources