WebCamTexture works fine on editor, but doesn't work on Build. I have tried: turn off antivirus and firewall, check authorization, put the webcamtexture on UI_RawImage and a plane gameObject, nothing works on build. There is no any real conclusion about the same bug on the forums (many are about Vuforia, and I just want to take a photo by a simple webcam on Windows). Any clues? Below the code. Tks in advance.
using UnityEngine;
using System.Collections;
using System.IO;
using UnityEngine.UI;
using System.Collections.Generic;
// https://stackoverflow.com/questions/24496438/can-i-take-a-photo-in-unity-using-the-devices-camera
public class GetCam2 : MonoBehaviour
{
WebCamTexture webCam;
//string your_path = "D:\\Lixo\\FotosCam";// any path you want to save your image
[SerializeField] string your_path = "";
public RawImage display;
public AspectRatioFitter fit;
public int contadorFotos;
[SerializeField] GameObject planoWeb;
IEnumerator Start()
{
your_path = "" + Application.dataPath;
yield return Application.RequestUserAuthorization(UserAuthorization.WebCam);
if (Application.HasUserAuthorization(UserAuthorization.WebCam)) {
LigaCamComeco1();
}
}
void LigaCamComeco1() {
if (WebCamTexture.devices.Length == 0)
{
Debug.LogError("can not found any camera!");
return;
}
int index = -1;
for (int i = 0; i < WebCamTexture.devices.Length; i++)
{
if (WebCamTexture.devices[i].name.ToLower().Contains("pc"))
{
Debug.Log("WebCam Name:" + WebCamTexture.devices[i].name + " Webcam Index:" + i);
index = i;
}
}
if (index == -1)
{
Debug.LogError("can not found your camera name!");
return;
}
WebCamDevice device = WebCamTexture.devices[index];
webCam = new WebCamTexture(device.name);
webCam.Play();
display.texture = webCam;
planoWeb.GetComponent<Renderer>().materials[0].mainTexture = webCam;
}
public void Update()
{
if (Input.GetKeyDown(KeyCode.PageUp)) {
Ratio();
callTakePhoto();
}
}
void Ratio() {
float ratio = (float)webCam.width / (float)webCam.height;
fit.aspectRatio = ratio;
float ScaleY = webCam.videoVerticallyMirrored ? -1f : 1f;
display.rectTransform.localScale = new Vector3(1f, ScaleY, 1f);
int orient = -webCam.videoRotationAngle;
display.rectTransform.localEulerAngles = new Vector3(0, 0, orient);
}
public void callTakePhoto() // call this function in button click event
{
StartCoroutine(TakePhoto());
}
IEnumerator TakePhoto() // Start this Coroutine on some button click
{
yield return new WaitForEndOfFrame();
Texture2D photo = new Texture2D(webCam.width, webCam.height);
photo.SetPixels(webCam.GetPixels());
photo.Apply();
byte[] bytes = photo.EncodeToJPG();
File.WriteAllBytes(your_path + "\\ZaxisCam" + contadorFotos + ".jpg", bytes);
contadorFotos++;
}
}
I have tried: turn off antivirus and firewall, check authorization, put the webcamtexture on UI_RawImage and a plane gameObject, nothing works on build.
I am making a 2D game in Unity and am trying to make my moveable character stop every time dialogue appears on screen.
I am using the Fungus extension for my dialogue as I'm a newbie to coding. Every thing I try however I run in to problems.
My current issue is that the modifier 'public' is not valid for this item.
Anyone know how this can be fixed? I have attached the code below. I assume the issue is with the public void CantMove() and public void CanMove() lines.
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class PlayerController : MonoBehaviour
{
public float moveSpeed;
public Rigidbody2D theRB;
public float jumpForce;
private bool isGrounded;
public Transform groundCheckPoint;
public LayerMask whatIsGround;
private bool canDoubleJump;
private bool canMove = true;
private Animator anim;
private SpriteRenderer theSR;
// Start is called before the first frame update
void Start()
{
anim = GetComponent<Animator>();
theSR = GetComponent<SpriteRenderer>();
}
// Update is called once per frame
void Update()
{
if(!canMove)
{
theRB.velocity = new Vector2(0, 0);
}
else
{
theRB.velocity = new Vector2(moveSpeed * Input.GetAxis("Horizontal"), theRB.velocity.y);
}
public void CantMove()
{
canMove = false;
}
public void CanMove()
{
canMove = true;
}
//theRB.velocity = new Vector2(moveSpeed * Input.GetAxis("Horizontal"), theRB.velocity.y);
isGrounded = Physics2D.OverlapCircle(groundCheckPoint.position, .2f, whatIsGround);
if(isGrounded)
{
canDoubleJump = true;
}
if(Input.GetButtonDown("Jump"))
{
if (isGrounded)
{
theRB.velocity = new Vector2(theRB.velocity.x, jumpForce);
}
else
{
if(canDoubleJump)
{
theRB.velocity = new Vector2(theRB.velocity.x, jumpForce);
canDoubleJump = false;
}
}
}
if(theRB.velocity.x > 0)
{
theSR.flipX = true;
} else if(theRB.velocity.x < 0)
{
theSR.flipX = false;
}
anim.SetFloat("moveSpeed", Mathf.Abs( theRB.velocity.x));
anim.SetBool("isGrounded", isGrounded);
}
}
'''
Your problem is that your two functions defined for CanMove and CantMove are declared inside of the Update function body... which makes them locally scoped functions which means they can never have public access and can only be called from within the Update function itself.
Move these two functions outside of the Update function body like this...
void Update() {
...
}
public void CantMove() {
canMove = false;
}
public void CanMove() {
canMove = true;
}
NOTE: I'm using Xamarin.Mac, but I believe the intent and/or missteps should be clear enough to most swift Cocoa developers.
PROBLEM:
My custom filter is not being applied to the view it backs.
EXAMPLE
var builtInFilter = new CIColorInvert();
builtInFilter.SetDefaults();
var customFilter = new HazeFilter();
customFilter.SetDefaults();
//In both cases here, the Image and OutputImage properties will have a null value
Layer.Filters = new CIFilter[1]{builtInFilter}; //Works
Layer.Filters = new CIFilter[1]{customFilter}; //Does nothing
The problem isn't that my custom filter isn't capable of doing anything. When I assign its Image property and draw its OutputImage it works as expected.
This tells me that the Kernel and OutputImage method are functioning properly.
Source Code
HazeFilter.cs
public class HazeFilter : CIFilter
{
static CIKernel hazeRemovalKernel;
public HazeFilter () : base()
{
if (hazeRemovalKernel == null) {
hazeRemovalKernel = CIKernel.FromProgramSingle(#"
kernel vec4 myHazeRemovalKernel(sampler src, __color color, float distance, float slope)
{
vec4 t;
float d;
d = destCoord().y * slope + distance;
t = unpremultiply(sample(src, samplerCoord(src)));
t = (t-d*color)/(1.0-d);
return premultiply(t);
}");
}
}
public override void SetDefaults ()
{
base.SetDefaults ();
inputColor = CIColor.FromCGColor (NSColor.Purple.CGColor);
inputDistance = 0.8;
inputSlope = 0.002;
}
CIImage image;
[Export("inputImage")]
public new CIImage Image
{
get { return image; }
set {
WillChangeValue ("inputImage");
image = value;
DidChangeValue ("inputImage");
}
}
CIColor color;
[Export("inputColor")]
public CIColor inputColor
{
get { return color; }
set {
WillChangeValue ("inputColor");
color = value;
DidChangeValue ("inputColor");
}
}
NSNumber distance;
[Export("inputDistance")]
public NSNumber inputDistance{
get { return distance; }
set {
WillChangeValue ("inputDistance");
distance = value;
DidChangeValue ("inputDistance");
}
}
NSNumber slope;
[Export("inputSlope")]
public NSNumber inputSlope{
get { return slope; }
set {
WillChangeValue ("inputSlope");
slope = value;
DidChangeValue ("inputSlope");
}
}
[Export("outputImage")]
public new CIImage OutputImage
{
get
{
if (Image == null)
return null;
var inputSampler = new CISampler (Image);
var argumentArray = NSArray.FromNSObjects (new NSObject[] {
inputSampler,
Runtime.GetNSObject (inputColor.Handle),
inputDistance,
inputSlope
});
return Apply (hazeRemovalKernel, argumentArray, null);
}
}
}
Canvas.cs
public class Canvas : NSView
{
public Canvas (CGRect rect) : base(rect)
{
WantsLayer = true;
Layer.BackgroundColor = NSColor.Clear.CGColor;
Layer.MasksToBounds = true;
Layer.NeedsDisplayOnBoundsChange = true;
LayerUsesCoreImageFilters = true;
}
CIFilter effect;
public CIFilter Effect {
get { return effect; }
set{
effect = value;
Layer.Filters = new CIFilter[1]{Effect};
NeedsDisplay = true;
}
}
}
Instantiation
var builtInFilter = new CIColorInvert();
builtInFilter.SetDefaults();
var customFilter = new HazeFilter();
customFilter.SetDefaults();
wrapPanel.Effect = builtInFilter; //Works as expected
wrapPanel.Effect = customFilter; //Does nothing
Here's my requirement: Using DirectX11 (via SlimDX) I have to download a series of verteces and use them to create a Texture2D of a map of county borders. Then I need to do the same thing with state borders, and draw them over the county borders. Then, I need to take that texture and create 2 different textures from it, each containing unique radar data. Then I want to take those textures and display them so that the user can look at, for example, base reflectivity and base velocity side by side. The user should be able to zoom in and out of particular areas of the map.
Here's what I've got working: I'm creating my Texture2D without multisampling or depth on a billboard which is displaying in 2 separate views. But it looks blocky, and if you zoom too far out, some of the borders start to disappear.
Here are my issues:
1) I can't for the life of me get any multisampling quality. I'm using an ATI Radeon HD 5750, so I know it must be able to do it, but no formats I've tried support a quality greater than 0.
2) I'm uncertain whether I need to use a depth stencil since I'm drawing all these textures on top of each other. I hope not because when I try, the ShaderResourceView says, "Puny Human! You cannot use a depth stencil format in a ShaderResourceView! Bwa ha ha!" (I'm embellishing)
I'm willing to bet that a lot of these issues would be solved if I just drew the primitives directly into the world space, but when I do that rendering takes way too long because there are so many lines to render. Is there perhaps a way I can cut down on the time it takes?
And here's the code of my last working version:
using SlimDX;
using SlimDX.D3DCompiler;
using SlimDX.Direct3D11;
using SlimDX.DXGI;
using SlimDX.Windows;
using System;
using System.Windows.Forms;
using System.Collections.Generic;
using Device = SlimDX.Direct3D11.Device;
using Buffer = SlimDX.Direct3D11.Buffer;
using Resource = SlimDX.Direct3D11.Resource;
using Format = SlimDX.DXGI.Format;
using MapFlags = SlimDX.Direct3D11.MapFlags;
namespace Radar
{
abstract public class Renderer
{
protected static Device mDevice = null;
protected SwapChain mSwapChain = null;
protected RenderTargetView RenderTarget { get; set; }
public static Device Device { get { return mDevice; } protected set { mDevice = value; } }
public static DeviceContext Context { get { return Device.ImmediateContext; } }
protected SwapChain SwapChain { get { return mSwapChain; } set { mSwapChain = value; } }
public Texture2D Texture { get; protected set; }
protected int RenderTargetIndex { get; set; }
protected VertexShader VertexShader { get; set; }
protected PixelShader PixelShader { get; set; }
protected Buffer VertexBuffer { get; set; }
protected Buffer MatrixBuffer { get; set; }
protected InputLayout Layout { get; set; }
protected ShaderSignature InputSignature { get; set; }
protected SamplerState SamplerState { get; set; }
protected Color4 mClearColor = new Color4(0.117f, 0.117f, 0.117f);
protected Color4 ClearColor { get { return mClearColor; } }
protected void CreateDevice(IntPtr inHandle)
{
if (Device == null)
Device = new Device(DriverType.Hardware, DeviceCreationFlags.Debug);
SwapChainDescription chainDescription = new SwapChainDescription()
{
BufferCount = 2,
Usage = Usage.RenderTargetOutput,
OutputHandle = inHandle,
IsWindowed = true,
ModeDescription = new ModeDescription(0, 0, new Rational(60, 1), Format.R8G8B8A8_UNorm),
SampleDescription = new SampleDescription(8, 0),
Flags = SwapChainFlags.AllowModeSwitch,
SwapEffect = SwapEffect.Discard
};
SwapChain = new SwapChain(Device.Factory, Device, chainDescription);
}
protected void SetupViewport(int inWidth, int inHeight)
{
Viewport viewport = new Viewport(0.0f, 0.0f, inWidth, inHeight);
Context.OutputMerger.SetTargets(RenderTarget);
Context.Rasterizer.SetViewports(viewport);
}
public void Clear()
{
Context.ClearRenderTargetView(RenderTarget, ClearColor);
}
public void Present()
{
SwapChain.Present(0, PresentFlags.None);
}
// I do this to ensure the texture is correct
public void Save()
{
Texture2D.ToFile(Context, Texture, ImageFileFormat.Png, "test.png");
}
public virtual void Dispose()
{
Texture.Dispose();
SamplerState.Dispose();
VertexBuffer.Dispose();
Layout.Dispose();
InputSignature.Dispose();
VertexShader.Dispose();
PixelShader.Dispose();
RenderTarget.Dispose();
SwapChain.Dispose();
Device.Dispose();
}
public class RenderTargetParameters
{
public int Width { get; set; }
public int Height { get; set; }
public IntPtr Handle { get; set; }
public RenderTargetParameters()
{
Width = 0;
Height = 0;
Handle = new IntPtr(0);
}
}
public abstract void Render(int inWidth, int inHeight, int inCount = -1);
public abstract void Prepare(string inShaderName = null);
}
public class TextureRenderer : Renderer
{
public TextureRenderer(RenderTargetParameters inParms)
{
CreateDevice(inParms.Handle);
Texture2DDescription description = new Texture2DDescription()
{
Width = inParms.Width,
Height = inParms.Height,
MipLevels = 1,
ArraySize = 1,
Format = Format.R8G8B8A8_UNorm,
SampleDescription = new SampleDescription(8, 0),
Usage = ResourceUsage.Default,
BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None
};
Texture = new Texture2D(Device, description);
RenderTarget = new RenderTargetView(Device, Texture);
SetupViewport(inParms.Width, inParms.Height);
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "VShader", "vs_5_0", ShaderFlags.Debug, EffectFlags.None))
{
InputSignature = ShaderSignature.GetInputSignature(bytecode);
VertexShader = new VertexShader(Device, bytecode);
}
// load and compile the pixel shader
InputElement[] elements = new[] { new InputElement("POSITION", 0, Format.R32G32B32_Float, 0) };
Layout = new InputLayout(Device, InputSignature, elements);
Context.InputAssembler.InputLayout = Layout;
Context.InputAssembler.PrimitiveTopology = PrimitiveTopology.LineStrip;
Context.VertexShader.Set(VertexShader);
}
public override void Prepare(string inShaderName)
{
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", inShaderName, "ps_4_0", ShaderFlags.Debug, EffectFlags.None))
PixelShader = new PixelShader(Device, bytecode);
Context.PixelShader.Set(PixelShader);
}
public void SetVertices(DataStream inShape)
{
VertexBuffer = new Buffer(Device, inShape, (int)inShape.Length, ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(VertexBuffer, 12, 0));
}
public override void Render(int inWidth, int inHeight, int inCount = -1)
{
Context.Draw(inCount, 0);
}
}
public class RuntimeRenderer : Renderer
{
private ShaderResourceView ResourceView { get; set; }
public RuntimeRenderer(RenderTargetParameters inParms, ref TextureRenderer inTextureRenderer)
{
CreateDevice(inParms.Handle);
Texture = inTextureRenderer.Texture;
using (Resource resource = Resource.FromSwapChain<Texture2D>(SwapChain, 0))
RenderTarget = new RenderTargetView(Device, resource);
//using (var factory = SwapChain.GetParent<Factory>())
//factory.SetWindowAssociation(inParms.Handle, WindowAssociationFlags.IgnoreAltEnter);
}
public void Resize()
{
RenderTarget.Dispose();
SwapChain.ResizeBuffers(2, 0, 0, Format.R8G8B8A8_UNorm, SwapChainFlags.AllowModeSwitch);
using (SlimDX.Direct3D11.Resource resource = Resource.FromSwapChain<Texture2D>(SwapChain, 0))
RenderTarget = new RenderTargetView(Device, resource);
}
public override void Prepare(string inShaderName)
{
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "TextureVertexShader", "vs_4_0", ShaderFlags.EnableStrictness, EffectFlags.None))
{
InputSignature = ShaderSignature.GetInputSignature(bytecode);
VertexShader = new VertexShader(Device, bytecode);
}
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "TexturePixelShader", "ps_4_0", ShaderFlags.EnableStrictness, EffectFlags.None))
PixelShader = new PixelShader(Device, bytecode);
InputElement[] elements = new InputElement[2];
elements[0].SemanticName = "POSITION";
elements[0].SemanticIndex = 0;
elements[0].Format = Format.R32G32B32_Float;
elements[0].Slot = 0;
elements[0].AlignedByteOffset = 0;
elements[0].Classification = InputClassification.PerVertexData;
elements[0].InstanceDataStepRate = 0;
elements[1].SemanticName = "TEXCOORD";
elements[1].SemanticIndex = 0;
elements[1].Format = Format.R32G32_Float;
elements[1].Slot = 0;
elements[1].AlignedByteOffset = InputElement.AppendAligned;
elements[1].Classification = InputClassification.PerVertexData;
elements[1].InstanceDataStepRate = 0;
Layout = new InputLayout(Device, InputSignature, elements);
BufferDescription matrixDescription = new BufferDescription()
{
Usage = ResourceUsage.Dynamic,
SizeInBytes = sizeof(float) * 16 * 4,
BindFlags = BindFlags.ConstantBuffer,
CpuAccessFlags = CpuAccessFlags.Write,
OptionFlags = ResourceOptionFlags.None,
StructureByteStride = 0
};
MatrixBuffer = new Buffer(Device, matrixDescription);
ShaderResourceViewDescription resourceViewDescription = new ShaderResourceViewDescription()
{
Format = Texture.Description.Format,
Dimension = ShaderResourceViewDimension.Texture2DMultisampled,
MipLevels = Texture.Description.MipLevels,
MostDetailedMip = 0,
};
//Texture2D.ToFile(Context, Texture, ImageFileFormat.Png, "test.png");
ResourceView = new ShaderResourceView(Device, Texture, resourceViewDescription);
SamplerDescription samplerDescription = new SamplerDescription()
{
Filter = Filter.MinMagMipLinear,
AddressU = TextureAddressMode.Wrap,
AddressV = TextureAddressMode.Wrap,
AddressW = TextureAddressMode.Wrap,
MipLodBias = 0.0f,
MaximumAnisotropy = 1,
ComparisonFunction = Comparison.Always,
BorderColor = ClearColor,
MinimumLod = 0,
MaximumLod = 99999
};
SamplerState = SamplerState.FromDescription(Device, samplerDescription);
}
public override void Render(int inWidth, int inHeight, int inCount = -1)
{
Clear();
Billboard.SetVerteces(Device, Texture.Description.Width, Texture.Description.Height, inWidth, inHeight);
SetupViewport(inWidth, inHeight);
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(Billboard.Verteces, 20, 0));
Context.InputAssembler.SetIndexBuffer(Billboard.Indeces, Format.R32_UInt, 0);
Context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleStrip;
Context.InputAssembler.InputLayout = Layout;
Context.VertexShader.Set(VertexShader);
Context.PixelShader.Set(PixelShader);
Context.PixelShader.SetSampler(SamplerState, 0);
Context.VertexShader.SetConstantBuffer(MatrixBuffer, 0);
Context.PixelShader.SetConstantBuffer(MatrixBuffer, 0);
Context.PixelShader.SetShaderResource(ResourceView, 0);
Context.DrawIndexed(4, 0, 0);
Present();
}
}
}
Image 1 is what it looks like if I save the texture to a file (I scaled this down a LOT so it would fit in my post).
Image 2 is what it looks like in runtime when viewed at about a medium distance (not ideal, but not so bad)
Image 3 is what it looks like zoomed in to a county (Eww! Blocky and fuzzy!)
Image 4 is what it looks like zoomed out (where did all the borders go?)
About multisampling, generally you can keep quality to 0, quality setting generally are different "subpixels" (aka : samples) patterns. 0 generally does fine.
In case you render to texture with multisampling, you also need to resolve your resource, multi sampled textures are bound as Texture2DMS (instead of Texture2D) in shaders.
To do so, you need to create a second texture (with same format/size), but with only one sample.
Then once you're done rendering your multisampled texture, you need to do the following call:
deviceContext.ResolveSubresource(multisampledtexture, 0, nonmultisampledtexture,
0, format);
You can then use the ShaderView of the non multisampled texture in subsequent passes.
From what I see you should not need to use a depth stencil, just make sure you draw your elements in the correct order.
About formats, this is normal since depth is a bit "special", you need to pass different formats for resource/views. If you want to use D24_UNorm_S8_UInt (most common format i'd say), you need to setup the following:
In the texture description, format needs to be Format.R24_UNorm_X8_Typeless
In the Depth Stencil view description, Format.D24_UNorm_S8_UInt
In the shader view description, Format.R24_UNorm_X8_Typeless
That will allow you to build a depth stencil that you can read (if you don't need to read your depth buffer, just ignore shader view and use depth format directly).
Also you can increase quality by using mipmaps (which would help a lot, specially when zooming out).
To do so, in your texture description, set the following options (make sure that this texture is not multisampled)
texBufferDesc.OptionFlags |= ResourceOptionFlags.GenerateMipMaps;
texBufferDesc.MipLevels = 0; //0 means "all"
once you're done with your rendering, call:
context.GenerateMips
using the shader resource view of the texture that just got rendered.
About drawing the lines directly behind that's definitely possible, and for certain will give you the best quality.
Not sure how many lines you render, but it doesn't look like something a reasonably modern card would struggle with. And a bit of culling can easily help discard lines that are out of the screen so they don't get drawn.
You could also do some "hybrid" (use texture when zoomed out, render a subset of the lines when zoomed in), that's not too hard to setup either.
I'm pulling my hair out trying the figure this out. I have a simple button, that checks for the mouse to be over, and then changes the texture if it is. It works fine. However, when I add a camera into the mix, it breaks everything. I've tried transforming both the mouse and rectangle I use for bounding-box collision, and it won't work. Here's my code for the button:
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Input;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace x.Graphics.UI
{
public enum ButtonStates
{
Normal,
Hover,
Pressed
}
public delegate void ButtonPress();
public class Button
{
public Texture2D Texture
{
get
{
Texture2D result = null;
switch (ButtonState)
{
case ButtonStates.Normal:
result = NormalTexture;
break;
case ButtonStates.Hover:
result = HoverTexture;
break;
case ButtonStates.Pressed:
result = DownTexture;
break;
}
return result;
}
}
public Vector2 Position { get; set; }
public event ButtonPress ButtonPressed;
public ButtonStates ButtonState { get; set; }
public Rectangle CollisionRect { get; set; }
private Texture2D NormalTexture;
private Texture2D HoverTexture;
private Texture2D DownTexture;
private MouseState mouseState;
private MouseState previousMouseState;
public Button(Texture2D normalTexture, Texture2D hoverTexture, Texture2D downTexture,
Vector2 position)
{
NormalTexture = normalTexture;
HoverTexture = hoverTexture;
DownTexture = downTexture;
Position = position;
mouseState = Mouse.GetState();
previousMouseState = mouseState;
CollisionRect = new Rectangle((int)Position.X, (int)Position.Y,
Texture.Width,
Texture.Height);
}
public void Update (MouseState currentState)
{
mouseState = currentState;
if (CollisionRect.Contains(new Point(mouseState.X, mouseState.Y)))
{
if (mouseState.LeftButton == Microsoft.Xna.Framework.Input.ButtonState.Pressed)
{
ButtonState = ButtonStates.Pressed;
ButtonPressed();
}
else
ButtonState = ButtonStates.Hover;
}
else
ButtonState = ButtonStates.Normal;
}
public void Update(MouseState currentState, Camera camera)
{
Vector2 mouse = new Vector2(mouseState.X, mouseState.Y);
mouse = Vector2.Transform(mouse, camera.InverseTransform);
CollisionRect = CalculateTransformedBoundingBox(CollisionRect, c.InverseTransform);
Console.WriteLine("Rectangle[X: {0}, y: {1}], Mouse:[X: {2}, Y: {3}]", CollisionRect.X, CollisionRect.Y, mouse.X, mouse.Y);
if (CollisionRect.Contains(new Point((int)mouse.X, (int)mouse.Y)))
{
if (mouseState.LeftButton == Microsoft.Xna.Framework.Input.ButtonState.Pressed)
{
ButtonState = ButtonStates.Pressed;
ButtonPressed();
}
else
ButtonState = ButtonStates.Hover;
}
else
ButtonState = ButtonStates.Normal;
}
public void Draw(SpriteBatch spriteBatch)
{
spriteBatch.Draw(Texture, Position, null, Color.White, 0.0f,
Vector2.Zero, 1.0f, SpriteEffects.None, 1.0f);
}
private Rectangle CalculateTransformedBoundingBox(Rectangle local, Matrix toWorldSpace)
{
Vector2 leftTop = new Vector2(local.Left, local.Top);
Vector2 rightTop = new Vector2(local.Right, local.Top);
Vector2 leftBottom = new Vector2(local.Left, local.Bottom);
Vector2 rightBottom = new Vector2(local.Right, local.Bottom);
Vector2.Transform(ref leftTop, ref toWorldSpace,
out leftTop);
Vector2.Transform(ref rightTop, ref toWorldSpace,
out rightTop);
Vector2.Transform(ref leftBottom, ref toWorldSpace,
out leftBottom);
Vector2.Transform(ref rightBottom, ref toWorldSpace,
out rightBottom);
// Find the minimum and maximum extents of the
// rectangle in world space
Vector2 min = Vector2.Min(Vector2.Min(leftTop, rightTop),
Vector2.Min(leftBottom, rightBottom));
Vector2 max = Vector2.Max(Vector2.Max(leftTop, rightTop),
Vector2.Max(leftBottom, rightBottom));
// Return that as a rectangle
return new Rectangle((int)min.X, (int)min.Y,
(int)(max.X - min.X), (int)(max.Y - min.Y));
}
}
}
And my code for the camera:
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Input;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace x.Graphics
{
public class Camera
{
protected float _zoom;
protected Matrix _transform;
protected Matrix _inverseTransform;
protected Vector2 _pos;
protected float _rotation;
protected Viewport _viewport;
protected MouseState _mState;
protected KeyboardState _keyState;
protected Int32 _scroll;
public float Zoom
{
get { return _zoom; }
set { _zoom = value; }
}
public Matrix Transform
{
get { return _transform; }
set { _transform = value; }
}
public Matrix InverseTransform
{
get { return _inverseTransform; }
}
public Vector2 Pos
{
get { return _pos; }
set { _pos = value; }
}
public float Rotation
{
get { return _rotation; }
set { _rotation = value; }
}
public Camera(Viewport viewport)
{
_zoom = 1.0f;
_scroll = 1;
_rotation = 0.0f;
_pos = Vector2.Zero;
_viewport = viewport;
}
public void Update()
{
Input();
MathHelper.Clamp(_zoom, 0.01f, 10.0f);
_rotation = ClampAngle(_rotation);
_transform = Matrix.CreateRotationZ(_rotation) *
Matrix.CreateScale(new Vector3(_zoom, _zoom, 1)) *
Matrix.CreateTranslation(_pos.X, _pos.Y, 0);
_inverseTransform = Matrix.Invert(_transform);
}
protected virtual void Input()
{
_mState = Mouse.GetState();
_keyState = Keyboard.GetState();
//Check Move
if (_keyState.IsKeyDown(Keys.A))
{
_pos.X += 10f;
}
if (_keyState.IsKeyDown(Keys.D))
{
_pos.X -= 10f;
}
if (_keyState.IsKeyDown(Keys.W))
{
_pos.Y -= 10f;
}
if (_keyState.IsKeyDown(Keys.S))
{
_pos.Y += 10f;
}
}
protected float ClampAngle(float radians)
{
while (radians < -MathHelper.Pi)
{
radians += MathHelper.TwoPi;
}
while (radians > MathHelper.Pi)
{
radians -= MathHelper.TwoPi;
}
return radians;
}
}
}
I'm not 100% sure what's wrong, but the mouse position only changes when I press a button. I'm really confused, I've never worked with cameras before. Any help would be really appreciated. Thanks!
UPDATE:
It detects the mouse as being over the button before I try and move the camera. After that, the coorindates of the rectangle are continually incremented.
Don't transform bounding box... is easy to transform mouse coordinates ... ;)
Use the inverse transform of your camera matrix to transform mouse coords to the same space of your bounding box.