Duplicate Windows 7 Image Folder 3d Thumbnail - windows-7

I want to programmatically create a thumbnail from a folder of images that looks similar to the Windows 7 image-folder style, like the picture I uploaded here. I have a routine that stacks images on top of each other and rotates them on the x-axis, but I think I need some Y rotation (or something similar) to complete the illusion. I have actually done this with the Windows API Code Pack's thumbnail methods, but that approach seems to force Explorer into Large Icons mode. I do not want this to depend on Explorer, nor do I want to use WPF (Viewport3D). Here is what I want it to look like:
FinalImage http://www.chuckcondron.com/folderexample.JPG
Here is what I have done so far:
StitchedImageThumb http://www.chuckcondron.com/stitchedImageThumb.jpg
As you can see, my attempt is not that pretty, nor do the pictures come out of the cream-colored folder (I could really use the actual folder image as well, but I am not sure how to do that).
Current code (C#):
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Drawing.Imaging;
using System.IO;
using System.Windows.Forms;
namespace ThumbnailCreator
{
public partial class Form1: Form
{
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
//get all the files in a directory
string[] files = Directory.GetFiles(@"C:\MyImages");
//combine them into one image
Bitmap stitchedImage = Combine(files);
if(File.Exists("stitchedImage.jpg")) File.Delete("stitchedImage.jpg");
//save the new image
stitchedImage.Save(@"stitchedImage.jpg", ImageFormat.Jpeg);
if (File.Exists("stitchedImage.jpg"))
{
FileStream s = File.Open("stitchedImage.jpg", FileMode.Open);
Image temp = Image.FromStream(s);
s.Close();
pictureBox1.Image = temp;
}
CreateThumb(@"stitchedImage.jpg", @"stitchedImageThumb.jpg");
}
public Bitmap Combine(string[] files)
{
//read all images into memory
List<Bitmap> images = new List<Bitmap>();
Bitmap finalImage = null;
try
{
int width = 0;
int height = 0;
int rotate = 7;
foreach (string image in files)
{
//create a Bitmap from the file and add it to the list
Bitmap bitmap = new Bitmap(image);
bitmap = RotateImage(bitmap, rotate);
//update the size of the final bitmap
//width += bitmap.Width;
width = 2500;
//height = bitmap.Height > height ? bitmap.Height : height;
height = 1000;
images.Add(bitmap);
rotate += 5;
}
//create a bitmap to hold the combined image
finalImage = new Bitmap(width, height);
//get a graphics object from the image so we can draw on it
using (Graphics g = Graphics.FromImage(finalImage))
{
//set background color
g.Clear(Color.Goldenrod);
//go through each image and draw it on the final image
ImageAttributes ia = new ImageAttributes();
ColorMatrix cm = new ColorMatrix();
cm.Matrix33 = 0.75f;
ia.SetColorMatrix(cm);
foreach (Bitmap image in images)
{
Image rotatedImage = new Bitmap(image);
g.DrawImage(rotatedImage, new Rectangle(0, 0, image.Width, image.Height), 0, 0, image.Width, image.Height, GraphicsUnit.Pixel, ia);
}
}
return finalImage;
}
catch (Exception ex)
{
if (finalImage != null)
finalImage.Dispose();
throw; // rethrow without resetting the stack trace
}
finally
{
//clean up memory
foreach (System.Drawing.Bitmap image in images)
{
image.Dispose();
}
}
}
public void CreateThumb(string source, string destination)
{
Image imgThumb = null;
try
{
Image image = null;
// Check if image exists
image = Image.FromFile(source);
if (image != null)
{
imgThumb = image.GetThumbnailImage(100, 100, null, new IntPtr());
imgThumb.Save(destination);
image.Dispose();
}
}
catch
{
MessageBox.Show("An error occured");
}
if (File.Exists(destination))
{
FileStream s = File.Open(destination, FileMode.Open);
Image temp = Image.FromStream(s);
s.Close();
pictureBox2.Image = temp;
}
}
private Bitmap RotateImage(Bitmap inputImg, double degreeAngle)
{
//Corners of the image
PointF[] rotationPoints = { new PointF(0, 0),
new PointF(inputImg.Width, 0),
new PointF(0, inputImg.Height),
new PointF(inputImg.Width, inputImg.Height)};
//Rotate the corners
PointMath.RotatePoints(rotationPoints, new PointF(inputImg.Width / 2.0f, inputImg.Height / 2.0f), degreeAngle);
//Get the new bounds given from the rotation of the corners
//(avoid clipping of the image)
Rectangle bounds = PointMath.GetBounds(rotationPoints);
//An empty bitmap to draw the rotated image on
Bitmap rotatedBitmap = new Bitmap(bounds.Width * 2, bounds.Height * 2);
using (Graphics g = Graphics.FromImage(rotatedBitmap))
{
g.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.HighQuality;
g.InterpolationMode = InterpolationMode.HighQualityBicubic;
//Transformation matrix
Matrix m = new Matrix();
m.RotateAt((float)degreeAngle, new PointF(inputImg.Width / 2.0f, inputImg.Height / 2.0f));
m.Translate(-bounds.Left, -bounds.Top, MatrixOrder.Append); //shift to compensate for the rotation
g.Transform = m;
g.DrawImage(inputImg, 0, 0);
}
return rotatedBitmap;
}
}
}
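One way to approximate the missing Y-axis rotation without WPF would be to map each photo onto a skewed parallelogram using the three-point DrawImage overload. GDI+ only does an affine skew (no true perspective foreshortening), but it gets reasonably close. A rough sketch; the helper name and the corner offsets below are guesses, not Windows 7's exact folder geometry:
// Hedged sketch: fake a Y-axis tilt by mapping the photo onto a parallelogram.
// The three points are the destinations for the image's upper-left, upper-right
// and lower-left corners; GDI+ infers the lower-right corner.
private Bitmap FakePerspective(Bitmap source, int width, int height)
{
    Bitmap result = new Bitmap(width, height);
    using (Graphics g = Graphics.FromImage(result))
    {
        g.SmoothingMode = SmoothingMode.HighQuality;
        g.InterpolationMode = InterpolationMode.HighQualityBicubic;
        Point[] destination =
        {
            new Point(0, (int)(height * 0.10)),                   // upper-left, dropped slightly
            new Point((int)(width * 0.85), 0),                    // upper-right, raised and pulled in
            new Point((int)(width * 0.05), (int)(height * 0.95))  // lower-left
        };
        g.DrawImage(source, destination);
    }
    return result;
}
Calling this on each photo before stacking them (instead of, or in addition to, RotateImage) should make the images lean back into the folder rather than just spinning in the plane.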

Related

How to fix orientation of Photo Taken from phone camera and bind it to image view?

I am facing some issues when trying to bind a photo taken from the camera through my application to an ImageView, and also when binding an image from the gallery.
I use a Micromax phone for testing our application, and it works fine there, binding the image as-is. But when we switch phones, for example to a Samsung, the original orientation differs from the hardware orientation, so the image is bound with the orientation set by the hardware.
I tried to correct the orientation, but it was not working at all.
Can anyone please help me solve this problem?
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Android.App;
using Android.Content;
using Android.OS;
using Android.Runtime;
using Android.Views;
using Android.Widget;
using Java.IO;
using Android.Graphics;
using Android.Provider;
using Android.Content.PM;
using MyApplication.Droid;
using Android.Media;
namespace MyApplication.Droid
{
[Activity(Label = "CameraActivity")]
public class CameraActivity : Activity
{
File _file;
File _dir;
Bitmap bitmap;
ImageView _imageView, _imageView1;
int PickImageId = 1;
protected override void OnCreate(Bundle savedInstanceState)
{
base.OnCreate(savedInstanceState);
SetContentView(Resource.Layout.Camera);
if (IsThereAnAppToTakePictures())
{
CreateDirectoryForPictures();
Button button = FindViewById<Button>(Resource.Id.TakePik);
_imageView = FindViewById<ImageView>(Resource.Id.imageView2);
_imageView1= FindViewById<ImageView>(Resource.Id.imageView1);
button.Click += TakeAPicture;
}
Button button1 = FindViewById<Button>(Resource.Id.Galary);
button1.Click += delegate
{
Intent intent = new Intent();
intent.SetType("image/*");
intent.SetAction(Intent.ActionGetContent);
StartActivityForResult(Intent.CreateChooser(intent, "Select Picture"), PickImageId);
};
// Create your application here
}
private void CreateDirectoryForPictures()
{
_dir = new File(
Android.OS.Environment.GetExternalStoragePublicDirectory(
Android.OS.Environment.DirectoryPictures), "MRohit Task");
if (!_dir.Exists())
{
_dir.Mkdirs();
}
}
private bool IsThereAnAppToTakePictures()
{
Intent intent = new Intent(MediaStore.ActionImageCapture);
IList<ResolveInfo> availableActivities =
PackageManager.QueryIntentActivities(intent, PackageInfoFlags.MatchDefaultOnly);
return availableActivities != null && availableActivities.Count > 0;
}
private void TakeAPicture(object sender, EventArgs eventArgs)
{
Intent intent = new Intent(MediaStore.ActionImageCapture);
_file = new File(_dir, String.Format("MRohit_Task_{0}.jpg", Guid.NewGuid()));
intent.PutExtra(MediaStore.ExtraOutput, Android.Net.Uri.FromFile(_file));
StartActivityForResult(intent, 0);
}
protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
{
if (requestCode == 1)
{
Android.Net.Uri uri = data.Data;
_imageView1.SetImageURI(uri);
}
else
{
base.OnActivityResult(requestCode, resultCode, data);
// Make it available in the gallery
Intent mediaScanIntent = new Intent(Intent.ActionMediaScannerScanFile);
Android.Net.Uri contentUri = Android.Net.Uri.FromFile(_file);
mediaScanIntent.SetData(contentUri);
SendBroadcast(mediaScanIntent);
// string str_file_path = Android.Net.Uri.Parse(_file.Path.ToString()).ToString();
// Display in ImageView. We will resize the bitmap to fit the display.
// Loading the full-sized image will consume too much memory
// and cause the application to crash.
int height = Resources.DisplayMetrics.HeightPixels;
int width = _imageView.Height;
bitmap = _file.Path.LoadAndResizeBitmap(width, height);
ExifInterface ei = new ExifInterface(_file.Path);
int orientation = ei.GetAttributeInt(ExifInterface.TagOrientation,
(int)Android.Media.Orientation.Undefined);
switch (orientation)
{
case (int)Android.Media.Orientation.Rotate90:
rotateImage(bitmap, 90);
break;
case (int)Android.Media.Orientation.Rotate180:
rotateImage(bitmap, 180);
break;
case (int)Android.Media.Orientation.Rotate270:
rotateImage(bitmap, 270);
break;
case (int)Android.Media.Orientation.Normal:
default:
break;
}
if (bitmap != null)
{
_imageView.SetImageBitmap(bitmap);
bitmap = null;
}
// Dispose of the Java side bitmap.
GC.Collect();
}
}
public static Bitmap rotateImage(Bitmap source, float angle)
{
Matrix matrix = new Matrix();
matrix.PostRotate(angle);
return Bitmap.CreateBitmap(source, 0, 0, source.Width, source.Height,
matrix, true);
}
}
//To crop the image size
public static class BitmapHelpers
{
public static Bitmap LoadAndResizeBitmap(this string fileName, int width, int height)
{
// First we get the dimensions of the file on disk
BitmapFactory.Options options = new BitmapFactory.Options { InJustDecodeBounds = true };
BitmapFactory.DecodeFile(fileName, options);
// Next we calculate the ratio that we need to resize the image by
// in order to fit the requested dimensions.
int outHeight = options.OutHeight;
int outWidth = options.OutWidth;
int inSampleSize = 1;
if (outHeight > height || outWidth > width)
{
inSampleSize = outWidth > outHeight
? outHeight / height
: outWidth / width;
}
// Now we will load the image and have BitmapFactory resize it for us.
options.InSampleSize = inSampleSize;
options.InJustDecodeBounds = false;
Bitmap resizedBitmap = BitmapFactory.DecodeFile(fileName, options);
return resizedBitmap;
}
}
}
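One thing that stands out in the snippet above: rotateImage returns a rotated copy, but its return value is discarded in the switch, so the ImageView still receives the original bitmap. A minimal sketch with the result assigned back (only the switch changes):
// Hedged guess at the orientation fix: assign the rotated bitmap back,
// otherwise the rotation is computed and then thrown away.
switch (orientation)
{
    case (int)Android.Media.Orientation.Rotate90:
        bitmap = rotateImage(bitmap, 90);
        break;
    case (int)Android.Media.Orientation.Rotate180:
        bitmap = rotateImage(bitmap, 180);
        break;
    case (int)Android.Media.Orientation.Rotate270:
        bitmap = rotateImage(bitmap, 270);
        break;
    default:
        break; // Normal and Undefined need no rotation
}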

ZXing QR Code Generation in Xamarin Forms PCL

I'm trying to generate and display a QR code using the ZXing package. I tried the following code, but I was not able to show the QR code; it displays a blank (transparent) image.
private void OnGenerateQRCodeButton_Clicked(object sender, EventArgs e)
{
var writer = new BarcodeWriter
{
Format = BarcodeFormat.QR_CODE,
Options = new EncodingOptions
{
Height = (int)imageCompanyLogo.Height,
Width = (int) imageCompanyLogo.Width,
Margin = 0,
PureBarcode = true
}
};
var bitmap = writer.Write("www.helloworld.com");
imageQRCode.Source = ImageSource.FromStream(() => new MemoryStream(bitmap));
}
Please suggest a way to do it. Thanks.
Create an interface in the PCL (Xamarin) project for the DependencyService.
Create a class in the native (Xamarin.Droid) project and inherit from the PCL interface.
Implement the method as shown below.
public Stream ConvertImageStream(string text, int width = 300, int height = 300)
{
var barcodeWriter = new ZXing.Mobile.BarcodeWriter
{
Format = ZXing.BarcodeFormat.QR_CODE,
Options = new ZXing.Common.EncodingOptions
{
Width = width,
Height = height,
Margin = 10
}
};
barcodeWriter.Renderer = new ZXing.Mobile.BitmapRenderer();
var bitmap = barcodeWriter.Write(text);
var stream = new MemoryStream();
bitmap.Compress(Bitmap.CompressFormat.Png, 100, stream); // this is the diff between iOS and Android
stream.Position = 0;
return stream;
}
Call the method from the PCL (Xamarin) project using the DependencyService.
In the page's .xaml.cs:
private void OnGenerateQRCodeButton_Clicked(object sender, EventArgs e)
{
string barcodeText = "www.helloworld.com";
var stream = DependencyService.Get<IBarCodeServices>().ConvertImageStream(barcodeText, (int)imageCompanyLogo.Width,(int) imageCompanyLogo.Height);
barcodeImage.Source = ImageSource.FromStream(() => stream);
}
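For completeness, the PCL side behind DependencyService.Get<IBarCodeServices>() might look like the sketch below. The interface name comes from the call above; the implementing class name and the Dependency registration are assumptions:
// Hedged sketch of the PCL interface used by the DependencyService call.
using System.IO;
public interface IBarCodeServices
{
    Stream ConvertImageStream(string text, int width = 300, int height = 300);
}
// In the Xamarin.Droid project, register the implementation (class name assumed):
// [assembly: Xamarin.Forms.Dependency(typeof(BarCodeService))]
// public class BarCodeService : IBarCodeServices { /* ConvertImageStream from above */ }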
Debug and check whether the stream is empty or not.
Stream stream = null;
private void OnGenerateQRCodeButton_Clicked(object sender, EventArgs e)
{
var writer = new BarcodeWriter
{
Format = BarcodeFormat.QR_CODE,
Options = new EncodingOptions
{
Height = (int)imageCompanyLogo.Height,
Width = (int) imageCompanyLogo.Width,
Margin = 0,
PureBarcode = true
}
};
using (var bitmap = writer.Write("www.helloworld.com"))
{
stream = new MemoryStream();
bitmap.Save(stream, ImageFormat.Png);
stream.Seek(0, SeekOrigin.Begin);
}
imageQRCode.Source = ImageSource.FromStream(() => stream);
}

DirectX newb - Multisampled Texture2D with depth on a Billboard

Here's my requirement: Using DirectX11 (via SlimDX) I have to download a series of vertices and use them to create a Texture2D of a map of county borders. Then I need to do the same thing with state borders and draw them over the county borders. Then I need to take that texture and create two different textures from it, each containing unique radar data. Finally, I want to take those textures and display them so that the user can look at, for example, base reflectivity and base velocity side by side. The user should be able to zoom in and out of particular areas of the map.
Here's what I've got working: I'm creating my Texture2D without multisampling or depth on a billboard which is displaying in 2 separate views. But it looks blocky, and if you zoom too far out, some of the borders start to disappear.
Here are my issues:
1) I can't for the life of me get any multisampling quality. I'm using an ATI Radeon HD 5750, so I know it must be able to do it, but no formats I've tried support a quality greater than 0.
2) I'm uncertain whether I need to use a depth stencil since I'm drawing all these textures on top of each other. I hope not because when I try, the ShaderResourceView says, "Puny Human! You cannot use a depth stencil format in a ShaderResourceView! Bwa ha ha!" (I'm embellishing)
I'm willing to bet that a lot of these issues would be solved if I just drew the primitives directly into world space, but when I do that, rendering takes way too long because there are so many lines to render. Is there perhaps a way I can cut down on the time it takes?
And here's the code of my last working version:
using SlimDX;
using SlimDX.D3DCompiler;
using SlimDX.Direct3D11;
using SlimDX.DXGI;
using SlimDX.Windows;
using System;
using System.Windows.Forms;
using System.Collections.Generic;
using Device = SlimDX.Direct3D11.Device;
using Buffer = SlimDX.Direct3D11.Buffer;
using Resource = SlimDX.Direct3D11.Resource;
using Format = SlimDX.DXGI.Format;
using MapFlags = SlimDX.Direct3D11.MapFlags;
namespace Radar
{
abstract public class Renderer
{
protected static Device mDevice = null;
protected SwapChain mSwapChain = null;
protected RenderTargetView RenderTarget { get; set; }
public static Device Device { get { return mDevice; } protected set { mDevice = value; } }
public static DeviceContext Context { get { return Device.ImmediateContext; } }
protected SwapChain SwapChain { get { return mSwapChain; } set { mSwapChain = value; } }
public Texture2D Texture { get; protected set; }
protected int RenderTargetIndex { get; set; }
protected VertexShader VertexShader { get; set; }
protected PixelShader PixelShader { get; set; }
protected Buffer VertexBuffer { get; set; }
protected Buffer MatrixBuffer { get; set; }
protected InputLayout Layout { get; set; }
protected ShaderSignature InputSignature { get; set; }
protected SamplerState SamplerState { get; set; }
protected Color4 mClearColor = new Color4(0.117f, 0.117f, 0.117f);
protected Color4 ClearColor { get { return mClearColor; } }
protected void CreateDevice(IntPtr inHandle)
{
if (Device == null)
Device = new Device(DriverType.Hardware, DeviceCreationFlags.Debug);
SwapChainDescription chainDescription = new SwapChainDescription()
{
BufferCount = 2,
Usage = Usage.RenderTargetOutput,
OutputHandle = inHandle,
IsWindowed = true,
ModeDescription = new ModeDescription(0, 0, new Rational(60, 1), Format.R8G8B8A8_UNorm),
SampleDescription = new SampleDescription(8, 0),
Flags = SwapChainFlags.AllowModeSwitch,
SwapEffect = SwapEffect.Discard
};
SwapChain = new SwapChain(Device.Factory, Device, chainDescription);
}
protected void SetupViewport(int inWidth, int inHeight)
{
Viewport viewport = new Viewport(0.0f, 0.0f, inWidth, inHeight);
Context.OutputMerger.SetTargets(RenderTarget);
Context.Rasterizer.SetViewports(viewport);
}
public void Clear()
{
Context.ClearRenderTargetView(RenderTarget, ClearColor);
}
public void Present()
{
SwapChain.Present(0, PresentFlags.None);
}
// I do this to ensure the texture is correct
public void Save()
{
Texture2D.ToFile(Context, Texture, ImageFileFormat.Png, "test.png");
}
public virtual void Dispose()
{
Texture.Dispose();
SamplerState.Dispose();
VertexBuffer.Dispose();
Layout.Dispose();
InputSignature.Dispose();
VertexShader.Dispose();
PixelShader.Dispose();
RenderTarget.Dispose();
SwapChain.Dispose();
Device.Dispose();
}
public class RenderTargetParameters
{
public int Width { get; set; }
public int Height { get; set; }
public IntPtr Handle { get; set; }
public RenderTargetParameters()
{
Width = 0;
Height = 0;
Handle = new IntPtr(0);
}
}
public abstract void Render(int inWidth, int inHeight, int inCount = -1);
public abstract void Prepare(string inShaderName = null);
}
public class TextureRenderer : Renderer
{
public TextureRenderer(RenderTargetParameters inParms)
{
CreateDevice(inParms.Handle);
Texture2DDescription description = new Texture2DDescription()
{
Width = inParms.Width,
Height = inParms.Height,
MipLevels = 1,
ArraySize = 1,
Format = Format.R8G8B8A8_UNorm,
SampleDescription = new SampleDescription(8, 0),
Usage = ResourceUsage.Default,
BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None
};
Texture = new Texture2D(Device, description);
RenderTarget = new RenderTargetView(Device, Texture);
SetupViewport(inParms.Width, inParms.Height);
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "VShader", "vs_5_0", ShaderFlags.Debug, EffectFlags.None))
{
InputSignature = ShaderSignature.GetInputSignature(bytecode);
VertexShader = new VertexShader(Device, bytecode);
}
// load and compile the pixel shader
InputElement[] elements = new[] { new InputElement("POSITION", 0, Format.R32G32B32_Float, 0) };
Layout = new InputLayout(Device, InputSignature, elements);
Context.InputAssembler.InputLayout = Layout;
Context.InputAssembler.PrimitiveTopology = PrimitiveTopology.LineStrip;
Context.VertexShader.Set(VertexShader);
}
public override void Prepare(string inShaderName)
{
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", inShaderName, "ps_4_0", ShaderFlags.Debug, EffectFlags.None))
PixelShader = new PixelShader(Device, bytecode);
Context.PixelShader.Set(PixelShader);
}
public void SetVertices(DataStream inShape)
{
VertexBuffer = new Buffer(Device, inShape, (int)inShape.Length, ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(VertexBuffer, 12, 0));
}
public override void Render(int inWidth, int inHeight, int inCount = -1)
{
Context.Draw(inCount, 0);
}
}
public class RuntimeRenderer : Renderer
{
private ShaderResourceView ResourceView { get; set; }
public RuntimeRenderer(RenderTargetParameters inParms, ref TextureRenderer inTextureRenderer)
{
CreateDevice(inParms.Handle);
Texture = inTextureRenderer.Texture;
using (Resource resource = Resource.FromSwapChain<Texture2D>(SwapChain, 0))
RenderTarget = new RenderTargetView(Device, resource);
//using (var factory = SwapChain.GetParent<Factory>())
//factory.SetWindowAssociation(inParms.Handle, WindowAssociationFlags.IgnoreAltEnter);
}
public void Resize()
{
RenderTarget.Dispose();
SwapChain.ResizeBuffers(2, 0, 0, Format.R8G8B8A8_UNorm, SwapChainFlags.AllowModeSwitch);
using (SlimDX.Direct3D11.Resource resource = Resource.FromSwapChain<Texture2D>(SwapChain, 0))
RenderTarget = new RenderTargetView(Device, resource);
}
public override void Prepare(string inShaderName)
{
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "TextureVertexShader", "vs_4_0", ShaderFlags.EnableStrictness, EffectFlags.None))
{
InputSignature = ShaderSignature.GetInputSignature(bytecode);
VertexShader = new VertexShader(Device, bytecode);
}
using (ShaderBytecode bytecode = ShaderBytecode.CompileFromFile("ShaderFX.fx", "TexturePixelShader", "ps_4_0", ShaderFlags.EnableStrictness, EffectFlags.None))
PixelShader = new PixelShader(Device, bytecode);
InputElement[] elements = new InputElement[2];
elements[0].SemanticName = "POSITION";
elements[0].SemanticIndex = 0;
elements[0].Format = Format.R32G32B32_Float;
elements[0].Slot = 0;
elements[0].AlignedByteOffset = 0;
elements[0].Classification = InputClassification.PerVertexData;
elements[0].InstanceDataStepRate = 0;
elements[1].SemanticName = "TEXCOORD";
elements[1].SemanticIndex = 0;
elements[1].Format = Format.R32G32_Float;
elements[1].Slot = 0;
elements[1].AlignedByteOffset = InputElement.AppendAligned;
elements[1].Classification = InputClassification.PerVertexData;
elements[1].InstanceDataStepRate = 0;
Layout = new InputLayout(Device, InputSignature, elements);
BufferDescription matrixDescription = new BufferDescription()
{
Usage = ResourceUsage.Dynamic,
SizeInBytes = sizeof(float) * 16 * 4,
BindFlags = BindFlags.ConstantBuffer,
CpuAccessFlags = CpuAccessFlags.Write,
OptionFlags = ResourceOptionFlags.None,
StructureByteStride = 0
};
MatrixBuffer = new Buffer(Device, matrixDescription);
ShaderResourceViewDescription resourceViewDescription = new ShaderResourceViewDescription()
{
Format = Texture.Description.Format,
Dimension = ShaderResourceViewDimension.Texture2DMultisampled,
MipLevels = Texture.Description.MipLevels,
MostDetailedMip = 0,
};
//Texture2D.ToFile(Context, Texture, ImageFileFormat.Png, "test.png");
ResourceView = new ShaderResourceView(Device, Texture, resourceViewDescription);
SamplerDescription samplerDescription = new SamplerDescription()
{
Filter = Filter.MinMagMipLinear,
AddressU = TextureAddressMode.Wrap,
AddressV = TextureAddressMode.Wrap,
AddressW = TextureAddressMode.Wrap,
MipLodBias = 0.0f,
MaximumAnisotropy = 1,
ComparisonFunction = Comparison.Always,
BorderColor = ClearColor,
MinimumLod = 0,
MaximumLod = 99999
};
SamplerState = SamplerState.FromDescription(Device, samplerDescription);
}
public override void Render(int inWidth, int inHeight, int inCount = -1)
{
Clear();
Billboard.SetVerteces(Device, Texture.Description.Width, Texture.Description.Height, inWidth, inHeight);
SetupViewport(inWidth, inHeight);
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(Billboard.Verteces, 20, 0));
Context.InputAssembler.SetIndexBuffer(Billboard.Indeces, Format.R32_UInt, 0);
Context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleStrip;
Context.InputAssembler.InputLayout = Layout;
Context.VertexShader.Set(VertexShader);
Context.PixelShader.Set(PixelShader);
Context.PixelShader.SetSampler(SamplerState, 0);
Context.VertexShader.SetConstantBuffer(MatrixBuffer, 0);
Context.PixelShader.SetConstantBuffer(MatrixBuffer, 0);
Context.PixelShader.SetShaderResource(ResourceView, 0);
Context.DrawIndexed(4, 0, 0);
Present();
}
}
}
Image 1 is what it looks like if I save the texture to a file (I scaled this down a LOT so it would fit in my post).
Image 2 is what it looks like in runtime when viewed at about a medium distance (not ideal, but not so bad)
Image 3 is what it looks like zoomed in to a county (Eww! Blocky and fuzzy!)
Image 4 is what it looks like zoomed out (where did all the borders go?)
About multisampling: you can generally keep the quality at 0. The quality setting mostly selects different "subpixel" (i.e. sample) patterns, and 0 generally does fine.
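If you want to double-check what the hardware actually reports for a given format and sample count, you can query the device. A minimal sketch in SlimDX, assuming the same Device and the 8-sample count used in the question:
// Hedged sketch: ask the device how many quality levels exist for this format/sample count.
int qualityLevels = Device.CheckMultisampleQualityLevels(Format.R8G8B8A8_UNorm, 8);
// Valid quality values run from 0 to qualityLevels - 1, so a result of 1 means
// quality 0 is the only valid option (which matches what you are seeing).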
If you render to a texture with multisampling, you also need to resolve your resource; multisampled textures are bound as Texture2DMS (instead of Texture2D) in shaders.
To do so, you need to create a second texture with the same format and size, but with only one sample.
Then, once you're done rendering your multisampled texture, make the following call:
deviceContext.ResolveSubresource(multisampledtexture, 0, nonmultisampledtexture, 0, format);
You can then use the ShaderResourceView of the non-multisampled texture in subsequent passes.
From what I see you should not need to use a depth stencil, just make sure you draw your elements in the correct order.
About formats: this is normal, since depth is a bit "special" and you need to pass different formats for the resource and its views. If you want to use D24_UNorm_S8_UInt (the most common format, I'd say), you need to set up the following:
In the texture description, the format needs to be Format.R24G8_Typeless (typeless, so both views can reinterpret it)
In the Depth Stencil view description, Format.D24_UNorm_S8_UInt
In the shader view description, Format.R24_UNorm_X8_Typeless
That will allow you to build a depth stencil that you can read back (if you don't need to read your depth buffer, just ignore the shader view and use the depth format directly).
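As a rough sketch of that resource/view split in SlimDX (sizes and variable names are placeholders; Device is the same device used elsewhere in the code):
// Hedged sketch: one typeless depth texture, viewed once as a depth stencil and once as a shader resource.
Texture2DDescription depthDescription = new Texture2DDescription()
{
    Width = 1024,
    Height = 1024,
    MipLevels = 1,
    ArraySize = 1,
    Format = Format.R24G8_Typeless,              // typeless so both views can reinterpret it
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Default,
    BindFlags = BindFlags.DepthStencil | BindFlags.ShaderResource,
    CpuAccessFlags = CpuAccessFlags.None,
    OptionFlags = ResourceOptionFlags.None
};
Texture2D depthTexture = new Texture2D(Device, depthDescription);
DepthStencilView depthView = new DepthStencilView(Device, depthTexture, new DepthStencilViewDescription()
{
    Format = Format.D24_UNorm_S8_UInt,
    Dimension = DepthStencilViewDimension.Texture2D,
    MipSlice = 0
});
ShaderResourceView depthShaderView = new ShaderResourceView(Device, depthTexture, new ShaderResourceViewDescription()
{
    Format = Format.R24_UNorm_X8_Typeless,
    Dimension = ShaderResourceViewDimension.Texture2D,
    MipLevels = 1,
    MostDetailedMip = 0
});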
You can also increase quality by using mipmaps (which would help a lot, especially when zooming out).
To do so, set the following options in your texture description (make sure that this texture is not multisampled):
texBufferDesc.OptionFlags |= ResourceOptionFlags.GenerateMipMaps;
texBufferDesc.MipLevels = 0; //0 means "all"
Once you're done with your rendering, call context.GenerateMips, passing the shader resource view of the texture that was just rendered.
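Putting the resolve and mip-generation steps together, the order of operations might look roughly like this. All names are placeholders, the resolve target is the single-sample texture created with MipLevels = 0 and the GenerateMipMaps option (plus RenderTarget | ShaderResource bind flags), and the argument order follows the ResolveSubresource call shown earlier:
// Hedged sketch of the overall flow:
// 1. Render the borders into the 8-sample texture.
// 2. Resolve into the single-sample, mip-capable texture.
// 3. Generate the mip chain and sample that texture from the billboard shader.
Context.ResolveSubresource(multisampledTexture, 0, resolvedTexture, 0, Format.R8G8B8A8_UNorm);
Context.GenerateMips(resolvedShaderView); // SRV created from resolvedTexture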
About drawing the lines directly instead: that's definitely possible, and it will certainly give you the best quality.
I'm not sure how many lines you render, but it doesn't look like something a reasonably modern card would struggle with, and a bit of culling can easily discard lines that are off-screen so they don't get drawn.
You could also do a hybrid (use the texture when zoomed out, render a subset of the lines when zoomed in); that's not too hard to set up either.

WP7 Crash when trying to create instance of WriteableBitmap

I've run into a strange problem when saving a picture to the media library: my application crashes without raising an exception. Here is my saving code.
using (MemoryStream stream = new MemoryStream())
{
try
{
WriteableBitmap bitmap = new WriteableBitmap(InkPrest, InkPrest.RenderTransform); // Crash here, the actualHeight of InkPrest is 2370.0
bitmap.SaveJpeg(stream, (int)InkPrest.ActualWidth, (int)InkPrest.ActualHeight, 0, 100);
stream.Seek(0, SeekOrigin.Begin);
MediaLibrary library = new MediaLibrary();
library.SavePicture(DateTime.Now.ToString(), stream.GetBuffer());
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}
}
I have debugged step by step; the app crashes at
WriteableBitmap bitmap = new WriteableBitmap(InkPrest, InkPrest.RenderTransform); // Crash here, the actualHeight of InkPrest is 2370.0
Any idea on solving this problem?
Update: I tried to save the content as several images instead.
The UIElement is 704 × 2370.
TranslateTransform transform = new TranslateTransform();
transform.Transform(new Point(0,0));
double MaxHeight = 800;
double height = InkPrest.ActualHeight;
int saveCount = 0;
int succeedCount = 0;
while (height > 0)
{
using (MemoryStream stream = new MemoryStream())
{
try
{
double actualRenderHeight = Math.Min(height, MaxHeight);
WriteableBitmap bitmap = new WriteableBitmap((int)InkPrest.ActualWidth, (int)actualRenderHeight);
bitmap.Render(InkPrest, transform); //Crash here, also no exception.
bitmap.Invalidate();
height -= actualRenderHeight;
transform.Y -= actualRenderHeight;
bitmap.SaveJpeg(stream, (int)InkPrest.ActualWidth, (int)actualRenderHeight, 0, 100);
stream.Seek(0, SeekOrigin.Begin);
MediaLibrary library = new MediaLibrary();
Picture pic = library.SavePicture(manuscriptFile.Title + DateTime.Now.ToString(), stream.GetBuffer());
saveCount++;
if (pic != null)
{
succeedCount++;
}
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}
}
}
Check whether you are instantiating the WriteableBitmap on the UI thread or on another thread; you need to create the WriteableBitmap on the UI thread.
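A minimal sketch of marshalling the creation onto the UI thread, assuming the saving code above currently runs on a background thread:
// Hedged sketch: create the WriteableBitmap on the UI thread via the dispatcher.
Deployment.Current.Dispatcher.BeginInvoke(() =>
{
    WriteableBitmap bitmap = new WriteableBitmap(InkPrest, InkPrest.RenderTransform);
    using (MemoryStream stream = new MemoryStream())
    {
        bitmap.SaveJpeg(stream, (int)InkPrest.ActualWidth, (int)InkPrest.ActualHeight, 0, 100);
        stream.Seek(0, SeekOrigin.Begin);
        new MediaLibrary().SavePicture(DateTime.Now.ToString(), stream.GetBuffer());
    }
});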

Copying the image of a ScatterChart to system clipboard in JavaFX 2.0

I need to copy a ScatterChart in JavaFX 2.0 to the system clipboard. I'm not really sure how to copy the whole image of the ScatterChart with the plotted points.
This gets rid of the need for any robots to take screenshots:
/**
* Sets the image content of the clipboard to the chart supplied
* @param chart the chart you wish to copy to the clipboard
*/
public void copyChartToClipboard(ScatterChart<Double, Double> chart) {
WritableImage image = chart.snapshot(new SnapshotParameters(), null);
ClipboardContent cc = new ClipboardContent();
cc.putImage(image);
Clipboard.getSystemClipboard().setContent(cc);
}
See the next piece of code. I've added fully qualified package names for all non-JavaFX classes to avoid an import mess.
public void start(final Stage primaryStage) throws Exception {
VBox root = new VBox();
final Scene scene;
primaryStage.setScene(scene = new Scene(root));
NumberAxis xAxis = new NumberAxis("X-Axis", 0d, 8.0d, 1.0d);
NumberAxis yAxis = new NumberAxis("Y-Axis", 0.0d, 5.0d, 1.0d);
ObservableList<XYChart.Series> data = FXCollections.observableArrayList(
new ScatterChart.Series("Series 1", FXCollections.<ScatterChart.Data>observableArrayList(
new XYChart.Data(0.2, 3.5),
new XYChart.Data(0.7, 4.6),
new XYChart.Data(7.8, 4.0))));
final ScatterChart chart = new ScatterChart(xAxis, yAxis, data);
Button btnShoot = new Button("screenshot");
btnShoot.setOnAction(new EventHandler<ActionEvent>() {
public void handle(ActionEvent t) {
try {
// getting screen coordinates
Bounds b = chart.getBoundsInParent();
int x = (int)Math.round(primaryStage.getX() + scene.getX() + b.getMinX());
int y = (int)Math.round(primaryStage.getY() + scene.getY() + b.getMinY());
int w = (int)Math.round(b.getWidth());
int h = (int)Math.round(b.getHeight());
// using an AWT Robot to get the image
java.awt.Robot robot = new java.awt.Robot();
java.awt.image.BufferedImage bi = robot.createScreenCapture(new java.awt.Rectangle(x, y, w, h));
// convert BufferedImage to javafx.scene.image.Image
java.io.ByteArrayOutputStream stream = new java.io.ByteArrayOutputStream();
ImageIO.write(bi, "png", stream);
Image image = new Image(new java.io.ByteArrayInputStream(stream.toByteArray()), w, h, true, true);
// put it to clipboard
ClipboardContent cc = new ClipboardContent();
cc.putImage(image);
Clipboard.getSystemClipboard().setContent(cc);
} catch (Exception ex) {
ex.printStackTrace();
}
}
});
root.getChildren().addAll(chart, btnShoot);
primaryStage.show();
}
N.B.: this approach involves using AWT side by side with JavaFX, which is generally not a good idea and may not work on all configurations. It would be better to use GlassRobot instead of the AWT Robot, but unfortunately it's not stable enough yet.
