I want to crop an image stored on the SD card. My code is:
Intent intent = new Intent("com.android.camera.action.CROP");
Uri uriOfImageToCrop = Uri.parse(Environment.getExternalStorageDirectory()+"/bookpage.jpg");
intent.setDataAndType(uriOfImageToCrop, "image/*");
intent.putExtra("outputX", 200);
intent.putExtra("outputY", 200);
intent.putExtra("aspectX", 1);
intent.putExtra("aspectY", 1);
intent.putExtra("scale", true);
intent.putExtra("noFaceDetection", true);
intent.putExtra(MediaStore.EXTRA_OUTPUT, uriOfImageToCrop);
startActivity(intent);
But it throws the following exception:
java.lang.IllegalStateException: Target host must not be null, or set in parameters.
Can anybody help me, please?
Android doesn't include a crop API anymore. You need to create your own or import a library such as:
https://github.com/lvillani/android-cropimage
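If you don't want to pull in a library, a minimal sketch of a manual centre crop with the standard Bitmap API looks like this (the file name and the 200x200 output size are simply taken from the question; error handling is omitted):
// Decode the photo from external storage (path taken from the question).
String path = Environment.getExternalStorageDirectory() + "/bookpage.jpg";
Bitmap source = BitmapFactory.decodeFile(path);
// Crop the largest centred square, then scale it down to 200x200.
int cropSize = Math.min(source.getWidth(), source.getHeight());
int left = (source.getWidth() - cropSize) / 2;
int top = (source.getHeight() - cropSize) / 2;
Bitmap square = Bitmap.createBitmap(source, left, top, cropSize, cropSize);
Bitmap cropped = Bitmap.createScaledBitmap(square, 200, 200, true);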
You can use http://code.google.com/p/catalano-framework/
FastBitmap fb = new FastBitmap(bitmap);
int startX = 10;
int startY = 10;
int newWidth = 100;
int newHeight = 100;
Crop crop = new Crop(startX, startY, newWidth, newHeight);
crop.applyInPlace(fb);
I am trying to compress a large image into a 600x600 thumbnail in .NET 6.
My code is:
public static string CreateThumbnail(int maxWidth, int maxHeight, string path)
{
byte[] bytes = System.IO.File.ReadAllBytes(path);
using (System.IO.MemoryStream ms = new System.IO.MemoryStream(bytes))
{
Image image = Image.FromStream(ms);
return CreateThumbnail(maxWidth, maxHeight, image, path);
}
}
I am getting an error on this line:
Image image = Image.FromStream(ms);
The error is: System.Runtime.InteropServices.ExternalException: 'A generic error occurred in GDI+.'
The image is 8 MB; the code works fine for small images. What is the problem in the code, or is there a better way to create a thumbnail for large images?
CreateThumbnail has this code, but I get the error before this function is called:
private static string CreateThumbnail(int maxWidth, int maxHeight, Image image, string path)
{
//var image = System.Drawing.Image.FromStream(path);
var ratioX = (double)maxWidth / image.Width;
var ratioY = (double)maxHeight / image.Height;
var ratio = Math.Min(ratioX, ratioY);
var newWidth = (int)(image.Width * ratio);
var newHeight = (int)(image.Height * ratio);
using (var newImage = new Bitmap(newWidth, newHeight))
{
using (Graphics thumbGraph = Graphics.FromImage(newImage))
{
thumbGraph.CompositingQuality = CompositingQuality.Default;
thumbGraph.SmoothingMode = SmoothingMode.Default;
//thumbGraph.InterpolationMode = InterpolationMode.HighQualityBicubic;
thumbGraph.DrawImage(image, 0, 0, newWidth, newHeight);
image.Dispose();
//string fileRelativePath = Path.GetFileName(path);
//newImage.Save(path, newImage.RawFormat);
SaveJpeg(path, newImage, 100);
}
}
return path;
}
I am trying to get an image using the camera. The image is to be 256x256, and I want it to come from the centre of a photo taken with the phone's camera. I found this code at: https://forums.xamarin.com/discussion/37647/cross-platform-crop-image-view
I am using this code for Android...
public byte[] CropPhoto(byte[] photoToCropBytes, Rectangle rectangleToCrop, double outputWidth, double outputHeight)
{
using (var photoOutputStream = new MemoryStream())
{
// Load the bitmap
var inSampleSize = CalculateInSampleSize((int)rectangleToCrop.Width, (int)rectangleToCrop.Height, (int)outputWidth, (int)outputHeight);
var options = new BitmapFactory.Options();
options.InSampleSize = inSampleSize;
//options.InPurgeable = true; see http://developer.android.com/reference/android/graphics/BitmapFactory.Options.html
using (var photoToCropBitmap = BitmapFactory.DecodeByteArray(photoToCropBytes, 0, photoToCropBytes.Length, options))
{
var matrix = new Matrix();
var matrixScale = outputWidth / rectangleToCrop.Width * inSampleSize;
matrix.PostScale((float)matrixScale, (float)matrixScale);
using (var photoCroppedBitmap = Bitmap.CreateBitmap(photoToCropBitmap, (int)(rectangleToCrop.X / inSampleSize), (int)(rectangleToCrop.Y / inSampleSize), (int)(rectangleToCrop.Width / inSampleSize), (int)(rectangleToCrop.Height / inSampleSize), matrix, true))
{
photoCroppedBitmap.Compress(Bitmap.CompressFormat.Jpeg, 100, photoOutputStream);
}
}
return photoOutputStream.ToArray();
}
}
public static int CalculateInSampleSize(int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
//see http://developer.android.com/training/displaying-bitmaps/load-bitmap.html
int inSampleSize = 1; //default
if (inputHeight > outputHeight || inputWidth > outputWidth) {
int halfHeight = inputHeight / 2;
int halfWidth = inputWidth / 2;
// Calculate the largest inSampleSize value that is a power of 2 and keeps both
// height and width larger than the requested height and width.
while ((halfHeight / inSampleSize) > outputHeight && (halfWidth / inSampleSize) > outputWidth)
{
inSampleSize *= 2;
}
}
return inSampleSize;
}
and this code for iOS...
public byte[] CropPhoto(byte[] photoToCropBytes, Xamarin.Forms.Rectangle rectangleToCrop, double outputWidth, double outputHeight)
{
byte[] photoOutputBytes;
using (var data = NSData.FromArray(photoToCropBytes))
{
using (var photoToCropCGImage = UIImage.LoadFromData(data).CGImage)
{
//crop image
using (var photoCroppedCGImage = photoToCropCGImage.WithImageInRect(new CGRect((nfloat)rectangleToCrop.X, (nfloat)rectangleToCrop.Y, (nfloat)rectangleToCrop.Width, (nfloat)rectangleToCrop.Height)))
{
using (var photoCroppedUIImage = UIImage.FromImage(photoCroppedCGImage))
{
//create a 24bit RGB image to the output size
using (var cGBitmapContext = new CGBitmapContext(IntPtr.Zero, (int)outputWidth, (int)outputHeight, 8, (int)(4 * outputWidth), CGColorSpace.CreateDeviceRGB(), CGImageAlphaInfo.PremultipliedFirst))
{
var photoOutputRectangleF = new RectangleF(0f, 0f, (float)outputWidth, (float)outputHeight);
// draw the cropped photo resized
cGBitmapContext.DrawImage(photoOutputRectangleF, photoCroppedUIImage.CGImage);
//get cropped resized photo
var photoOutputUIImage = UIKit.UIImage.FromImage(cGBitmapContext.ToImage());
//convert cropped resized photo to bytes and then stream
using (var photoOutputNsData = photoOutputUIImage.AsJPEG())
{
photoOutputBytes = new Byte[photoOutputNsData.Length];
System.Runtime.InteropServices.Marshal.Copy(photoOutputNsData.Bytes, photoOutputBytes, 0, Convert.ToInt32(photoOutputNsData.Length));
}
}
}
}
}
}
return photoOutputBytes;
}
I am struggling to work out exactly what parameters I need to pass to the function.
Currently, I am doing the following:
double cropSize = Math.Min(DeviceDisplay.MainDisplayInfo.Width, DeviceDisplay.MainDisplayInfo.Height);
double left = (DeviceDisplay.MainDisplayInfo.Width - cropSize) / 2.0;
double top = (DeviceDisplay.MainDisplayInfo.Height - cropSize) / 2.0;
// Get a square resized and cropped from the top image as a byte[]
_imageData = mediaService.CropPhoto(_imageData, new Rectangle(left, top, cropSize, cropSize), 256, 256);
I was expecting this to crop the image to the central square (in portrait mode side length would be the width of the photo) and then scale it down to a 256x256 image. But it never picks the centre of the image.
Has anyone ever used this code and can tell me what I need to pass in for the 'rectangleToCrop' parameter?
Note: Both Android and iOS give the same image, just not the central part that I was expecting.
Here are the two routines I used:
Android:
public byte[] ResizeImageAndCropToSquare(byte[] rawPhoto, int outputSize)
{
// BitmapFactory options for decoding
BitmapFactory.Options options = new BitmapFactory.Options();
// InPurgeable lets the system reclaim the pixel memory when it needs to
options.InPurgeable = true;
// Get the original image
using (var originalImage = BitmapFactory.DecodeByteArray(rawPhoto, 0, rawPhoto.Length, options))
{
// The shortest edge will determine the size of the square image
int cropSize = Math.Min(originalImage.Width, originalImage.Height);
int left = (originalImage.Width - cropSize) / 2;
int top = (originalImage.Height - cropSize) / 2;
using (var squareImage = Bitmap.CreateBitmap(originalImage, left, top, cropSize, cropSize))
{
// Resize the square image to the correct size of an Avatar
using (var resizedImage = Bitmap.CreateScaledBitmap(squareImage, outputSize, outputSize, true))
{
// Return the raw data of the resized image
using (MemoryStream resizedImageStream = new MemoryStream())
{
// Compress the resized image as PNG at maximum quality
resizedImage.Compress(Bitmap.CompressFormat.Png, 100, resizedImageStream);
return resizedImageStream.ToArray();
}
}
}
}
}
iOS:
private const int BitsPerComponent = 8;
public byte[] ResizeImageAndCropToSquare(byte[] rawPhoto, int outputSize)
{
using (var data = NSData.FromArray(rawPhoto))
{
using (var photoToCrop = UIImage.LoadFromData(data).CGImage)
{
nint photoWidth = photoToCrop.Width;
nint photoHeight = photoToCrop.Height;
nint cropSize = photoWidth < photoHeight ? photoWidth : photoHeight;
nint left = (photoWidth - cropSize) / 2;
nint top = (photoHeight - cropSize) / 2;
// Crop image
using (var photoCropped = photoToCrop.WithImageInRect(new CGRect(left, top, cropSize, cropSize)))
{
using (var photoCroppedUIImage = UIImage.FromImage(photoCropped))
{
// Create a 24bit RGB image of output size
using (var cGBitmapContext = new CGBitmapContext(IntPtr.Zero, outputSize, outputSize, BitsPerComponent, outputSize << 2, CGColorSpace.CreateDeviceRGB(), CGImageAlphaInfo.PremultipliedFirst))
{
var photoOutputRectangleF = new RectangleF(0f, 0f, outputSize, outputSize);
// Draw the cropped photo resized
cGBitmapContext.DrawImage(photoOutputRectangleF, photoCroppedUIImage.CGImage);
// Get cropped resized photo
var photoOutputUIImage = UIImage.FromImage(cGBitmapContext.ToImage());
// Convert cropped resized photo to bytes and then stream
using (var photoOutputNsData = photoOutputUIImage.AsPNG())
{
var rawOutput = new byte[photoOutputNsData.Length];
Marshal.Copy(photoOutputNsData.Bytes, rawOutput, 0, Convert.ToInt32(photoOutputNsData.Length));
return rawOutput;
}
}
}
}
}
}
}
So I made a small application that basically takes whatever image is in the clipboard (memory) and tries to draw it.
This is a sample of the code:
private EventHandler<KeyEvent> copyPasteEvent = new EventHandler() {
final KeyCombination ctrl_V = new KeyCodeCombination(KeyCode.V, KeyCombination.CONTROL_DOWN);
@Override
public void handle(Event event) {
if (ctrl_V.match((KeyEvent) event)) {
System.out.println("Ctrl+V pressed");
Clipboard clipboard = Clipboard.getSystemClipboard();
System.out.println(clipboard.getContentTypes());
//Change canvas size if necessary to allow space for the image to fit
Image copiedImage = clipboard.getImage();
if (copiedImage.getHeight()>canvas.getHeight()){
canvas.setHeight(copiedImage.getHeight());
}
if (copiedImage.getWidth()>canvas.getWidth()){
canvas.setWidth(copiedImage.getWidth());
}
gc.drawImage(copiedImage, 0, 0);
}
}
};
This is the image that was drawn and the corresponding data type:
A print from my screen.
An image from the internet.
However, when I copy and paste a raw image directly from Paint...
Object Descriptor is an OLE format from Microsoft.
This is why when you copy an image from a Microsoft application, you get these descriptors from Clipboard.getSystemClipboard().getContentTypes():
[[application/x-java-rawimage], [Object Descriptor]]
As for getting the image out of the clipboard... let's try two possible ways to do it: AWT and JavaFX.
AWT
Let's use the awt toolkit to get the system clipboard, and in case we have an image on it, retrieve a BufferedImage. Then we can convert it easily to a JavaFX Image and place it in an ImageView:
try {
DataFlavor[] availableDataFlavors = Toolkit.getDefaultToolkit().
getSystemClipboard().getAvailableDataFlavors();
for (DataFlavor f : availableDataFlavors) {
System.out.println("AWT Flavor: " + f);
if (f.equals(DataFlavor.imageFlavor)) {
BufferedImage data = (BufferedImage) Toolkit.getDefaultToolkit().getSystemClipboard().getData(DataFlavor.imageFlavor);
System.out.println("data " + data);
// Convert to JavaFX:
WritableImage img = new WritableImage(data.getWidth(), data.getHeight());
SwingFXUtils.toFXImage((BufferedImage) data, img);
imageView.setImage(img);
}
}
} catch (UnsupportedFlavorException | IOException ex) {
System.out.println("Error " + ex);
}
It prints:
AWT Flavor: java.awt.datatransfer.DataFlavor[mimetype=image/x-java-image;representationclass=java.awt.Image]
data BufferedImage@3e4eca95: type = 1 DirectColorModel: rmask=ff0000 gmask=ff00 bmask=ff amask=0 IntegerInterleavedRaster: width = 350 height = 364 #Bands = 3 xOff = 0 yOff = 0 dataOffset[0] 0
and displays your image:
This part was based on this answer.
JavaFX
Why didn't we try it with JavaFX in the first place? Well, we could have tried directly:
Image content = (Image) Clipboard.getSystemClipboard().getContent(DataFormat.IMAGE);
imageView.setImage(content);
and you will get a valid image, but when you add it to an ImageView it will be blank, as you already noticed, or show invalid colors.
So how can we get a valid image? If you check the BufferedImage above, it shows type = 1, which is BufferedImage.TYPE_INT_RGB; in other words, it is an image with 8-bit RGB color components packed into integer pixels, without an alpha component.
My guess is that the JavaFX implementation for Windows doesn't process this image format correctly, as it probably expects an RGBA format. You can check here how the image is extracted. And if you want to dive into the native implementation, check the native-glass/win/GlassClipboard.cpp code.
So we can try to do it with a PixelReader. Let's read the image and return a byte array:
private byte[] imageToData(Image image) {
int width = (int) image.getWidth();
int height = (int) image.getHeight();
byte[] data = new byte[width * height * 3];
int i = 0;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
int argb = image.getPixelReader().getArgb(x, y);
int r = (argb >> 16) & 0xFF;
int g = (argb >> 8) & 0xFF;
int b = argb & 0xFF;
data[i++] = (byte) r;
data[i++] = (byte) g;
data[i++] = (byte) b;
}
}
return data;
}
Now, all we need to do is use this byte array to write a new image and set it to the ImageView:
Image content = (Image) Clipboard.getSystemClipboard().getContent(DataFormat.IMAGE);
byte[] data = imageToData(content);
WritableImage writableImage = new WritableImage((int) content.getWidth(), (int) content.getHeight());
PixelWriter pixelWriter = writableImage.getPixelWriter();
pixelWriter.setPixels(0, 0, (int) content.getWidth(), (int) content.getHeight(),
PixelFormat.getByteRgbInstance(), data, 0, (int) content.getWidth() * 3);
imageView.setImage(writableImage);
And now you will get the same result, but only using JavaFX:
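For completeness, here is a minimal sketch of how the pieces above could be wired back into the Ctrl+V handler from the question (it assumes the same canvas and gc fields as the question, and the imageToData helper defined above):
// Inside the Ctrl+V handler: repack the clipboard image and draw it on the canvas.
Clipboard clipboard = Clipboard.getSystemClipboard();
if (clipboard.hasImage()) {
    Image content = clipboard.getImage();
    int w = (int) content.getWidth();
    int h = (int) content.getHeight();
    // Repack the pixels as plain RGB bytes and write them into a fresh WritableImage.
    byte[] data = imageToData(content);
    WritableImage writableImage = new WritableImage(w, h);
    writableImage.getPixelWriter().setPixels(0, 0, w, h,
            PixelFormat.getByteRgbInstance(), data, 0, w * 3);
    // Grow the canvas if necessary, then draw the repacked image.
    canvas.setWidth(Math.max(canvas.getWidth(), w));
    canvas.setHeight(Math.max(canvas.getHeight(), h));
    gc.drawImage(writableImage, 0, 0);
}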
I have a series of banners (standard sizes) which all need to load the same corresponding image for each slide. I can load them fine, but I want the image to match the size of the container MC that it is being loaded into. Is that possible? Either that, or to set the height/width manually...
Everything I have tried doesn't work. Here is the code for the working state (where it just loads the image, which stretches across the stage):
Code:
var myImage:String = dynamicContent.Profile[0].propImage.Url;
function myImageLoader(file:String, x:Number, y:Number):StudioLoader{
var myImageLoader:StudioLoader = new StudioLoader();
var request:URLRequest = new URLRequest(file);
myImageLoader.load(request);
myImageLoader.x = -52;
myImageLoader.y =-30;
return myImageLoader;
}
propImage1.addChild(myImageLoader(enabler.getUrl(myImage), -20, 0));
You can resize your loaded image after the Event.COMPLETE event fires on the LoaderInfo (contentLoaderInfo) of your Loader, like this:
var request:URLRequest = new URLRequest('http://www.example.com/image.jpg');
var loader:Loader = new Loader();
loader.contentLoaderInfo.addEventListener(Event.COMPLETE, on_loadComplete);
function on_loadComplete(e:Event):void {
var image:DisplayObject = loader.content;
image.x = 100;
image.y = 100;
image.width = 300;
image.height = 200;
addChild(image);
}
loader.load(request);
Edit:
load_image('http://www.example.com/image.jpg');
function load_image(url){
var request:URLRequest = new URLRequest(url);
var loader:Loader = new Loader();
loader.contentLoaderInfo.addEventListener(Event.COMPLETE, on_loadComplete);
function on_loadComplete(e:Event):void {
add_image(loader.content);
}
loader.load(request);
}
function add_image(image:DisplayObject, _x:int = 0, _y:int = 0, _width:int = 100, _height:int = 100){
image.x = _x;
image.y = _y;
image.width = _width;
image.height = _height;
addChild(image);
}
Hope that helps.
I am using PDFBox to extract data from my webapp and put it into a PDF. I have a method that draws the header on each PDF page. However, when I add an image to each page, the document runs out of memory. I was wondering if anybody had any ideas on a solution? Here is my drawHeader method:
public static void drawHeader(PDDocument doc, PDPage page, PDPageContentStream contentStream, int[] columnWidths, int pageNumber) throws IOException {
contentStream.beginText();
PDFont font = PDType1Font.HELVETICA_BOLD;
contentStream.setFont(font, 24);
contentStream.moveTextPositionByAmount(50, 750);
contentStream.drawString("Producer License Report");
contentStream.endText();
contentStream.beginText();
contentStream.moveTextPositionByAmount(550, 750);
contentStream.setFont(PDType1Font.HELVETICA_BOLD, 8);
contentStream.drawString("Page " + pageNumber);
contentStream.endText();
contentStream.drawLine(50, 740, 340, 740);
contentStream.drawLine(16, 680, 595, 680);
List<String> headerList = new LinkedList<String>();
headerList.add("NPN");
headerList.add("First Name");
headerList.add("Last Name");
headerList.add("Suffix");
headerList.add("License State");
headerList.add("Resident State");
headerList.add("License Number");
contentStream.setFont(PDType1Font.HELVETICA_BOLD, 9);
float textx = 15;
float texty = 685;
InputStream in = new FileInputStream(new File("logo.jpg"));
PDJpeg img = new PDJpeg(doc, in);
contentStream.drawImage(img, 375, 720);
for (int i = 0; i < headerList.size(); i++) {
String text = headerList.get(i);
contentStream.beginText();
contentStream.moveTextPositionByAmount(textx, texty);
contentStream.drawString(text);
contentStream.endText();
textx += (columnWidths[i] * 6.5);
}
}
I found a solution! You have to create the image object before opening the content stream.
Example:
/* Step 1: Prepare the document.
*/
doc = new PDDocument();
PDPage page = new PDPage();
doc.addPage(page);
/* Step 2: Prepare the image
* PDJpeg is the class you use when dealing with jpg images.
* You will need to mention the jpg file and the document to which it is to be added
* Note that if you complete these steps after creating the content stream, the PDF
* file created will show an "Out of memory" error.
*/
PDXObjectImage image = null;
image = new PDJpeg(doc, new FileInputStream("image.jpg"));
PDPageContentStream contentStream = new PDPageContentStream(doc, page);
....
I'm trying to comment the answer by Timo Hoen but don't have enough rep yet...
Another issue I've found with the "out of memory" error is when the image is large, or you have tried to draw it off the page.
Start with your coordinates as 100, 100 and then work from there.
e.g. contentStream.drawImage(img, 100, 100);
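If the image itself is large, you can also draw it into a fixed box so it cannot run off the page. A minimal sketch, assuming the PDFBox 1.x API used elsewhere on this page (drawXObject with an explicit width and height); the 100x50 box is just an assumed size:
// Create the image before opening the content stream (see the earlier answer above),
// then draw it scaled into a box that fits on the page.
PDJpeg img = new PDJpeg(doc, new FileInputStream("logo.jpg"));
PDPageContentStream contentStream = new PDPageContentStream(doc, page);
contentStream.drawXObject(img, 100, 100, 100, 50); // x, y, width, height (assumed values)
contentStream.close();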
Cheers,
Sam