Maintain orientation when resizing an image with OpenCV

FYI, I'm new to OpenCV. I'm creating an AWS Lambda function that uses OpenCV 2.4 to resize images that I have in S3. S3 generates an event that launches my Lambda function when an image is uploaded to my source bucket. The Lambda function successfully resizes the images and stores the resized image in the target bucket under the same object key as the original image. I have test images that were taken with a digital camera and some from an Android phone. What I've noticed is that if I have an image from the phone that was taken in portrait, the image in the source bucket is oriented correctly but the resized version in the target bucket is oriented incorrectly: images taken in portrait orientation are converted to landscape. How do I maintain the orientation when resizing images? Below is the code I'm using.
public Object handleRequest(S3Event input, Context context) {
    OpenCV.loadLibrary();
    for (S3EventNotificationRecord record : input.getRecords()) {
        String bucketName = record.getS3().getBucket().getName();
        S3ObjectEntity entity = record.getS3().getObject();
        String objectKey = entity.getUrlDecodedKey();
        context.getLogger().log("ObjectKey: " + objectKey);
        String fileExtension = objectKey.substring(objectKey.lastIndexOf("."));
        context.getLogger().log("file extension: " + fileExtension);
        S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, objectKey));
        InputStream objectData = object.getObjectContent();
        ObjectMetadata objectMetadata = object.getObjectMetadata();
        // Process the objectData stream.
        int nRead;
        byte[] data = new byte[16 * 1024];
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try {
            while ((nRead = objectData.read(data, 0, data.length)) != -1) {
                buffer.write(data, 0, nRead);
            }
            byte[] bytes = buffer.toByteArray();
            Mat srcImage = Highgui.imdecode(new MatOfByte(bytes), Highgui.CV_LOAD_IMAGE_UNCHANGED);
            Mat resizedImage = new Mat();
            Imgproc.resize(srcImage, resizedImage, new Size(0, 0), 0.1, 0.1, Imgproc.INTER_AREA);
            MatOfByte resizedMatOfByte = new MatOfByte();
            Highgui.imencode(fileExtension, resizedImage, resizedMatOfByte);
            byte[] bytesToWrite = resizedMatOfByte.toArray();
            ObjectMetadata meta = new ObjectMetadata();
            meta.setContentLength(bytesToWrite.length);
            meta.setContentType(objectMetadata.getContentType());
            PutObjectRequest putRequest = new PutObjectRequest(Configuration.MINIMIZED_ARTIFACT_BUCKET, objectKey, new ByteArrayInputStream(bytesToWrite), meta);
            s3Client.putObject(putRequest);
            objectData.close();
        } catch (IOException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }
    }
    return null;
}
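One likely cause: Highgui.imdecode ignores the EXIF Orientation tag, so portrait photos from phones are decoded in the sensor's landscape orientation, and since imencode writes a fresh JPEG without the original EXIF block, the output stays sideways. A minimal sketch of a possible fix is below, assuming the metadata-extractor library (com.drew) is on the Lambda's classpath; the helper class is illustrative, not part of the original function.

// Illustrative helper (not from the original code): read the EXIF Orientation
// tag with metadata-extractor and rotate the decoded Mat accordingly, so the
// pixels are upright before resizing and re-encoding.
import java.io.ByteArrayInputStream;
import com.drew.imaging.ImageMetadataReader;
import com.drew.metadata.Metadata;
import com.drew.metadata.exif.ExifIFD0Directory;
import org.opencv.core.Core;
import org.opencv.core.Mat;

public class OrientationHelper {
    public static Mat applyExifOrientation(byte[] jpegBytes, Mat decoded) {
        try {
            Metadata metadata = ImageMetadataReader.readMetadata(new ByteArrayInputStream(jpegBytes));
            ExifIFD0Directory dir = metadata.getFirstDirectoryOfType(ExifIFD0Directory.class);
            int orientation = (dir != null && dir.containsTag(ExifIFD0Directory.TAG_ORIENTATION))
                    ? dir.getInt(ExifIFD0Directory.TAG_ORIENTATION) : 1;
            Mat rotated = new Mat();
            switch (orientation) {
                case 6: // rotate 90 degrees clockwise
                    Core.transpose(decoded, rotated);
                    Core.flip(rotated, rotated, 1);
                    return rotated;
                case 3: // rotate 180 degrees
                    Core.flip(decoded, rotated, -1);
                    return rotated;
                case 8: // rotate 90 degrees counter-clockwise
                    Core.transpose(decoded, rotated);
                    Core.flip(rotated, rotated, 0);
                    return rotated;
                default: // orientation 1 or unknown: leave the image as-is
                    return decoded;
            }
        } catch (Exception e) {
            return decoded; // no or unreadable EXIF: leave the image unchanged
        }
    }
}

In the handler above, this would be called right after imdecode, for example srcImage = OrientationHelper.applyExifOrientation(bytes, srcImage), so that the subsequent resize and imencode operate on an upright image.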

Related

Cognitive face detection is not working when I try to upload the image with a face mask

Any suggestions on how to get face information for an image with a mask in Cognitive face recognition?
When I upload an image with headwear or eyeglasses, the Cognitive service returns the image information, but when I pick an image with a mask, the Cognitive service doesn't return any information. That means my implementation of the Cognitive service is not able to recognize a face wearing a mask. If anybody has faced this issue and resolved it, please suggest a solution.
public string subscriptionKey = "88c**************************f7";
public string uriBase = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect";

// Method to pick an image from the gallery
async void btnPick_Clicked(object sender, System.EventArgs e)
{
    try
    {
        if (!CrossMedia.Current.IsPickPhotoSupported)
        {
            return;
        }
        var file = await CrossMedia.Current.PickPhotoAsync(new Plugin.Media.Abstractions.PickMediaOptions
        {
            PhotoSize = Plugin.Media.Abstractions.PhotoSize.Medium
        });
        if (file == null) return;
        imgSelected.Source = ImageSource.FromStream(() => {
            var stream = file.GetStream();
            return stream;
        });
        MakeAnalysisRequest(file.Path);
    }
    catch (Exception ex)
    {
        string test = ex.Message;
    }
}

// Convert image to byte array
public byte[] GetImageAsByteArray(string imageFilePath)
{
    using (FileStream fileStream =
        new FileStream(imageFilePath, FileMode.Open, FileAccess.Read))
    {
        BinaryReader binaryReader = new BinaryReader(fileStream);
        return binaryReader.ReadBytes((int)fileStream.Length);
    }
}

// Method to get image information from the detection URL
public async void MakeAnalysisRequest(string imageFilePath)
{
    HttpClient client = new HttpClient();
    client.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", subscriptionKey);
    string requestParameters = "returnFaceId=true&returnFaceLandmarks=false" +
        "&returnFaceAttributes=age,gender,headPose,smile,facialHair,glasses," +
        "emotion,hair,makeup,occlusion,accessories,blur,exposure,noise";
    string uri = uriBase + "?" + requestParameters;
    HttpResponseMessage response;
    byte[] byteData = GetImageAsByteArray(imageFilePath);
    using (ByteArrayContent content = new ByteArrayContent(byteData))
    {
        content.Headers.ContentType = new MediaTypeHeaderValue("application/octet-stream");
        response = await client.PostAsync(uri, content);
        string contentString = await response.Content.ReadAsStringAsync();
        //***************************************************
        // Here it returns nothing in the mask case; otherwise it works fine
        //***************************************************
        List<ResponseModel> faceDetails = JsonConvert.DeserializeObject<List<ResponseModel>>(contentString);
        if (faceDetails.Count != 0)
        {
            lblTotalFace.Text = "Total Faces : " + faceDetails.Count;
            lblGender.Text = "Gender : " + faceDetails[0].faceAttributes.gender;
            lblAge.Text = "Total Age : " + faceDetails[0].faceAttributes.age;
            Console.WriteLine(faceDetails[0].faceAttributes.accessories.FirstOrDefault(x => x.type == "mask").confidence);
        }
    }
}
There are 2 different things that you must keep in mind:
Some faces might not be seen by the services, see doc:
Some faces might not be detected because of technical challenges.
Extreme face angles (head pose) or face occlusion (objects such as
sunglasses or hands that block part of the face) can affect detection.
Frontal and near-frontal faces give the best results.
There are currently 2 detection models in the Face API: "detection_01" and "detection_02". The latter model (available since May 2019, if I remember well) performs better (especially for rotated or partially hidden faces) but does not provide all the output information that model 1 gives.
Difference in detection models
I made a quick demo using the "Cognitive Workbench" demo portal (available here) with the following image:
With detection_01: no face found
With detection_02: the face is found, as you can see in this capture:
But if you need to use specific attributes from the face, this might not solve your problem. See API documentation extract:
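The model is selected per request through the detectionModel query parameter of the detect call. Below is a rough sketch of such a request, shown in Java with the JDK 11 HTTP client for brevity; the endpoint, key and file path are placeholders, and the attribute parameters are left out because detection_02 does not return the attribute set that detection_01 does.

// Rough sketch: call the Face API detect endpoint with detection_02.
// Endpoint, subscription key and image path are placeholders.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.file.Files;
import java.nio.file.Path;

public class FaceDetectSketch {
    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Path.of("photo-with-mask.jpg")); // placeholder path
        String uri = "https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect"
                + "?detectionModel=detection_02&returnFaceId=true";
        HttpRequest request = HttpRequest.newBuilder(URI.create(uri))
                .header("Ocp-Apim-Subscription-Key", "<your-subscription-key>") // placeholder
                .header("Content-Type", "application/octet-stream")
                .POST(HttpRequest.BodyPublishers.ofByteArray(imageBytes))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Expected: a JSON array of detected faces (faceId + faceRectangle)
        System.out.println(response.body());
    }
}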

Insert an image into an existing pdf document using iText Sharp

I need to insert an image into an existing PDF at a specific location. I tried the answer at this question, but whichever way I do it, the image is inserted at the (0,0) position (bottom-left corner). I tried another approach where, instead of using a stream, I used the Document class in iTextSharp as shown here. Now I am able to place the image at the desired position, but this method creates a new document containing just that image. Most of the articles I found use PdfReader and PdfStamper, so I think this is the recommended way. Any help is appreciated. Below is the code for both methods I tried.
PdfStamper method
private void AddImage(string filePath)
{
    string imageURL = @"ImagePath\Image.jpg";
    using (Stream inputPdfStream = new FileStream(filePath, FileMode.Open, FileAccess.Read))
    using (Stream inputImageStream = new FileStream(imageURL, FileMode.Open, FileAccess.Read))
    using (Stream outputPdfStream = new FileStream(@"ResultingPdfPath\Abcd.pdf", FileMode.Create, FileAccess.ReadWrite))
    {
        Image image = Image.GetInstance(inputImageStream);
        image.ScaleToFit(100, 100);
        var reader = new PdfReader(inputPdfStream);
        var stamper = new PdfStamper(reader, outputPdfStream);
        PdfContentByte content = stamper.GetUnderContent(1);
        image.SetAbsolutePosition(100f, 150f);
        content.AddImage(image);
        stamper.Close();
        reader.Close();
    }
}
Document class method
private void TestMessage(string filePath)
{
    string imageURL = @"ImagePath\Image.jpg";
    Document doc = new Document(PageSize.A4);
    PdfWriter writer = PdfWriter.GetInstance(doc, new FileStream(filePath, FileMode.Open));
    doc.Open();
    iTextSharp.text.Image jpg = iTextSharp.text.Image.GetInstance(imageURL);
    jpg.ScaleToFit(140f, 120f);
    jpg.SetAbsolutePosition(100, 100);
    jpg.SpacingBefore = 10f;
    jpg.SpacingAfter = 1f;
    jpg.Alignment = Element.ALIGN_LEFT;
    doc.Add(jpg);
    doc.Close();
}
Let me know if you need further information.
I adapted your method to accept variable out paths and positions and tested it with iTextSharp 5.5.7 like this:
[TestFixture]
class TestInsertImage
{
    /// iText stamp image on top not always working
    /// http://stackoverflow.com/questions/33898280/itext-stamp-image-on-top-not-always-working
    ///
    [Test]
    public void AddStampToTestPdf()
    {
        Directory.CreateDirectory(@"C:\Temp\test-results\content\");
        AddImage(@"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", @"C:\Temp\test-results\content\Multipage-stamp-Image-100-150.pdf", 100f, 150f);
        AddImage(@"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", @"C:\Temp\test-results\content\Multipage-stamp-Image-150-100.pdf", 150f, 100f);
    }

    private void AddImage(string filePath, string outPath, float x, float y)
    {
        string imageURL = @"c:\Repo\GitHub\testarea\itext5\src\test\resources\mkl\testarea\itext5\layer\Willi-1.jpg";
        using (Stream inputPdfStream = new FileStream(filePath, FileMode.Open, FileAccess.Read))
        using (Stream inputImageStream = new FileStream(imageURL, FileMode.Open, FileAccess.Read))
        using (Stream outputPdfStream = new FileStream(outPath, FileMode.Create, FileAccess.ReadWrite))
        {
            Image image = Image.GetInstance(inputImageStream);
            image.ScaleToFit(100, 100);
            var reader = new PdfReader(inputPdfStream);
            var stamper = new PdfStamper(reader, outputPdfStream);
            PdfContentByte content = stamper.GetUnderContent(1);
            image.SetAbsolutePosition(x, y);
            content.AddImage(image);
            stamper.Close();
            reader.Close();
        }
    }
}
The results are included below.
As you can see, the positioning information clearly is respected, and the image is definitely not always at the bottom-left corner.
If this indeed does not work for the OP, he is withholding information we would need to help him.
Multipage-stamp-Image-100-150.pdf
Created using:
AddImage(@"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", @"C:\Temp\test-results\content\Multipage-stamp-Image-100-150.pdf", 100f, 150f);
Multipage-stamp-Image-150-100.pdf
Created using:
AddImage(@"d:\Issues\stackoverflow\iText stamp image on top not always working\Multipage.pdf", @"C:\Temp\test-results\content\Multipage-stamp-Image-150-100.pdf", 150f, 100f);
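For completeness, the same stamping approach in iText 5 for Java is essentially identical; the coordinates passed to setAbsolutePosition are PDF points (1/72 inch) measured from the lower-left corner of the page. A sketch with placeholder file paths:

// Sketch of the same stamping technique with iText 5 for Java.
// File paths and the (x, y) position are placeholders.
import java.io.FileOutputStream;
import com.itextpdf.text.Image;
import com.itextpdf.text.pdf.PdfContentByte;
import com.itextpdf.text.pdf.PdfReader;
import com.itextpdf.text.pdf.PdfStamper;

public class StampImage {
    public static void main(String[] args) throws Exception {
        PdfReader reader = new PdfReader("Multipage.pdf");              // source PDF (placeholder)
        PdfStamper stamper = new PdfStamper(reader,
                new FileOutputStream("Multipage-stamped.pdf"));         // output PDF (placeholder)
        Image image = Image.getInstance("Willi-1.jpg");                 // image to stamp (placeholder)
        image.scaleToFit(100, 100);
        // Place the image's lower-left corner 100 pt from the left and
        // 150 pt from the bottom of page 1.
        image.setAbsolutePosition(100f, 150f);
        PdfContentByte content = stamper.getUnderContent(1);
        content.addImage(image);
        stamper.close();
        reader.close();
    }
}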

Windows Phone - update live tile from background agent with custom image

I am trying to add a cloud image to the album cover when the cover is loaded from the internet. I am trying to do this in a background audio agent and I think I have almost got it. The problem is that I get a black image in the tile. A few times when testing I got the cover image with my cloud image in it, but mostly I get a black image (and sometimes a black image with the cloud in it).
Can anyone help me find the problem? Thanks
private void UpdateAppTile()
{
    var apptile = ShellTile.ActiveTiles.First();
    if (apptile != null && _playList != null && _playList.Any())
    {
        var track = _playList[currentTrackNumber];
        var size = 360;
        Uri coverUrl;
        if (track.AlbumArt.OriginalString.StartsWith("http"))
        {
            BitmapImage img = null;
            using (AutoResetEvent are = new AutoResetEvent(false))
            {
                string filename = Path.GetFileNameWithoutExtension(track.AlbumArt.OriginalString);
                var urlToNewCover = String.Format("http://.../{0}/{1}", filename, size);
                coverUrl = new Uri(urlToNewCover, UriKind.Absolute);
                Deployment.Current.Dispatcher.BeginInvoke(() =>
                {
                    img = new BitmapImage(coverUrl);
                    are.Set();
                });
                are.WaitOne();
                var wbmp = CreateTileImageWithCloud(img);
                SaveTileImage(wbmp, "/shared/shellcontent/test.jpg");
                coverUrl = new Uri("isostore:/shared/shellcontent/test.jpg", UriKind.RelativeOrAbsolute);
            }
        }
        else
        {
            var coverId = track.Tag.Split(',')[1];
            var urlToNewCover = String.Format("http://.../{0}/{1}", coverId, size);
            coverUrl = new Uri(urlToNewCover, UriKind.Absolute);
        }
        var appTileData = new FlipTileData
        {
            BackgroundImage = coverUrl,
            WideBackgroundImage = coverUrl,
            ...
        };
        apptile.Update(appTileData);
    }
}
public static BitmapImage LoadBitmap(string iFilename)
{
    Uri imgUri = new Uri(iFilename, UriKind.Relative);
    StreamResourceInfo imageResource = Application.GetResourceStream(imgUri);
    BitmapImage image = new BitmapImage();
    image.SetSource(imageResource.Stream);
    return image;
}

private void SaveTileImage(WriteableBitmap wbmp, string filename)
{
    using (var store = IsolatedStorageFile.GetUserStoreForApplication())
    {
        if (store.FileExists(filename))
            store.DeleteFile(filename);
        var stream = store.OpenFile(filename, FileMode.OpenOrCreate);
        wbmp.SaveJpeg(stream, wbmp.PixelWidth, wbmp.PixelHeight, 100, 100);
        stream.Close();
    }
}
private WriteableBitmap CreateTileImageWithCloud(BitmapImage img)
{
    Image image = null;
    WriteableBitmap wbmp = null;
    using (AutoResetEvent are = new AutoResetEvent(false))
    {
        Deployment.Current.Dispatcher.BeginInvoke(() =>
        {
            image = new Image { Source = img };
            Canvas.SetLeft(image, 0);
            Canvas.SetTop(image, 0);
            var cloud = new BitmapImage(new Uri("Assets/Images/Other/Cloud_no.png", UriKind.Relative));
            var cloudImg = new Image { Source = cloud };
            Canvas.SetLeft(cloudImg, 125);
            Canvas.SetTop(cloudImg, 10);
            var canvas = new Canvas
            {
                Height = 176,
                Width = 176
            };
            canvas.Children.Add(image);
            canvas.Children.Add(cloudImg);
            wbmp = new WriteableBitmap(176, 176);
            wbmp.Render(canvas, null);
            wbmp.Invalidate();
            are.Set();
        });
        are.WaitOne();
    }
    return wbmp;
}
Edit
I found a small pattern in when this works and when it doesn't. When the application is running and I call this twice (in TrackReady and SkipNext), I very often get the cover image with the cloud. When I am running just the background agent (without the running app), I always get a black image. And often the first UpdateAppTile call gives just a black image and the second a black image with the cloud in it. That black color is the default canvas background, so I guess the problem is a delay when loading the cover image from the URL. But I am not sure how to use the ImageOpened event in my case, or whether it would help.
I think that you should call Measure and Arrange after adding the elements to the canvas and before rendering it (as with other UIElements):
canvas.Measure( new Size( Width, Height ) );
canvas.Arrange( new Rect( 0, 0, Width, Height ) );

Image overlay with camera captured image in android

I need to take a picture with the camera and at the same time show an overlay image on top of the camera view. After the picture is taken, I need to save what the user saw while taking the picture. Can anyone suggest an approach, please? Here is what I have so far:
public void onPictureTaken(byte[] data, Camera camera) {
    Bitmap cameraBitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
    wid = cameraBitmap.getWidth();
    hgt = cameraBitmap.getHeight();
    Bitmap newImage = Bitmap.createBitmap(wid, hgt, Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(newImage);
    canvas.drawBitmap(cameraBitmap, 0f, 0f, null);
    Drawable drawable = getResources().getDrawable(R.drawable.d);
    drawable.setBounds(20, 20, 260, 160);
    drawable.draw(canvas);
    File storagePath = new File(Environment.getExternalStorageDirectory() + "/Vampire Photos/");
    storagePath.mkdirs();
    File myImage = new File(storagePath, Long.toString(System.currentTimeMillis()) + ".jpg");
    try
    {
        FileOutputStream out = new FileOutputStream(myImage);
        newImage.compress(Bitmap.CompressFormat.JPEG, 90, out);
        out.flush();
        out.close();
    }
    catch (FileNotFoundException e)
    {
        Log.d("In Saving File", e + "");
    }
    catch (IOException e)
    {
        Log.d("In Saving File", e + "");
    }
    camera.startPreview();
    drawable = null;
    newImage.recycle();
    newImage = null;
    cameraBitmap.recycle();
    cameraBitmap = null;
}
};
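One thing worth checking if the saved picture does not match what was on screen: the bounds (20, 20, 260, 160) are in captured-bitmap pixels, while the on-screen overlay is laid out in preview-view pixels, and the two resolutions usually differ. Below is a rough sketch of scaling the overlay rectangle accordingly; the view-size and rectangle parameters are assumptions supplied by the caller, not values from the original code.

// Illustrative helper (not from the original post): maps an overlay rectangle
// given in preview-view pixels into captured-bitmap pixels before drawing it,
// so the saved composite matches what was shown on screen.
import android.graphics.Canvas;
import android.graphics.drawable.Drawable;

public final class OverlayScaler {
    public static void drawScaledOverlay(Canvas canvas, Drawable overlay,
            int viewWidth, int viewHeight,        // size of the camera preview view on screen
            int bitmapWidth, int bitmapHeight,    // size of the captured bitmap
            int left, int top, int right, int bottom) { // overlay rect in view pixels
        float scaleX = (float) bitmapWidth / viewWidth;
        float scaleY = (float) bitmapHeight / viewHeight;
        overlay.setBounds(Math.round(left * scaleX), Math.round(top * scaleY),
                Math.round(right * scaleX), Math.round(bottom * scaleY));
        overlay.draw(canvas);
    }
}

In onPictureTaken this would replace the fixed drawable.setBounds(20, 20, 260, 160) call, passing the canvas that wraps newImage, the preview view's width and height, and cameraBitmap's dimensions.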

Blackberry - ListField with images from filesystem

I use the following code to retrieve an image from the phone or SD card and use that image in my ListField. It produces the output, but it takes a very long time to render the screen. How can I solve this problem? Can anyone help me? Thanks in advance!
String text = fileholder.getFileName();
try {
    String path = "file:///" + fileholder.getPath() + text;
    // path = "file:///SDCard/BlackBerry/pictures/image.bmp"
    InputStream inputStream = null;
    // Get file connection
    FileConnection fileConnection = (FileConnection) Connector.open(path);
    inputStream = fileConnection.openInputStream();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    int j = 0;
    while ((j = inputStream.read()) != -1) {
        baos.write(j);
    }
    byte data[] = baos.toByteArray();
    inputStream.close();
    fileConnection.close();
    // Encode and resize image
    EncodedImage eImage = EncodedImage.createEncodedImage(data, 0, data.length);
    int scaleFactorX = Fixed32.div(Fixed32.toFP(eImage.getWidth()),
            Fixed32.toFP(180));
    int scaleFactorY = Fixed32.div(Fixed32.toFP(eImage.getHeight()),
            Fixed32.toFP(180));
    eImage = eImage.scaleImage32(scaleFactorX, scaleFactorY);
    Bitmap bitmapImage = eImage.getBitmap();
    graphics.drawBitmap(0, y + 1, 40, 40, bitmapImage, 0, 0);
    graphics.drawText(text, 25, y, 0, width);
}
catch (Exception e) {}
You should read the files once (on app start or before the screen opens, perhaps behind a progress dialog), put the decoded images in an array, and use that array in paint.
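A rough sketch of that idea follows, assuming the list is backed by a ListFieldCallback; the file paths, the 40-pixel thumbnail size and the row layout are illustrative, not taken from the original code.

// Sketch: pre-decode and pre-scale all bitmaps once, then only blit the cached
// Bitmap in drawListRow so painting the list stays fast.
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import javax.microedition.io.Connector;
import javax.microedition.io.file.FileConnection;
import net.rim.device.api.math.Fixed32;
import net.rim.device.api.system.Bitmap;
import net.rim.device.api.system.Display;
import net.rim.device.api.system.EncodedImage;
import net.rim.device.api.ui.Graphics;
import net.rim.device.api.ui.component.ListField;
import net.rim.device.api.ui.component.ListFieldCallback;

public class CachedImageListCallback implements ListFieldCallback {
    private final Bitmap[] thumbnails;
    private final String[] fileNames;

    // Called once, before the screen is pushed (e.g. behind a progress dialog).
    public CachedImageListCallback(String[] paths, String[] fileNames) throws Exception {
        this.fileNames = fileNames;
        this.thumbnails = new Bitmap[paths.length];
        for (int i = 0; i < paths.length; i++) {
            FileConnection fc = (FileConnection) Connector.open("file:///" + paths[i]);
            InputStream in = fc.openInputStream();
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            int b;
            while ((b = in.read()) != -1) {
                baos.write(b);
            }
            in.close();
            fc.close();
            byte[] data = baos.toByteArray();
            EncodedImage img = EncodedImage.createEncodedImage(data, 0, data.length);
            // Scale to a 40-pixel thumbnail (matches the 40x40 drawBitmap above).
            int sx = Fixed32.div(Fixed32.toFP(img.getWidth()), Fixed32.toFP(40));
            int sy = Fixed32.div(Fixed32.toFP(img.getHeight()), Fixed32.toFP(40));
            thumbnails[i] = img.scaleImage32(sx, sy).getBitmap();
        }
    }

    // Painting now only draws what is already in memory.
    public void drawListRow(ListField listField, Graphics graphics, int index, int y, int width) {
        graphics.drawBitmap(0, y + 1, 40, 40, thumbnails[index], 0, 0);
        graphics.drawText(fileNames[index], 45, y, 0, width);
    }

    public Object get(ListField listField, int index) { return fileNames[index]; }
    public int getPreferredWidth(ListField listField) { return Display.getWidth(); }
    public int indexOfList(ListField listField, String prefix, int start) { return -1; }
}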
