I'm trying to build real-time advertisement billboard detection for road scenes using an Android smartphone. The goal is to crop the area of the billboard object (the region of interest) and save it to a database.
For preprocessing, I use grayscaling and Canny edge detection (with Otsu thresholding to set the upper and lower thresholds). Then I use a contour-based method to check whether an object is rectangular by examining its approximated corner points. I implement this with Java OpenCV in Android Studio. When I run the program, it only detects rectangular objects against a plain background, and only if the rectangle has high contrast with that background. Currently it can only detect rectangles with 90-degree corners and fails on rounded rectangles. Worse, it fails completely on rectangular objects in more complex backgrounds, such as road scenes where the target has a similar color to the background (low contrast) or where occlusions like trees, traffic lights, and cables break up the contour.
This is the code I use for edge detection:
Mat destination = new Mat(oriMat.rows(), oriMat.cols(), oriMat.type());
// Grayscale, then a light blur so Canny sees less sensor noise
Imgproc.cvtColor(oriMat, destination, Imgproc.COLOR_RGBA2GRAY);
Imgproc.GaussianBlur(destination, destination, new Size(3, 3), 0, 0, Imgproc.BORDER_DEFAULT);
// Otsu picks a global threshold; use it to derive Canny's hysteresis bounds.
// Threshold into a throwaway Mat so Canny still runs on the grayscale image
// (thresholding in place would hand Canny a binary image instead)
Mat otsuDst = new Mat();
double otsuThresholdValue = Imgproc.threshold(destination, otsuDst, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
double lowerThreshold = otsuThresholdValue * 0.5;
double upperThreshold = otsuThresholdValue;
Mat canny = new Mat();
Imgproc.Canny(destination, canny, lowerThreshold, upperThreshold);
// Canny already outputs an 8-bit edge map, so convertScaleAbs is a no-op,
// and blending the map with itself (0.5 + 0.5) just copies it
Mat abs = new Mat();
Core.convertScaleAbs(canny, abs);
Mat result = new Mat();
Core.addWeighted(abs, 0.5, abs, 0.5, 0, result);
Here is the code I use for contour-based detection:
ArrayList<MatOfPoint> contours = new ArrayList<>();
// Find contours and store them all as a list
Imgproc.findContours(matData.monoChrome.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// Note: rows() is the image height and cols() is its width
final int height = matData.monoChrome.rows();
final int width = matData.monoChrome.cols();
int matArea = width * height;
for (int i = 0; i < contours.size(); i++) {
    double contoursArea = Imgproc.contourArea(contours.get(i));
    MatOfPoint2f approx = new MatOfPoint2f();
    MatOfPoint2f contour = new MatOfPoint2f(contours.get(i).toArray());
    // Approximate the contour with accuracy proportional to its perimeter;
    // 0.1 is fairly coarse and forces rounded corners into straight segments
    double epsilon = Imgproc.arcLength(contour, true) * 0.1;
    // Imgproc.minAreaRect(contour);
    Imgproc.approxPolyDP(contour, approx, epsilon, true);
    // Skip candidates that are tiny (under 1% of the frame) or non-convex
    if (Math.abs(contoursArea) < matArea * 0.01 ||
            !Imgproc.isContourConvex(new MatOfPoint(approx.toArray()))) {
        continue;
    }
    Imgproc.drawContours(matData.resizeMat, contours, i, new Scalar(0, 255, 0));
    List<Point> points = approx.toList();
    int pointCount = points.size();
    // Cosine of the angle at each vertex (angle() is the helper from
    // OpenCV's squares.cpp sample)
    LinkedList<Double> cos = new LinkedList<>();
    for (int j = 2; j < pointCount + 1; j++) {
        cos.addLast(angle(points.get(j % pointCount), points.get(j - 2), points.get(j - 1)));
    }
    Collections.sort(cos, (lhs, rhs) -> lhs.intValue() - rhs.intValue());
    double mincos = cos.getFirst();
    double maxcos = cos.getLast();
    // Accept only quadrilaterals whose corners are all roughly 90 degrees
    if (points.size() == 4 && mincos >= -0.3 && maxcos <= 0.5) {
        for (int j = 0; j < points.size(); j++) {
            Core.circle(matData.resizeMat, points.get(j), 6, new Scalar(255, 0, 0), 6);
        }
        matData.points = points;
        break;
    }
}
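One direction I have considered for the rounded-rectangle failure (hinted at by the commented-out minAreaRect call above) is to relax the four-sharp-corners test and instead check how much of its minimum-area bounding box a contour fills. A rough sketch, where isRoughlyRectangular is a hypothetical helper and the 0.85 fill ratio is just an assumed starting value to tune:
boolean isRoughlyRectangular(MatOfPoint contour) {
    // A contour counts as rectangular when it fills most of its rotated
    // bounding box, so rounded corners and slight tilts still pass
    MatOfPoint2f c2f = new MatOfPoint2f(contour.toArray());
    RotatedRect box = Imgproc.minAreaRect(c2f);
    double boxArea = box.size.width * box.size.height;
    if (boxArea <= 0) {
        return false;
    }
    return Imgproc.contourArea(contour) / boxArea > 0.85; // assumed threshold
}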
Is there any method I can use to reliably recognize advertisement billboards in road scenes?
I would appreciate any answers and ideas. Thank you!
Related
I have two images taken by two cameras, and I want to use the SURF algorithm (or any algorithm in Emgu CV) to get just the matched feature locations in the two images, so I can estimate the real distance between the cameras and those features (objects). I found an example using SURF in the Emgu samples, but it draws lines between matched features; I want the x and y coordinates of the start and end of each line.
[Sample image: features matched by the SURF algorithm]
I tried to add some code to the SURF example's Draw method, but it does not work as expected:
long num_matches = matches.Size;
float lower = matches[0][0].Distance;
List<PointF> matched_points1 = new List<PointF>();
List<PointF> matched_points2 = new List<PointF>();
for (int i = 0; i < num_matches; i++)
{
    if (matches[i][0].Distance < 0.095)
    {
        // TrainIdx indexes the model keypoints, QueryIdx the observed ones
        int idx1 = matches[i][0].TrainIdx;
        int idx2 = matches[i][0].QueryIdx;
        matched_points1.Add(modelKeyPoints[idx1].Point);
        matched_points2.Add(observedKeyPoints[idx2].Point);
        CvInvoke.Circle(result, new Point((int)observedKeyPoints[idx2].Point.X, (int)observedKeyPoints[idx2].Point.Y), 1, new MCvScalar(255, 0, 0));
        CvInvoke.Circle(result, new Point((int)modelKeyPoints[idx1].Point.X + modelImage.Width, (int)modelKeyPoints[idx1].Point.Y), 1, new MCvScalar(255, 0, 0));
    }
    // Track the smallest match distance seen so far
    if (lower > matches[i][0].Distance)
        lower = matches[i][0].Distance;
}
public static Point[] FindPoints(Mat modelImage, Mat observedImage, out long matchTime)
{
Mat homography;
VectorOfKeyPoint modelKeyPoints;
VectorOfKeyPoint observedKeyPoints;
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
{
Mat mask;
FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches,out mask, out homography);
//Draw the matched keypoints
Mat result = new Mat();
Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
Point[] points = null;
if (homography != null)
{
//draw a rectangle along the projected model
Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
PointF[] pts = new PointF[]
{
new PointF(rect.Left, rect.Bottom),
new PointF(rect.Right, rect.Bottom),
new PointF(rect.Right, rect.Top),
new PointF(rect.Left, rect.Top)
};
pts = CvInvoke.PerspectiveTransform(pts, homography);
points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
}
return points;
}
}
// Code added inside the Draw function to print matched index pairs
for (int i = 0; i < matches.Size; i++)
{
    var a = matches[i].ToArray();
    if (mask.GetData(i)[0] == 0)
        continue;
    foreach (var e in a)
    {
        Point p = new Point(e.TrainIdx, e.QueryIdx);
        Console.WriteLine(string.Format("Point: {0}", p));
    }
    Console.WriteLine("-----------------------");
}
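For reference, here is the same index bookkeeping sketched with OpenCV's Java bindings (the language used elsewhere in this thread). It assumes ORB rather than SURF, since SURF lives in the non-free contrib module; the point is only how queryIdx and trainIdx map back to keypoint coordinates:
import org.opencv.core.*;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.ORB;

public class MatchEndpoints {
    // Print the (x, y) start and end of every matched-feature line
    public static void printMatchEndpoints(Mat img1, Mat img2) {
        ORB orb = ORB.create();
        MatOfKeyPoint kp1 = new MatOfKeyPoint(), kp2 = new MatOfKeyPoint();
        Mat desc1 = new Mat(), desc2 = new Mat();
        orb.detectAndCompute(img1, new Mat(), kp1, desc1);
        orb.detectAndCompute(img2, new Mat(), kp2, desc2);

        DescriptorMatcher matcher =
                DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(desc1, desc2, matches); // desc1 is the query set, desc2 the train set

        KeyPoint[] k1 = kp1.toArray(), k2 = kp2.toArray();
        for (DMatch m : matches.toArray()) {
            Point p1 = k1[m.queryIdx].pt; // line start, in img1
            Point p2 = k2[m.trainIdx].pt; // line end, in img2
            System.out.println(p1 + " -> " + p2);
        }
    }
}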
For a few days now I have been looking for a way to draw a rectangle on an image frame. I am using the CvInvoke.cvRectangle method to draw on the image because I need an antialiased rect.
The problem is rotating a given shape by a given angle. I can't find a good solution.
I have tried drawing the rectangle on a separate frame, rotating that whole frame, and applying the result on top of my base frame, but then the antialiasing breaks. It's not working.
I'm working on a simple application that should let the user draw a few kinds of shapes, resize them, and rotate them by a given angle.
Any idea how to achieve this?
The best way I found to draw a minimum enclosing rectangle around a contour is the Polylines() function, fed with the vertices returned by the MinAreaRect() function. There are surely other ways to do it as well. Here is the code walkthrough:
// Find contours
var contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
Mat hierarchy = new Mat();
CvInvoke.FindContours(image, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
// According to your metric, get an index of the contour you want to find the min enclosing rectangle for
int index = 2; // Say, 2nd index works for you.
var rectangle = CvInvoke.MinAreaRect(contours[index]);
Point[] vertices = Array.ConvertAll(rectangle.GetVertices(), Point.Round);
CvInvoke.Polylines(image, vertices, true, new MCvScalar(0, 0, 255), 5);
The result can be visualized in the image below; the minimum enclosing rectangle is drawn in red.
I use C# and Emgu.CV (4.1), and I think this code will not be difficult to port to any other platform.
Add this function to your helper class:
public static Mat DrawRect(Mat input, RotatedRect rect, MCvScalar color = default(MCvScalar),
    int thickness = 1, LineType lineType = LineType.EightConnected, int shift = 0)
{
    // Connect each vertex to the next, wrapping around to close the shape
    var v = rect.GetVertices();
    for (var i = 0; i < v.Length; i++)
    {
        var next = v[(i + 1) % v.Length];
        CvInvoke.Line(input, Point.Round(v[i]), Point.Round(next), color, thickness, lineType, shift);
    }
    return input;
}
This draws the rotated rectangle from its vertex points. Point.Round is needed because RotatedRect stores its vertices in float coordinates while CvInvoke.Line takes integer points.
Use:
var mat = Mat.Zeros(200, 200, DepthType.Cv8U, 3);
var rRect = new RotatedRect(new PointF(100, 100), new SizeF(100, 50), 30);
DrawRect(mat, rRect, new MCvScalar(255, 0, 0));
var brect = CvInvoke.BoundingRectangle(new VectorOfPointF(rRect.GetVertices()));
CvInvoke.Rectangle(mat, brect, new MCvScalar(0, 255, 0), 1, LineType.EightConnected, 0);
Result:
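Since the original question asked for antialiased edges: in OpenCV's Java bindings the same vertex-walk works, and passing Imgproc.LINE_AA as the line type gives antialiasing. A sketch, not Emgu-specific code:
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.RotatedRect;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;

public class RotatedRectDrawing {
    // Draw a rotated rectangle with antialiased edges
    public static void drawRotatedRect(Mat img, RotatedRect rect, Scalar color, int thickness) {
        Point[] v = new Point[4];
        rect.points(v); // fills in the four corners in order
        for (int i = 0; i < 4; i++) {
            Imgproc.line(img, v[i], v[(i + 1) % 4], color, thickness, Imgproc.LINE_AA, 0);
        }
    }
}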
You should read the OpenCV documentation.
There is a RotatedRect class that you can use for this task; you specify the angle by which the rectangle is rotated.
Here is a sample code (taken from the docs) for drawing a rotated rectangle:
Mat image(200, 200, CV_8UC3, Scalar(0));
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
line(image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0));
Rect brect = rRect.boundingRect();
rectangle(image, brect, Scalar(255,0,0));
imshow("rectangles", image);
waitKey(0);
Here is the result:
I would like to find all vertices (e.g. return their x, y positions) of the black object.
I will use Java and JavaCV for the implementation. Is there any API or algorithm that can help?
Sorry, I don't have enough reputation to post images, so I post the links here.
The original image looks like this:
http://i.stack.imgur.com/geubs.png
The expected result looks like this:
http://i.stack.imgur.com/MA7uq.png
Here is what you should do (for explanation, see the comments in the code):
CODE
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// Load the image
String path = "/home/bikz05/Desktop/geubs.png";
Mat original = Highgui.imread(path);
Mat image = new Mat();
Imgproc.cvtColor(original, image, Imgproc.COLOR_BGR2GRAY);
// Threshold the image (inverted binary: the dark object becomes white)
Mat threshold = new Mat();
Imgproc.threshold(image, threshold, 127, 255, Imgproc.THRESH_BINARY_INV);
// Find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(threshold, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// Get contour index with largest area
double max_area = -1;
int index = 0;
for(int i=0; i< contours.size();i++) {
if (Imgproc.contourArea(contours.get(i)) > max_area) {
max_area = Imgproc.contourArea(contours.get(i));
index = i;
}
}
// Approximate the largest contour
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f oriCurve = new MatOfPoint2f( contours.get(index).toArray() );
Imgproc.approxPolyDP(oriCurve, approxCurve, 6.0, true);
// Draw contour points on the original image
Point [] array = approxCurve.toArray();
for(int i=0; i < array.length;i++) {
Core.circle(original, array[i], 2, new Scalar(0, 0 ,255), 2);
}
INPUT IMAGE
OUTPUT IMAGE
OpenCV allows you to take a binary image and carry out contour analysis.
http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.html
You could use findContours to find all of the contours (all of the edge points) then simply average them or pick and choose the ones that suit your purpose.
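As a rough illustration of the "average them" option, here is a sketch in OpenCV's Java bindings (matching the Java answer above; averagePoint is a hypothetical helper):
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;

public class ContourCentroid {
    // "Average them": the mean of a contour's edge points gives a
    // rough centre for the shape
    public static Point averagePoint(MatOfPoint contour) {
        Point mean = new Point(0, 0);
        Point[] pts = contour.toArray();
        for (Point p : pts) {
            mean.x += p.x;
            mean.y += p.y;
        }
        mean.x /= pts.length;
        mean.y /= pts.length;
        return mean;
    }
}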
Here is a good example for JavaCV:
opencv/javacv: How to iterate over contours for shape identification?
I am writing a ray tracer program for my computer graphics class. So far I only have spheres and a shadow ray implemented. The current problem is that when I move my sphere off center it stretches. Here is the code I use to determine whether a ray intersects a sphere:
bool Sphere::onSphere(Ray r)
{
    // Ray-sphere test: with a normalized direction, a hit exists when
    // the discriminant b^2 - 4c of t^2 + b*t + c = 0 is non-negative
    float b = (r.dir*2).innerProduct(r.pos + centre*-1);
    float c = (r.pos + centre*-1).innerProduct(r.pos + centre*-1) - radius*radius;
    return b*b - 4*c >= 0;
}
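(onSphere only tests the discriminant; the getT used below has to actually solve the quadratic. For reference, a minimal sketch of that step, written in Java like the OpenCV posts above; Vector, Ray, minus, and innerProduct are hypothetical stand-ins for my own classes, and the ray direction is assumed normalized so the quadratic's leading coefficient is 1.)
double getT(Ray r) {
    // Solve t^2 + b*t + c = 0 and return the nearest non-negative root
    Vector oc = r.pos.minus(centre);             // o - c
    double b = 2.0 * r.dir.innerProduct(oc);
    double c = oc.innerProduct(oc) - radius * radius;
    double disc = b * b - 4.0 * c;
    if (disc < 0) return -1;                     // ray misses the sphere
    double tNear = (-b - Math.sqrt(disc)) / 2.0; // closer intersection first
    return (tNear >= 0) ? tNear : (-b + Math.sqrt(disc)) / 2.0;
}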
Here is the code I use to spawn each ray:
for(int i = -cam.width/2; i < cam.width/2; i++)
{
    for(int j = -cam.height/2; j < cam.height/2; j++)
    {
        float normi = (float)i;
        float normj = (float)j;
        // Position of this pixel on the view plane, in world space
        Vector pixlePos = cam.right*normi + cam.up*normj + cam.forward*cam.dist + cam.pos*1;
        // Ray from the eye through the pixel
        Vector direction = pixlePos + cam.pos*-1;
        direction.normalize();
        Vector colour = recursiveRayTrace(Ray(pixlePos, direction), 30, 1, 0);
        float red = colour.getX()/255;
        float green = colour.getY()/255;
        float blue = colour.getZ()/255;
        fwrite (&red, sizeof(float), 1, myFile);
        fwrite (&green, sizeof(float), 1, myFile);
        fwrite (&blue, sizeof(float), 1, myFile);
    }
}
recursiveRayTrace:
Vector Scene::recursiveRayTrace(Ray r, float maxDist, int maxBounces, int bounces)
{
    if(maxBounces < bounces)
        return Vector(0,0,0);
    for(int i = 0; i < spheres.size(); i++)
    {
        if(spheres.at(i).onSphere(r))
        {
            Vector colour(ambiant.colour);
            for(int j = 0; j < lights.size(); j++)
            {
                Vector intersection(r.pos + r.dir*spheres.at(i).getT(r));
                // Shadow ray from the hit point toward light j; the light
                // index must be j (not the sphere index i), and nRay is
                // built but never tested for occlusion yet
                Ray nRay(intersection, lights.at(j).centre + intersection*-1);
                colour = colour + lights.at(j).colour;
            }
            return colour;
        }
    }
    return Vector(0,0,0);
}
What I get is a sphere that is stretched in the direction of the vector from the center of the image to the center of the sphere. I'm not looking for anyone to do my homework; I am just having a really hard time debugging this one. Any hints are appreciated :) Thanks!
Edit: cam.dist is the distance from the camera to the view plane.
The stretching is actually a natural consequence of perspective viewing, and it is exaggerated if you have a very wide field of view. In other words, moving the camera back from your image plane should make it seem more natural.
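To make that concrete, here is a small sketch (in Java, with assumed names) of the horizontal field of view implied by a view plane that is width pixels across and sits cam.dist units from the eye:
// Field of view widens as the view plane moves closer to the eye
static double fovDegrees(double width, double dist) {
    return 2.0 * Math.toDegrees(Math.atan((width / 2.0) / dist));
}
// e.g. width = 640, dist = 320 gives ~90 degrees and strong stretching
// near the borders; dist = 640 narrows it to ~53 degrees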
I'm currently making an application in Processing that takes an image and applies an 8-bit style effect to it, i.e. makes it look pixelated. A method takes a style and window size as parameters (the style is the shape the window will be drawn as: rect, ellipse, cross, etc.; the window size is the square of a number between 1 and 10), producing results similar to the iPhone app pxl (http://itunes.apple.com/us/app/pxl./id499620829?mt=8). The method walks over the image's pixels window by window, averages each window's colour, and draws a rect (or whichever shape/style was chosen) at the equivalent position on the other side of the sketch window. When run, the sketch should display the original image on the left and mirror it with the processed version on the right.
The problem I'm having is that when the averaged-colour rects are drawn, the order in which they display becomes skewed.
Although the results are rather amusing, they are not what I want. Here is the code:
//=========================================================
// GLOBAL VARIABLES
//=========================================================
PImage img;
public int avR, avG, avB;
private final int BLOCKS = 0, DOTS = 1, VERTICAL_CROSSES = 2, HORIZONTAL_CROSSES = 3;
public sRGB styleColour;
//=========================================================
// METHODS FOR AVERAGING WINDOW COLOURS, CREATING AN
// 8 BIT REPRESENTATION OF THE IMAGE AND LOADING AN
// IMAGE
//=========================================================
public sRGB averageWindowColour(color [] c){
  // RGB variables
  float r = 0;
  float g = 0;
  float b = 0;
  // The accumulators are fields, so reset them on every call;
  // otherwise sums carry over from the previous window
  avR = 0;
  avG = 0;
  avB = 0;
  // Iterator
  int i = 0;
  int sizeOfWindow = c.length;
  // Count through the window's pixels, store the
  // red, green and blue values in the RGB variables
  // and sum them into the average variables
  for(i = 0; i < c.length; i++){
    r = red  (c[i]);
    g = green(c[i]);
    b = blue (c[i]);
    avR += r;
    avG += g;
    avB += b;
  }
  // Divide the sums by the number of pixels in the window
  // to obtain the average
  avR = avR / sizeOfWindow;
  avG = avG / sizeOfWindow;
  avB = avB / sizeOfWindow;
  // Return the colour
  return new sRGB(avR,avG,avB);
}
public void eightBitIT(int style, int windowSize){
  img.loadPixels();
  int side = (int)sqrt(windowSize); // the window is side x side pixels
  for(int wx = 0; wx + side <= img.width; wx += side){
    for(int wy = 0; wy + side <= img.height; wy += side){
      color [] tempCols = new color[windowSize];
      int i = 0;
      for(int x = 0; x < side; x++){
        for(int y = 0; y < side; y++){
          // pixels[] is row-major, so the row stride must be the full
          // image width -- using (img.width - windowSize) here is what
          // skews the output
          int loc = (wx + x) + (wy + y) * img.width;
          tempCols[i] = img.pixels[loc];
          i++;
        }
      }
      // this is meant to be in a switch (0 = rect, 1 = ellipse, etc.)
      styleColour = new sRGB(averageWindowColour(tempCols));
      rectMode(CORNER);
      noStroke();
      fill(styleColour.returnColourScaled());
      ellipse(wx + (img.width + 5), wy + 5, side, side);
    }
  }
}
public PImage load(String s){
PImage temp = loadImage(s);
temp.resize(600,470);
return temp;
}
void setup(){
background(0);
// Load the image and set size of screen to its size*2 + the borders
// and display the image.
img = loadImage("oscilloscope.jpg");
size(img.width*2+15,(img.height+10));
frameRate(25);
image(img,5,5);
// Draw the borders
strokeWeight(5);
stroke(255);
rectMode(CORNERS);
noFill();
rect(2.5,2.5,img.width+3,height-3);
rect(img.width+2.5,2.5,width-3,height-3);
stroke(255,0,0);
strokeWeight(1);
rect(5,5,9,9); //window example
// process the image
eightBitIT(BLOCKS, 16);
}
void draw(){
//eightBitIT(BLOCKS, 4);
//println("X: "+mouseX+" Y: "+mouseY);
}
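For reference, the mapping between 2-D coordinates and Processing's 1-D pixels[] array is row-major, which is where an offset like this usually creeps in (pixelIndex is a hypothetical helper, not something already in the code above):
// The stride into pixels[] is always the full image width,
// regardless of the sampling window's size
int pixelIndex(int x, int y, int imgWidth) {
  return x + y * imgWidth;
}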
This has been bugging me for a while now, as I can't see where in my code I'm offsetting the coordinates so that they display like this. I know it's probably something very trivial but I can't seem to work it out. If anyone can spot why this skewed reordering is happening I would be much obliged, as I have quite a lot of other ideas I want to implement and this is holding me back.
Thanks,