Find the white/black pixels in a specific region with JavaCV

I have tried the code below. 540 is the leftmost x value of the box, 3 is the topmost y value, 262 is the width and 23 is the height of the region for which I want to calculate the ratio of white to black pixels. What I really want to do is find the white/black pixel ratio in a specific region. I have calculated the coordinates for each cell (the regions I want to inspect) and tried this code, but the counting is wrong.
Can I please have an idea about this issue?
I am really stuck here with my final year project.
CvSize cvSize = cvSize(img.width(), img.height());
IplImage image = cvCreateImage(cvSize, IPL_DEPTH_8U, 1);
IplImage image2 = cvCreateImage(cvSize, IPL_DEPTH_8U, 3);
cvCvtColor(image2, image, CV_RGB2GRAY);
cvSetImageROI(image2, cvRect(540,3,262,23));
//IplImage image2 = cvCreateImage(cvSize, IPL_DEPTH_8U, 3);
//
//cvCvtColor(arg0, arg1, arg2)
// cvCvtColor(image2, image, CV_RGB2GRAY);
//cvThreshold(image, image, 128, 255, CV_THRESH_BINARY);
CvLineIterator iterator = new CvLineIterator();
double sum = 0, green_sum = 0, red_sum = 0;
CvPoint p2 = new CvPoint(802,3);
CvPoint p1 = new CvPoint(540,26);
int lineCount = cvInitLineIterator(image2, p1, p2, iterator, 8, 0 );
for (int i = 0; i < lineCount; i++) {
sum += iterator.ptr().get() & 0xFF;
}
System.out.println("sum................"+sum);
CV_NEXT_LINE_POINT(iterator);
}
}
It gave the result: sum................0.0
I am really stuck with this. Can you please suggest a solution for this issue?

Move the CV_NEXT_LINE_POINT(iterator); line inside the for loop. Then it should work.
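As a rough sketch (not part of the original answer), here is the loop with that fix applied, reusing the image2, p1, p2 and iterator variables from the question. Without CV_NEXT_LINE_POINT inside the loop the iterator never advances, so the same pixel is read on every iteration:
int lineCount = cvInitLineIterator(image2, p1, p2, iterator, 8, 0);
double sum = 0;
for (int i = 0; i < lineCount; i++) {
    sum += iterator.ptr().get() & 0xFF;   // value of the current pixel on the line
    CV_NEXT_LINE_POINT(iterator);         // advance the iterator to the next pixel
}
System.out.println("sum................" + sum);
Note also that, in the code as posted, image2 is freshly created with cvCreateImage and is never filled from img before the iterator runs over it, so the pixel values it contains are undefined; the source image needs to be copied or converted into the image you iterate over.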

Related

Align 2 images based on Hough Lines with openCV

I have 2 (aerial) images, taken from a slightly different angle:
(image 1 and image 2 are shown in the original post)
I need to rescale image1 in the horizontal direction to align it with image2. Without any modification, the two images placed next to each other look like this:
This is the desired result (made in Photoshop):
In Photoshop, I took the right half of image1 and scaled it down horizontally a little bit. I did the same for the left half of image1, where I had to scale slightly more.
I would like to know how I can accomplish this in OpenCV by using the Hough Line Transform. I have already started drawing Hough lines, but I have no idea how to do the transform that makes the Hough lines match:
Here's my C++ code (called from Objective-C):
cv::Mat image1 = [im1 CVMat3];
cv::Mat gray_image1;
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cv::Mat dst1, cdst;
Canny(image1, dst1, 40, 90, 3);
double minLineLength = 0;
double maxLineGap = 10;
std::vector<cv::Vec2f> lines;
// detect lines
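// note: cv::HoughLines has no minLineLength/maxLineGap parameters; the two extra
// arguments below are passed as its srn/stn resolution divisors. cv::HoughLinesP is
// the variant that takes a minimum line length and maximum gap.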
cv::HoughLines(dst1, lines, 1, CV_PI/180, 90, minLineLength,maxLineGap );
for( size_t i = 0; i < lines.size(); i++ ) {
    float rho = lines[i][0], theta = lines[i][1];
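    // theta is the angle of the line's normal, so values between 70 and 110 degrees
    // keep only the nearly horizontal lines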
    if( theta>CV_PI/180*70 && theta<CV_PI/180*110) {
        cv::Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( image1, pt1, pt2, cvScalar(10,100,255), 3, CV_AA);
    }
}
Some help would be really appreciated :-). Thanks in advance.

Real-time object recognition in complex background

I'm trying to make a real-time advertisement billboard detector for road scenes using an Android smartphone. The goal is to crop the area of the billboard (the region of interest) and save it to a database.
For example, see the two sample images in the original post.
For preprocessing I use grayscaling and Canny edge detection (Otsu thresholding is used to set the upper and lower thresholds). Then I use a contour-based method to detect whether an object is rectangular by checking its corner points. I use OpenCV for Java in Android Studio for the implementation. When I run the program, it only detects rectangular objects against a plain background, and only if the rectangle has high contrast with the background. Currently it can only detect rectangles with 90-degree corners and fails on rounded rectangles. Furthermore, it fails completely on rectangular objects in a more complex background, such as a road scene where the object has a similar colour to the background (low contrast) or where there are occlusions like trees, traffic lights and cables, which cause the detection to fail.
This is the code I use for edge detection
Mat destination = new Mat(oriMat.rows(), oriMat.cols(), oriMat.type());
Imgproc.cvtColor(oriMat, destination, Imgproc.COLOR_RGBA2GRAY);
Imgproc.GaussianBlur(destination, destination, new Size(3,3), 0, 0, Imgproc.BORDER_DEFAULT);
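// THRESH_OTSU makes threshold() ignore the 0 passed as the threshold and compute one
// from the image histogram; the chosen value is returned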
double otsuThresholdValue = Imgproc.threshold(destination, destination, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
double lowerThreshold = otsuThresholdValue*0.5;
double upperThreshold = otsuThresholdValue;
Mat canny = new Mat();
Imgproc.Canny(destination, canny, lowerThreshold, upperThreshold);
Mat abs = new Mat();
Core.convertScaleAbs(canny, abs);
Mat result = new Mat();
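// note: both inputs to addWeighted below are the same Mat, so result is effectively a copy of abs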
Core.addWeighted(abs, 0.5, abs, 0.5, 0, result);
Here is the code I use for contour-based detection
ArrayList<MatOfPoint> contours = new ArrayList<>();
// find contours and store them all as a list
Imgproc.findContours(matData.monoChrome.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
final int width = matData.monoChrome.rows();
final int height = matData.monoChrome.cols();
int matArea = width * height;
for (int i = 0; i < contours.size(); i++) {
    double contoursArea = Imgproc.contourArea(contours.get(i));
    MatOfPoint2f approx = new MatOfPoint2f();
    MatOfPoint2f contour = new MatOfPoint2f(contours.get(i).toArray());
    double epsilon = Imgproc.arcLength(contour, true) * 0.1;
    // Imgproc.minAreaRect(contour);
    // approximate contour with accuracy proportional to the contour perimeter
    Imgproc.approxPolyDP(contour, approx, epsilon, true);
    if (Math.abs(contoursArea) < matArea * 0.01 ||
            !Imgproc.isContourConvex(new MatOfPoint(approx.toArray()))) {
        continue;
    }
    Imgproc.drawContours(matData.resizeMat, contours, i, new Scalar(0, 255, 0));
    List<Point> points = approx.toList();
    int pointCount = points.size();
    LinkedList<Double> cos = new LinkedList<>();
    for (int j = 2; j < pointCount + 1; j++) {
        cos.addLast(angle(points.get(j % pointCount), points.get(j - 2), points.get(j - 1)));
    }
    Collections.sort(cos, (lhs, rhs) -> lhs.intValue() - rhs.intValue());
    double mincos = cos.getFirst();
    double maxcos = cos.getLast();
    if (points.size() == 4 && mincos >= -0.3 && maxcos <= 0.5) {
        for (int j = 0; j < points.size(); j++) {
            Core.circle(matData.resizeMat, points.get(j), 6, new Scalar(255, 0, 0), 6);
        }
        matData.points = points;
        break;
    }
}
Is there any method I can use to recognize advertisement billboards in road scenes?
I would appreciate any answers and ideas. Thank you!
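One direction to try, sketched here rather than taken from the original post: instead of requiring exactly four sharp corners from approxPolyDP, compare each contour against its minimum-area rotated rectangle; this tolerates rotated and rounded rectangles, although it will not by itself solve low contrast or occlusion. The sketch assumes the same contours, matArea and matData objects as the code above (plus org.opencv.core.RotatedRect), and the 0.8 and 5.0 thresholds are placeholder values to tune.
for (int i = 0; i < contours.size(); i++) {
    double area = Imgproc.contourArea(contours.get(i));
    if (area < matArea * 0.01) {
        continue;                                  // ignore very small blobs
    }
    MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
    RotatedRect box = Imgproc.minAreaRect(contour2f);
    double boxArea = box.size.width * box.size.height;
    if (boxArea <= 0) {
        continue;
    }
    double rectangularity = area / boxArea;        // 1.0 means the contour fills its box
    double aspect = Math.max(box.size.width, box.size.height)
            / Math.min(box.size.width, box.size.height);
    if (rectangularity > 0.8 && aspect < 5.0) {
        Point[] corners = new Point[4];
        box.points(corners);                       // corners of the rotated rectangle
        for (int j = 0; j < 4; j++) {
            // Core.line as in OpenCV 2.4.x Java (matching Core.circle above); newer versions use Imgproc.line
            Core.line(matData.resizeMat, corners[j], corners[(j + 1) % 4],
                    new Scalar(0, 255, 0), 2);
        }
    }
}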

Emgu CV draw rotated rectangle

I have been looking for a few days for a solution to draw a rectangle on an image frame. Basically I'm using the CvInvoke.cvRectangle method to draw the rectangle on the image, because I need an antialiased rect.
The problem is when I need to rotate a given shape by a given angle. I can't find any good solution.
I have tried drawing the rectangle on a separate frame, rotating the whole frame and applying this new image on top of my base frame, but with that approach there is a problem with antialiasing. It doesn't work.
I'm working on a simple application that should allow drawing a few kinds of shapes, resizing them and rotating them by a given angle.
Any idea how to achieve this?
The best way I found to draw a minimum enclosing rectangle around a contour is to use the Polylines() function with the vertices returned by the MinAreaRect() function. There are surely other ways to do it as well. Here is the code walkthrough:
// Find contours
var contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
Mat hierarchy = new Mat();
CvInvoke.FindContours(image, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
// According to your metric, get an index of the contour you want to find the min enclosing rectangle for
int index = 2; // Say, 2nd index works for you.
var rectangle = CvInvoke.MinAreaRect(contours[index]);
Point[] vertices = Array.ConvertAll(rectangle.GetVertices(), Point.Round);
CvInvoke.Polylines(image, vertices, true, new MCvScalar(0, 0, 255), 5);
The result can be seen in the image below; the minimum enclosing rectangle is drawn in red.
I use C# and Emgu.CV (4.1), and I think this code will not be difficult to port to any platform.
Add this function to your helper class:
public static Mat DrawRect(Mat input, RotatedRect rect, MCvScalar color = default(MCvScalar),
    int thickness = 1, LineType lineType = LineType.EightConnected, int shift = 0)
{
    var v = rect.GetVertices();
    var prevPoint = v[0];
    var firstPoint = prevPoint;
    var nextPoint = prevPoint;
    var lastPoint = nextPoint;
    for (var i = 1; i < v.Length; i++)
    {
        nextPoint = v[i];
        CvInvoke.Line(input, Point.Round(prevPoint), Point.Round(nextPoint), color, thickness, lineType, shift);
        prevPoint = nextPoint;
        lastPoint = prevPoint;
    }
    CvInvoke.Line(input, Point.Round(lastPoint), Point.Round(firstPoint), color, thickness, lineType, shift);
    return input;
}
This draws the rotated rectangle from its vertices. The points are rounded with Point.Round because RotatedRect stores its vertices as float coordinates while CvInvoke.Line takes integer points.
Use:
var mat = Mat.Zeros(200, 200, DepthType.Cv8U, 3);
mat.GetValueRange();
var rRect = new RotatedRect(new PointF(100, 100), new SizeF(100, 50), 30);
DrawRect(mat, rRect,new MCvScalar(255,0,0));
var brect = CvInvoke.BoundingRectangle(new VectorOfPointF(rRect.GetVertices()));
CvInvoke.Rectangle(mat, brect, new MCvScalar(0,255,0), 1, LineType.EightConnected, 0);
Result:
You should read the OpenCV documentation.
There is a RotatedRect class that you can use for your task. You can specify the angle by which the rectangle is rotated.
Here is sample code (taken from the docs) for drawing a rotated rectangle:
Mat image(200, 200, CV_8UC3, Scalar(0));
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
line(image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0));
Rect brect = rRect.boundingRect();
rectangle(image, brect, Scalar(255,0,0));
imshow("rectangles", image);
waitKey(0);
Here is the result:

Bicubic Image Interpolation Algorithm - Glitches

I am trying to implement bicubic image interpolation. I have only pasted the relevant sections of the code; I have skipped the code dealing with loading the image into a buffer, reading pixels from it, etc. I am reasonably sure my math is correct, yet I am getting terrible artifacts in the output.
All the action happens in the resize method.
I am hoping that experienced graphics programmers can share their hunches as to what I might be doing wrong.
The following are the input and output images I get when resizing the input to twice its width and height.
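// Cubic (Catmull-Rom) interpolation kernel:
// p(x) = p1 + 0.5*x*(p2 - p0) + x^2*(p0 - 2.5*p1 + 2*p2 - 0.5*p3) + 0.5*x^3*(-p0 + 3*p1 - 3*p2 + p3)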
double Interpolator::interpolate(const double p0, const double p1, const double p2, const double p3, const double x){
    return (-0.5f*p0+1.5f*p1-1.5f*p2+0.5*p3)*pow(x,3)+
           (p0-2.5f*p1+2.f*p2-0.5f*p3)*pow(x,2)+
           (-0.5f*p0+0.5f*p2)*x+
           p1;
}
bool Image::equals(double a, double b, double threshold){
    if(fabs(a-b)<=threshold)
        return true;
    return false;
}
void Image::interpolate(const Pixel p[], double offset, Pixel& result){
    result.r = Interpolator::interpolate(p[0].r,p[1].r,p[2].r,p[3].r,offset);
    result.g = Interpolator::interpolate(p[0].g,p[1].g,p[2].g,p[3].g,offset);
    result.b = Interpolator::interpolate(p[0].b,p[1].b,p[2].b,p[3].b,offset);
    result.a = Interpolator::interpolate(p[0].a,p[1].a,p[2].a,p[3].a,offset);
}
void Image::getSamplingCoords(const int nearest,
                              const int max,
                              int coords[]){
    coords[0] = nearest-1;
    if(coords[0]<0)
        coords[0] = nearest;
    coords[1] = nearest;
    coords[2] = nearest+1;
    if(coords[2]>=max)
        coords[2] = nearest;
    coords[3] = nearest+2;
    //The following check should not be necessary
    //since this is never expected to occur. Nevertheless...
    if(coords[3]>=max)
        coords[3] = nearest;
}
void Image::interpolateAlongY(int x, int y, int yMax, double yOffset, Pixel& result){
    if(equals(yOffset,0.f,ERROR_THRESHOLD)){
        //No interpolation required
        getPixel(x,y,result);
        return;
    }
    int yCoords[4];
    getSamplingCoords(y, yMax, yCoords);
    Pixel interpolants[4];
    for(int i=0; i<4; ++i){
        getPixel(x, yCoords[i], interpolants[i]);
    }
    interpolate(interpolants, y, result);
}
void Image::resize(const int newWidth, const int newHeight){
    //Ensure that we have a valid buffer already
    if(buffer==NULL){
        printf("ERROR: Must load an image before resizing it!");
        assert(false);
    }
    //We first need to create a new buffer with the new dimensions
    unsigned char* newBuffer = new unsigned char[newWidth*newHeight*channelCount];
    for(int j=0; j<newHeight; ++j){
        for(int i=0; i<newWidth; ++i){
            size_t newIndexOffset = (j*newWidth+i)*channelCount;
            //For this pixel in the target image we
            //a) Find the nearest pixel in the source image
            //b) Find the offset from the aforementioned nearest pixel
            int xNear,yNear;
            double xOffset,yOffset;
            double x = ((double)width/(double)newWidth)*i;
            double y = ((double)height/(double)newHeight)*j;
            xNear = floor(x);
            yNear = floor(y);
            xOffset = x-xNear;
            yOffset = y-yNear;
            //If offset is 0, we don't need any interpolation
            //we simply need to sample the source pixel and proceed
            // if(equals(xOffset,0.f,ERROR_THRESHOLD) && equals(yOffset,0.f,ERROR_THRESHOLD)){
            // Pixel result;
            // getPixel(xNear, yNear, result);
            // *(newBuffer+newIndexOffset) = result.r;
            // *(newBuffer+newIndexOffset+1) = result.g;
            // *(newBuffer+newIndexOffset+2) = result.b;
            // if(channelCount==4)
            // *(buffer+newIndexOffset+3) = result.a;
            // continue;
            // }
            //We make a check that xNear and yNear obtained above
            //are always smaller than the edge pixels at the extremeties
            if(xNear>=width || yNear>=height){
                printf("ERROR: Nearest pixel computation error!");
                assert(false);
            }
            //Next we find four pixels along the x direction around this
            //nearest pixel
            int xCoords[4];
            getSamplingCoords(xNear,width,xCoords);
            //For each of these sampling xCoords, we interpolate 4 nearest points
            //along Y direction
            Pixel yInterps[4];
            for(int k=0; k<4; k++){
                interpolateAlongY(xCoords[k], yNear, height, yOffset, yInterps[k]);
            }
            //Finally, the resultant pixel is a cubic interpolation
            //on the 4 obtained pixels above
            Pixel result;
            if(equals(xOffset,0.f,ERROR_THRESHOLD)){
                result.r = yInterps[0].r;
                result.g = yInterps[0].g;
                result.b = yInterps[0].b;
                result.a = yInterps[0].a;
            }else{
                interpolate(yInterps, xOffset, result);
            }
            *(newBuffer+newIndexOffset) = result.r;
            *(newBuffer+newIndexOffset+1) = result.g;
            *(newBuffer+newIndexOffset+2) = result.b;
            if(channelCount==4)
                *(newBuffer+newIndexOffset+3) = result.a;
        }
    }
    //Now we can deallocate the memory of our current buffer
    delete [] buffer;
    //Reassign our newly sampled buffer to our own
    buffer = newBuffer;
    //Reset our image dimensions
    height = newHeight;
    width = newWidth;
}
interpolateAlongY is wrong: the last line is
interpolate(interpolants, y, result);
and should be
interpolate(interpolants, yOffset, result);
Additionally, the comment in getSamplingCoords about coords[3] is wrong; that case does occur.
Edit:
After looking through JansonD's answer I noticed two more problems: the special cases guarded by if(equals(xOffset,0.f,ERROR_THRESHOLD)) are not even necessary, since the interpolation function does the right thing when the offset is close to 0.
The other thing is that your interpolation function is probably acting as a lowpass filter, because it mixes in information from the neighbouring pixels, so you are probably losing detail in high-frequency data such as edges.
In addition to Josef's answer regarding:
interpolate(interpolants, y, result);
Needing to reference yOffset, there is also:
if(equals(xOffset,0.f,ERROR_THRESHOLD)){
result.r = yInterps[0].r;
result.g = yInterps[0].g;
result.b = yInterps[0].b;
result.a = yInterps[0].a;
Which should use yInterps[1].
Also none of the output values are clamped, so there may be underflow and overflow around sharp edges.
And the comment for:
if(coords[3]>=max)
coords[3] = nearest;
isn't the only thing wrong: the clamp should be to max-1, not to nearest, unless you mean to reflect the edge pixels rather than repeat them.

Pixel reordering is wrong when trying to process and display image copy with lower res

I'm currently making an application in Processing intended to take an image and apply an 8-bit style to it, i.e. make it look pixelated. To do this it has a method that takes a style and a window size as parameters (the style is the shape the window is drawn with: rect, ellipse, cross, etc., and the window size is a number between 1 and 10, squared) to produce results similar to the iPhone app pxl (http://itunes.apple.com/us/app/pxl./id499620829?mt=8). The method steps through the image's pixels window by window, averages the colour of each window, and draws a rect (or whichever shape/style was chosen) at the equivalent position on the other side of the sketch window. When run, the sketch should display the original image on the left and mirror it with the processed version on the right.
The problem I'm having is that when the averaged-colour rects are drawn, the order in which they are displayed becomes skewed.
Although the results are rather amusing, they are not what I want. Here is the code:
//=========================================================
// GLOBAL VARIABLES
//=========================================================
PImage img;
public int avR, avG, avB;
private final int BLOCKS = 0, DOTS = 1, VERTICAL_CROSSES = 2, HORIZONTAL_CROSSES = 3;
public sRGB styleColour;
//=========================================================
// METHODS FOR AVERAGING WINDOW COLOURS, CREATING AN
// 8 BIT REPRESENTATION OF THE IMAGE AND LOADING AN
// IMAGE
//=========================================================
public sRGB averageWindowColour(color [] c){
// RGB Variables
float r = 0;
float g = 0;
float b = 0;
// Iterator
int i = 0;
int sizeOfWindow = c.length;
// Count through the window's pixels, store the
// red, green and blue values in the RGB variables
// and sum them into the average variables
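// (note: avR, avG and avB are the class-level fields declared above and are not
// reset to 0 here, so their values carry over between calls to this method)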
for(i = 0; i < c.length; i++){
r = red (c[i]);
g = green(c[i]);
b = blue (c[i]);
avR += r;
avG += g;
avB += b;
}
// Divide the sum of the red, green and blue
// values by the number of pixels in the window
// to obtain the average
avR = avR / sizeOfWindow;
avG = avG / sizeOfWindow;
avB = avB / sizeOfWindow;
// Return the colour
return new sRGB(avR,avG,avB);
}
public void eightBitIT(int style, int windowSize){
    img.loadPixels();
    for(int wx = 0; wx < img.width; wx += (sqrt(windowSize))){
        for(int wy = 0; wy < img.height; wy += (sqrt(windowSize))){
            color [] tempCols = new color[windowSize];
            int i = 0;
            for(int x = 0; x < (sqrt(windowSize)); x ++){
                for(int y = 0; y < (sqrt(windowSize)); y ++){
                    int loc = (wx+x) + (y+wy)*(img.width-windowSize);
                    tempCols[i] = img.pixels[loc];
                    // println("Window loc X: "+(wx+(img.width+5))+" Window loc Y: "+(wy+5)+" Window pix X: "+x+" Window Pix Y: "+y);
                    i++;
                }
            }
            //this is ment to be in a switch test (0 = rect, 1 ellipse etc)
            styleColour = new sRGB(averageWindowColour(tempCols));
            //println("R: "+ red(styleColour.returnColourScaled())+" G: "+green(styleColour.returnColourScaled())+" B: "+blue(styleColour.returnColourScaled()));
            rectMode(CORNER);
            noStroke();
            fill(styleColour.returnColourScaled());
            //println("Rect Loc X: "+(wx+(img.width+5))+" Y: "+(wy+5));
            ellipse(wx+(img.width+5),wy+5,sqrt(windowSize),sqrt(windowSize));
        }
    }
}
public PImage load(String s){
PImage temp = loadImage(s);
temp.resize(600,470);
return temp;
}
void setup(){
background(0);
// Load the image and set size of screen to its size*2 + the borders
// and display the image.
img = loadImage("oscilloscope.jpg");
size(img.width*2+15,(img.height+10));
frameRate(25);
image(img,5,5);
// Draw the borders
strokeWeight(5);
stroke(255);
rectMode(CORNERS);
noFill();
rect(2.5,2.5,img.width+3,height-3);
rect(img.width+2.5,2.5,width-3,height-3);
stroke(255,0,0);
strokeWeight(1);
rect(5,5,9,9); //window example
// process the image
eightBitIT(BLOCKS, 16);
}
void draw(){
//eightBitIT(BLOCKS, 4);
//println("X: "+mouseX+" Y: "+mouseY);
}
This has been bugging me for a while now, as I can't see where in my code I'm offsetting the coordinates so that they display like this. I know it's probably something very trivial, but I can't seem to work it out. If anyone can spot why this skewed reordering is happening I would be much obliged, as I have quite a lot of other ideas I want to implement and this is holding me back.
Thanks,
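One guess, offered only as a sketch and not taken from the original thread: in Processing, the pixel at column px and row py of img lives at index px + py*img.width, so the window read inside eightBitIT would normally be computed with the full image width as the row stride:
// inside the innermost x/y loops of eightBitIT
int loc = (wx + x) + (wy + y) * img.width;   // row stride is the full image width
tempCols[i] = img.pixels[loc];
Using (img.width - windowSize) as the stride, as in the posted code, shifts every row of the sampling window, which would produce exactly this kind of skewed reordering.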
