I am using the following code to detect circles only, but it is also detecting the other shapes. I have tried HoughCircles, but it does not give good results. My requirement is to detect only the circles.
Mat src, src_gray;

/// Read the image
src = t2;
if( !src.data ) // Check for invalid input
{
    cout << "Could not open or find the image" << std::endl;
    cv::waitKey(5000);
}

/// Convert it to gray
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );

/// Reduce the noise so we avoid false circle detection
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );

Mat src_lines; Mat src_gray_lines;
int thresh_lines = 100;
RNG rng_lines(12345);

Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
src_gray_lines = src_gray;

/// Detect edges using Threshold
threshold( src_gray_lines, threshold_output, thresh_lines, 255, THRESH_BINARY );

/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

/// Find the rotated rectangles and ellipses for each contour
vector<RotatedRect> minRect( contours.size() );
vector<RotatedRect> minEllipse( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
    minRect[i] = minAreaRect( Mat(contours[i]) );
}

/// Draw contours + rotated rects + ellipses
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( size_t i = 0; i < contours.size(); i++ )
{
    // rotated rectangle
    Point2f rect_points[4];
    minRect[i].points( rect_points );
    for( int j = 0; j < 4; j++ )
        line( src, rect_points[j], rect_points[(j+1)%4], Scalar(255,0,0), 1, 8 );
}
Please let me know if my question is not clear.
You already have the contour vectors, so you can easily check each contour's perimeter (length), and a contour also has an area. A circular shape has a specific area-to-perimeter ratio: for a perfect circle, the circularity 4πA/P² equals 1 (you should compute what this value is for your shapes). Eliminating the shapes that do not fit this ratio (within some delta) leaves you with only the circles.
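For example, here is a minimal sketch of that ratio test in the style of the question's code; the 0.85 cutoff and the minimum-area filter are assumed values you will need to tune, not values from the question:

// Sketch of the ratio test above, using circularity = 4*pi*A / P^2
// (exactly 1.0 for a perfect circle). The 0.85 cutoff and the minimum
// area are assumptions to tune.
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

std::vector<std::vector<cv::Point> > keepCircles(
    const std::vector<std::vector<cv::Point> >& contours )
{
    std::vector<std::vector<cv::Point> > circles;
    for( size_t i = 0; i < contours.size(); i++ )
    {
        double area = cv::contourArea( contours[i] );
        double perimeter = cv::arcLength( contours[i], true );
        if( perimeter <= 0 || area < 50 )   // skip degenerate/tiny contours
            continue;
        double circularity = 4.0 * CV_PI * area / (perimeter * perimeter);
        if( circularity > 0.85 )            // close enough to a circle
            circles.push_back( contours[i] );
    }
    return circles;
}

You can call this on the contours vector returned by findContours above and draw only the contours it keeps.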
Sorry in advance: this is more of an algorithmic problem than a coding problem, but I wasn't sure where else to put it. For simplicity's sake, say you have a binary image (white background, solid black object in the foreground).
Example:
sample input
I want to divide this object (meaning only the black pixels) into N sections, all with the same number of pixels, so each section should contain (1/N) * (total number of black pixels).
With my current algorithm, I (1) find the total number of black pixels, (2) divide by N, and then (3) scan the image row by row, marking black pixels until each section has its share. The result looks something like this:
current output sketch
The problem with this is the last (yellow) section, which isn't continuous. I want to divide the image in a way that makes more sense, like this:
ideal output
Basically, I'd like the boundary between the sections to be as short as possible.
I've been stumped on this for a while, and my old code just isn't cutting it anymore. I only need an approach to identifying the sections; I'll ultimately be outputting each section as an individual image, as well as a grayscale copy of the input image where every pixel's value corresponds to its section number (I don't need help with those parts). Any ideas?
I only need an approach to identifying the sections
Based on that, I tried a couple of approaches; they may serve as guidelines:
Find the contour of the image
Find the moments of the contour and detect the mass center
For the outer corners, you can simply use the convex hull
Find the contour points closest to the mass center (these will be the inner corners)
Then you can separate the shape into the desired regions by using these important points (a sketch of this last step follows the code below)
Here is the result and code:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
vector<Point>innerCorners;
bool isClose(Point test);
int main()
{
Mat src_gray;
int thresh = 100;
Mat src = imread("image/dir/star.png");
cvtColor( src, src_gray, COLOR_BGR2GRAY );
namedWindow( "Source",WINDOW_NORMAL );
Mat canny_output;
Canny( src_gray, canny_output, thresh, thresh*2 );
vector<vector<Point> > contours;
findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE );
vector<Vec4i> hierarchy;
vector<vector<Point> >hull( contours.size() );
vector<Moments> mu(contours.size() );
for( int i = 0; i <(int)contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
for( size_t i = 0; i < contours.size(); i++ )
{
if(contours[i].size()>20)
convexHull( contours[i], hull[i] );
}
vector<Point2f> mc( contours.size() );
for( int i = 0; i <(int)contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
int onlyOne = 1;
for( size_t i = 0; i< contours.size(); i++ )
{
if(contours[i].size()>20 && onlyOne)
{
circle( src, mc[i], 4, Scalar(0,255,255), -1, 8, 0 );
Scalar color = Scalar(255,0,0);
drawContours( drawing, contours, (int)i, color );
drawContours( src, hull, (int)i, color,5 );
Point centerMass = mc[i];
for(int a=0; a<(int)contours[i].size();a++)
{
if(cv::norm(cv::Mat(contours[i][a]),Mat(centerMass))<200 && isClose(contours[i][a]))
{
circle(src,contours[i][a],5,Scalar(0,0,255),10);
innerCorners.push_back(contours[i][a]);
line(src,contours[i][a],centerMass,Scalar(0,255,255),5);
}
}
onlyOne = 0;
}
}
namedWindow( "Hull demo",WINDOW_NORMAL );
imshow( "Hull demo", drawing );
imshow("Source", src );
waitKey();
return 0;
}
bool isClose(Point test){
if(innerCorners.size()==0)
return 1;
for(Point a:innerCorners)
if((cv::norm(cv::Mat(a),cv::Mat(test)))<70)
return 0;
return 1;
}
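The code above finds the inner corners but stops short of the actual split. Here is a minimal sketch of that last step, under two assumptions: `shapeMask` is a filled binary mask of the star (the program above works on Canny edges, so this mask is an extra input you would produce, e.g., by thresholding), and `corners`/`centerMass` are the values collected in the loop above. The idea is to cut along each corner-to-center line and then label the disconnected pieces:

// Sketch of the final separation step; `shapeMask` (CV_8U filled mask),
// `corners` (the innerCorners found above) and `centerMass` are assumed inputs.
int splitIntoRegions( const cv::Mat& shapeMask,
                      const std::vector<cv::Point>& corners,
                      cv::Point centerMass,
                      cv::Mat& labels )
{
    cv::Mat cut = shapeMask.clone();
    for( size_t k = 0; k < corners.size(); k++ )
    {
        // black out a 3-px line from each inner corner to the mass center
        // so the wedges become separate connected components
        cv::line( cut, corners[k], centerMass, cv::Scalar(0), 3 );
    }
    // label 0 is the background; labels 1..n-1 are the regions
    return cv::connectedComponents( cut, labels ) - 1;
}

Calling it as splitIntoRegions(mask, innerCorners, centerMass, labels) then gives the per-pixel section labels the original question asked for.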
In the above image, if the entire width is specified as, say, 30'5", how do I calculate the height and width of each individual contour in that image using OpenCV?
To obtain the height and width of a contour, you can use cv2.boundingRect. The function returns the contour information in the form of x,y,w,h. The height of a specific contour is h and the width is w. Here's the result with the width in pixels drawn onto the image.
import cv2

# Load image, convert to grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Find contours, obtain bounding rect, and draw width
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.putText(image, str(w), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 1)

cv2.imshow('image', image)
cv2.waitKey()
My approach uses minAreaRect:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
int main()
{
Mat src; Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Load source image and convert it to gray
src = imread( "/ur/img/directory/image.jpg", 1 );
Mat original = src.clone();
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles for each contour
vector<RotatedRect> minRect( contours.size() );
for( int i = 0; i < contours.size(); i++ )
minRect[i] = minAreaRect( Mat(contours[i]) );
/// Draw contours + rotated rects
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
Mat result_zero = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
// detect contours
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
// detect rectangle for each contour
Point2f rect_points[4]; minRect[i].points( rect_points );
double length_1 = cv::norm(cv::Mat(rect_points[0]),cv::Mat(rect_points[1]));
double length_2 = cv::norm(cv::Mat(rect_points[1]),cv::Mat(rect_points[2]));
for( int j = 0; j < 4; j++ )
{
int temp1 = (int)length_1;
int temp2 = (int)length_2;
if(length_1>length_2)
putText(original,to_string(temp1),rect_points[0],FONT_HERSHEY_SIMPLEX,1.0,Scalar(0,255,255),2);
else
putText(original,to_string(temp2),rect_points[0],FONT_HERSHEY_SIMPLEX,1.0,Scalar(0,255,255),2);
line( result_zero, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
}
}
/// Show in windows
imshow("First",original);
imshow( "Contours", drawing );
waitKey(0);
return(0);
}
Source image:
Detected rectangles for each line:
Line lengths by pixel:
std::vector<std::vector<cv::Point2i>> vecContours;
cv::Mat mat = cv::imread("[path to image]", cv::IMREAD_GRAYSCALE);
cv::threshold(mat, mat, 200, 255, cv::THRESH_BINARY);
cv::findContours(mat, vecContours, cv::RetrievalModes::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);

// known real-world width divided by the image width in pixels
float inchPerPixel = 30.5f / mat.cols;

for (const std::vector<cv::Point2i>& vecContour : vecContours) {
    cv::Rect2i contourRect = cv::boundingRect(vecContour);
    printf("Contour width pixels: %d, width inches: %f\n", contourRect.width, inchPerPixel * contourRect.width);
}
You can achieve this by:
Creating a binary image with the threshold method
Using the findContours method to find the contours of the rectangles in the image
Getting the size of each rectangle contour with the boundingRect method
Multiplying the width of the contour by the calculated inch-per-pixel factor
I'm trying to do real-time advertisement billboard detection on roads using an Android smartphone. The goal is to crop the area of the billboard object (the region of interest) and save it to a database.
For example:
example billboard images
For preprocessing, I used grayscale conversion and Canny edge detection (with Otsu thresholding used to set the upper and lower thresholds). Then I used a contour-based method to detect whether the object is rectangular by checking its points. I use OpenCV for Java in Android Studio for the implementation. When I run the program, it only detects rectangular objects on a plain background, and only if the rectangle has high contrast with the background. Currently it can only detect rectangles with 90-degree corners, and it fails to detect objects with rounded-rectangle shapes. Furthermore, my program fails completely to detect rectangular objects in more complex backgrounds, like road scenes where the object I'm trying to detect has a similar color to the background (low contrast), or where there are many occlusions such as trees, traffic lights, and cables, which cause the detection to fail.
This is the code I use for edge detection
// Grayscale + blur, then Canny with thresholds derived from Otsu
Mat destination = new Mat(oriMat.rows(), oriMat.cols(), oriMat.type());
Imgproc.cvtColor(oriMat, destination, Imgproc.COLOR_RGBA2GRAY);
Imgproc.GaussianBlur(destination, destination, new Size(3, 3), 0, 0, Imgproc.BORDER_DEFAULT);

// threshold() returns the computed Otsu threshold; use it to set Canny's bounds
double otsuThresholdValue = Imgproc.threshold(destination, destination, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
double lowerThreshold = otsuThresholdValue * 0.5;
double upperThreshold = otsuThresholdValue;

Mat canny = new Mat();
Imgproc.Canny(destination, canny, lowerThreshold, upperThreshold);

Mat abs = new Mat();
Core.convertScaleAbs(canny, abs);
Mat result = new Mat();
Core.addWeighted(abs, 0.5, abs, 0.5, 0, result); // note: blending `abs` with itself just reproduces `abs`
Here is the code I use for contour-based detection
ArrayList<MatOfPoint> contours = new ArrayList<>();
// find contours and store them all as a list
Imgproc.findContours(matData.monoChrome.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

final int width = matData.monoChrome.rows();
final int height = matData.monoChrome.cols();
int matArea = width * height;

for (int i = 0; i < contours.size(); i++) {
    double contoursArea = Imgproc.contourArea(contours.get(i));
    MatOfPoint2f approx = new MatOfPoint2f();
    MatOfPoint2f contour = new MatOfPoint2f(contours.get(i).toArray());
    double epsilon = Imgproc.arcLength(contour, true) * 0.1;
    // approximate contour with accuracy proportional to the contour perimeter
    Imgproc.approxPolyDP(contour, approx, epsilon, true);

    // skip small or non-convex contours
    if (Math.abs(contoursArea) < matArea * 0.01 ||
            !Imgproc.isContourConvex(new MatOfPoint(approx.toArray()))) {
        continue;
    }

    Imgproc.drawContours(matData.resizeMat, contours, i, new Scalar(0, 255, 0));

    List<Point> points = approx.toList();
    int pointCount = points.size();
    LinkedList<Double> cos = new LinkedList<>();
    for (int j = 2; j < pointCount + 1; j++) {
        cos.addLast(angle(points.get(j % pointCount), points.get(j - 2), points.get(j - 1)));
    }
    Collections.sort(cos, (lhs, rhs) -> lhs.intValue() - rhs.intValue());
    double mincos = cos.getFirst();
    double maxcos = cos.getLast();

    // accept quadrilaterals whose corner angles are close to 90 degrees
    if (points.size() == 4 && mincos >= -0.3 && maxcos <= 0.5) {
        for (int j = 0; j < points.size(); j++) {
            Core.circle(matData.resizeMat, points.get(j), 6, new Scalar(255, 0, 0), 6);
        }
        matData.points = points;
        break;
    }
}
Is there any method I can use to recognize advertisement billboards on the road?
I would appreciate any answers and ideas. Thank you!
I have a problem where I want to estimate the gradient of a line along a contour. Please note that I don't need the per-pixel image gradient, but the rate of change of the line itself.
If you look at the attached image, you will see a binary image with a green contour. I want to label each pixel based on the gradient of the contour at that pixel.
The reason I need the gradient is that I want to compute the points where the gradient orientation changes from + to - or from - to +.
I cannot think of a good method to estimate these points on the image. Could someone suggest how I can estimate them?
Here is a small program that computes the tangent at each contour pixel location in a very simple way (there are other, probably better, methods; the easy ones are forward, backward, and central differences: http://en.wikipedia.org/wiki/Finite_difference#Forward.2C_backward.2C_and_central_differences):
for a contour pixel c_i, get its neighbors c_{i-1} and c_{i+1}
the tangent direction at c_i is (c_{i-1} - c_{i+1})
So this all works on contour pixels, but maybe you could do something similar by computing the orthogonal to the full image pixel gradient... not sure about that ;)
here's the code:
int main()
{
    cv::Mat input = cv::imread("../inputData/ContourTangentBin.png");
    cv::Mat gray;
    cv::cvtColor(input, gray, CV_BGR2GRAY);

    // binarize
    cv::Mat binary = gray > 100;

    // find contours
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    findContours( binary.clone(), contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE ); // CV_CHAIN_APPROX_NONE to get each single pixel of the contour!!

    for( size_t i = 0; i < contours.size(); i++ )
    {
        std::vector<cv::Point> & cCont = contours[i];
        std::vector<cv::Point2f> tangents;
        if( cCont.size() < 3 ) continue;

        // 1. compute tangent for the first point
        cv::Point2f cPoint = cCont.front();
        cv::Point2f tangent = cCont.back() - cCont.at(1); // central tangent => you could use another method if you like to
        tangents.push_back(tangent);

        // display first tangent
        cv::Mat tmpOut = input.clone();
        cv::line(tmpOut, cPoint + 10*tangent, cPoint - 10*tangent, cv::Scalar(0,0,255), 1);
        cv::imshow("tangent", tmpOut);
        cv::waitKey(0);

        for( unsigned int j = 1; j < cCont.size(); ++j )
        {
            cPoint = cCont[j];
            tangent = cCont[j-1] - cCont[(j+1) % cCont.size()]; // central tangent => you could use another method if you like to
            tangents.push_back(tangent);

            // display current tangent:
            tmpOut = input.clone();
            cv::line(tmpOut, cPoint + 10*tangent, cPoint - 10*tangent, cv::Scalar(0,0,255), 1);
            cv::imshow("tangent", tmpOut);
            cv::waitKey(0);
            //if(cv::waitKey(0) == 's') cv::imwrite("../outputData/ContourTangentTangent.png", tmpOut);
        }

        // now all the tangent directions are in "tangents"; do whatever you like with them
    }

    for( size_t i = 0; i < contours.size(); i++ )
    {
        drawContours( input, contours, (int)i, cv::Scalar(0,255,0), 1, 8, hierarchy, 0 );
    }

    cv::imshow("input", input);
    cv::imshow("binary", binary);
    cv::waitKey(0);
    return 0;
}
I used this image:
and got outputs like:
In the result you get a vector with the 2D tangent (line direction) for each pixel of that contour.
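Since the original question asks for the points where the orientation flips from + to - (or back), here is a minimal sketch of one way to post-process that `tangents` vector; treating the slope as dy/dx and skipping near-vertical tangents is my assumption, not part of the code above:

// Sketch: indices of contour points where the tangent slope changes sign.
// Near-vertical tangents (|x| ~ 0) are skipped, an assumption of this sketch.
#include "opencv2/core/core.hpp"
#include <cmath>
#include <vector>

std::vector<int> slopeSignChanges( const std::vector<cv::Point2f>& tangents )
{
    std::vector<int> flips;
    int prevSign = 0;
    for( size_t k = 0; k < tangents.size(); ++k )
    {
        if( std::fabs(tangents[k].x) < 1e-6f )
            continue;                                  // skip vertical tangents
        float slope = tangents[k].y / tangents[k].x;
        int sign = (slope > 0.f) - (slope < 0.f);
        if( sign != 0 && prevSign != 0 && sign != prevSign )
            flips.push_back( (int)k );                 // orientation flipped here
        if( sign != 0 )
            prevSign = sign;
    }
    return flips;
}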
I am trying to adapt this code so that SURF can be run on color frames/images, and then use the code from Kalman_Color_Object_Track to track the detected object by its color value with a Kalman filter. These are the steps I intend to take, but I am stuck because this SURF detection code does not work with color images:
"book1.png" is the color image.
After the rectangle around the object is detected in the incoming frames, the Mat structures are converted to IplImage (since the Kalman_Color_Object_Track code is in C++) by:
dest_image = cvCloneImage(&(IplImage)image);
mat_frame = cvCloneImage(&(IplImage)frame);
Call the Kalman_Color_Object_Track( mat_frame, dest_image, 30 ) method.
Questions: (A) How can I make this code work so that SURF features can be extracted from and detected in color images? (B) I am unsure what should be passed in the function signature of Kalman_Color_Object_Track(), and (C) where exactly in the object detection module should it be called?
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;
using namespace std;

IplImage *mat_dest_image = 0;
IplImage *mat_frame = 0;

/* Object detection and recognition from video */
int main()
{
    Mat object = imread( "book1.png", 1 ); // 1 = load as a 3-channel color image (the original call was missing this flag)
    if( !object.data )
    {
        std::cout << "Error reading object" << std::endl;
        return -1;
    }

    // Detect the keypoints using the SURF detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    // Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;
    namedWindow("Good Matches");
    namedWindow("Tracking");

    // Get the corners from the object
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    VideoCapture cap("booksvideo.avi");

    for(;;)
    {
        Mat frame;
        cap >> frame;
        imshow("Good Matches", frame);

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        //cvtColor(frame, image, CV_RGB2GRAY); // with this commented out, `image` stays empty: this is where the color-image problem bites
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch( des_object, des_image, matches, 2 );

        for(int i = 0; i < min(des_image.rows-1, (int) matches.size()); i++) // THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size() <= 2 && (int) matches[i].size() > 0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        // Draw only the "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( size_t i = 0; i < good_matches.size(); i++ )
            {
                // Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );
            perspectiveTransform( obj_corners, scene_corners, H );

            // Draw lines between the corners (the mapped object in the scene image)
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );

            mat_dest_image = cvCloneImage( &(IplImage)image );
            mat_frame = cvCloneImage( &(IplImage)frame );

            Kalman_Color_Object_Track( ); // the tracking method; what to pass here is question (B)
        }

        // Show detected matches
        imshow( "Good Matches", img_matches );
        for( size_t i = 0; i < good_matches.size(); i++ )
        {
            printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", (int)i, good_matches[i].queryIdx, good_matches[i].trainIdx );
        }
        waitKey(0);
    }
    return 0;
}
This paper implemented the SIFT descriptor on color images by computing gradient histograms for each channel independently. Perhaps you could try the same approach for SURF features.
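A minimal sketch of one way to approximate that idea with the question's OpenCV 2.x SURF API: split the BGR image into channels, run detection and description on each plane separately, and stack the results. Note this detects keypoints per channel rather than reusing one keypoint set across channels as the paper does for SIFT, so it is an approximation; the function name, the stacking scheme, and the nonfree include are my assumptions, not from the paper:

// Hedged sketch, not the paper's exact method: run SURF on each BGR plane
// and stack keypoints/descriptors. Assumes the OpenCV 2.x nonfree module
// (the same API family as the question's SurfFeatureDetector).
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"

void surfPerChannel( const cv::Mat& bgr,
                     std::vector<cv::KeyPoint>& allKeypoints,
                     cv::Mat& allDescriptors )
{
    std::vector<cv::Mat> planes;
    cv::split( bgr, planes );                 // B, G, R single-channel planes
    cv::SurfFeatureDetector detector( 500 );  // minHessian as in the question
    cv::SurfDescriptorExtractor extractor;
    for( size_t c = 0; c < planes.size(); c++ )
    {
        std::vector<cv::KeyPoint> kp;
        cv::Mat des;
        detector.detect( planes[c], kp );
        extractor.compute( planes[c], kp, des );
        allKeypoints.insert( allKeypoints.end(), kp.begin(), kp.end() );
        allDescriptors.push_back( des );      // append descriptor rows
    }
}

Matching would then run on the stacked descriptors as before, though keypoint indices no longer correspond one-to-one with a single grayscale detection.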