How to find height and width for each individual contour on an image using OpenCV

In the above image, suppose the entire width is specified as, say, 30'5". How do I calculate the height and width of each individual contour in that image using OpenCV?

To obtain the height and width of a contour, you can use cv2.boundingRect. The function returns the bounding box of a contour as x, y, w, h. The height of a specific contour is h and the width is w. Here's the result with w (in pixels) drawn onto the image.
import cv2
# Load image, convert to grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours, obtain bounding rect, and draw width
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.putText(image, str(w), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36,255,12), 2)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 1)
cv2.imshow('image', image)
cv2.waitKey()
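The question ultimately asks for real-world sizes, so once you know the physical span the full image covers, you can convert the pixel values. Here is a minimal sketch continuing from the snippet above (image and cnts are defined there), assuming the image's full width corresponds to the stated 30'5", i.e. 365 inches, and that pixels are square:

total_inches = 30 * 12 + 5                        # 30'5" = 365 inches (assumption from the question)
inches_per_pixel = total_inches / image.shape[1]  # horizontal scale

for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # the same scale is reused vertically, which is only valid for square pixels
    print('w = %dpx (%.2f in), h = %dpx (%.2f in)'
          % (w, w * inches_per_pixel, h, h * inches_per_pixel))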

My approach uses minAreaRect, which gives the rotated bounding rectangle of each contour:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
int main()
{
Mat src; Mat src_gray;
int thresh = 100;
RNG rng(12345);
/// Load source image and convert it to gray
src = imread( "/ur/img/directory/image.jpg", 1 );
Mat original = src.clone();
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles for each contour
vector<RotatedRect> minRect( contours.size() );
for( int i = 0; i < contours.size(); i++ )
minRect[i] = minAreaRect( Mat(contours[i]) );
/// Draw contours + rotated rects
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
Mat result_zero = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
// detect contours
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
// detect rectangle for each contour
Point2f rect_points[4]; minRect[i].points( rect_points );
double length_1 = cv::norm(cv::Mat(rect_points[0]),cv::Mat(rect_points[1]));
double length_2 = cv::norm(cv::Mat(rect_points[1]),cv::Mat(rect_points[2]));
for( int j = 0; j < 4; j++ )
{
int temp1 = (int)length_1;
int temp2 = (int)length_2;
if(length_1>length_2)
putText(original,to_string(temp1),rect_points[0],FONT_HERSHEY_SIMPLEX,1.0,Scalar(0,255,255),2);
else
putText(original,to_string(temp2),rect_points[0],FONT_HERSHEY_SIMPLEX,1.0,Scalar(0,255,255),2);
line( result_zero, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );
}
}
/// Show in windows
imshow("First",original);
imshow( "Contours", drawing );
waitKey(0);
return(0);
}
Source image:
Detected rectangles for each line:
Line lengths by pixel:

std::vector<std::vector<cv::Point2i>> vecContours;
cv::Mat mat = cv::imread("[path to image]", cv::IMREAD_GRAYSCALE);
cv::threshold(mat, mat, 200, 255, cv::THRESH_BINARY);
cv::findContours(mat, vecContours, cv::RetrievalModes::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
float inchPerPixel = 30.5f / mat.cols; // note: if 30'5" means 30 ft 5 in, the physical width is 365 inches, so 365.0f would be the more accurate numerator
for (const std::vector<cv::Point2i>& vecContour : vecContours) {
    cv::Rect2i contourRect = cv::boundingRect(vecContour);
    printf("Contour width pixels : %d, width inches %f\n", contourRect.width, inchPerPixel * contourRect.width);
}
You can achieve this by:
Creating a binary image using the threshold method
Using the findContours method to find the contours of the rectangles in the image
Getting the size of each rectangle contour using the boundingRect method
Multiplying the width of the contour by the calculated inches-per-pixel factor

Related

Let PShapes in an array rotate on its own axis in Processing

I have this code that basically reads each pixel of an image and redraws it with different shapes. All shapes are faded in using a sin() wave.
Now I want to rotate every "Pixelshape" around its own axis (shapeMode(CENTER)) while it fades in, and the translate function gives me a headache in this complex setup.
Here is the code so far:
void setup() {
  size(1080, 1350);
  shapeMode(CENTER);
  img = loadImage("loremipsum.png");
  …
}

void draw() {
  background(123);
  for (int gridX = 0; gridX < img.width; gridX++) {
    for (int gridY = 0; gridY < img.height; gridY++) {
      // grid position + tile size
      float tileWidth = width / (float)img.width;
      float tileHeight = height / (float)img.height;
      float posX = tileWidth*gridX;
      float posY = tileHeight*gridY;
      // get current color
      color c = img.pixels[gridY*img.width+gridX];
      // greyscale conversion
      int greyscale = round(red(c)*0.222+green(c)*0.707+blue(c)*0.071);
      int gradientToIndex = round(map(greyscale, 0, 255, 0, shapeCount-1));
      // FADE IN
      float wave = map(sin(radians(frameCount*4)), -1, 1, 0, 2);
      //translate(HEADACHE);
      rotate(radians(wave));
      shape(shapes[gradientToIndex], posX, posY, tileWidth * wave, tileHeight * wave);
    }
  }
}
I have tried many calculations, but they just make my sketch explode.
One that worked in another sketch, where I tried basically the same thing but in a loop, was (written equivalently):
translate(posX + tileWidth/2, posY + tileHeight/2);
I think I just don't get the matrix handling right. How can I translate each shape to its intended place?
Thank you very much @Rabbid76 – at first I just pasted in your idea and it went crazy – then I added pushMatrix(); and popMatrix(); – turned out your translate(); code was in fact right!
Then I had to change the x and y location where every shape is drawn to 0, 0.
And this is it! Now it works!
See the code here:
float wave = map(sin(radians(frameCount*4)), -1, 1, 0, 2);
pushMatrix();
translate(posX + tileWidth/2, posY + tileHeight/2);
rotate(radians(wave*180));
shape(shapes[gradientToIndex], 0, 0, tileWidth*wave , tileHeight*wave );
popMatrix();
PERFECT! Thank you so much!
rotate defines a rotation matrix and multiplies the current matrix by it; rotate therefore rotates around (0, 0).
You have to center the rectangle around (0, 0), rotate it, and then move the rotated rectangle to the desired position with translate.
Since translate and rotate multiply the current matrix by a new matrix, you must save and restore the matrix with pushMatrix() and popMatrix() respectively.
The center of a tile is (posX + tileWidth/2, posY + tileHeight/2):
pushMatrix();
translate(posX + tileWidth/2, posY + tileHeight/2);
rotate(radians(wave));
shape(shapes[gradientToIndex],
-tileWidth*wave/2, -tileHeight*wave/2,
tileWidth * wave, tileHeight * wave);
popMatrix();
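As a side note (my summary, not part of the original answer): in matrix terms, each tile vertex p, expressed relative to the tile centre, is drawn at

p' = T(cx, cy) · R(θ) · p, with R(θ) = [[cos θ, −sin θ], [sin θ, cos θ]]

which is exactly what translate() followed by rotate() builds between pushMatrix() and popMatrix().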

Align 2 images based on Hough Lines with openCV

I have 2 (aerial) images, taken from a slightly different angle:
image 1:
image 2:
I need to rescale image1 in horizontal direction to align it to image2. Without any modification, both images placed next to each other look like this:
This is the desired result: (made with photoshop)
In Photoshop, I took the right half of image1 and scaled it down horizontally a little bit. I did the same for the left half of image1, where I had to scale slightly more.
I would like to know how I can accomplish this using OpenCV, by using the Hough Line Transform. I have already started drawing Hough lines, but I have no idea how to do the transform to make the Hough lines match:
Here's my C++ code (called from Objective-C):
cv::Mat image1 = [im1 CVMat3];
cv::Mat gray_image1;
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cv::Mat dst1, cdst;
Canny(image1, dst1, 40, 90, 3);
double minLineLength = 0;
double maxLineGap = 10;
std::vector<cv::Vec2f> lines;
// detect lines (note: minLineLength and maxLineGap are parameters of
// HoughLinesP; in cv::HoughLines these two arguments are srn and stn)
cv::HoughLines(dst1, lines, 1, CV_PI/180, 90, minLineLength, maxLineGap );
for( size_t i = 0; i < lines.size(); i++ ) {
    float rho = lines[i][0], theta = lines[i][1];
    // keep only roughly horizontal lines (theta near 90 degrees)
    if( theta > CV_PI/180*70 && theta < CV_PI/180*110 ) {
        cv::Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( image1, pt1, pt2, cvScalar(10,100,255), 3, CV_AA );
    }
}
Some help would be really appreciated :-). Thanks in advance.
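No answer is recorded here, but as a rough sketch of one possible approach (my illustration, not the asker's code): once you can pair up a few corresponding points between the two images, for example intersections of matched Hough lines, a homography can reproduce the uneven horizontal rescaling that was done by hand in Photoshop. In Python, with hypothetical point values and file names:

import cv2
import numpy as np

# Hypothetical correspondences; replace with points derived from your
# matched Hough lines (e.g. intersections of near-vertical line pairs).
pts1 = np.float32([[120, 40], [900, 60], [880, 700], [100, 680]])  # image1
pts2 = np.float32([[110, 40], [870, 60], [850, 700], [ 95, 680]])  # image2

image1 = cv2.imread('image1.jpg')   # placeholder file names
image2 = cv2.imread('image2.jpg')

# Estimate the transform and warp image1 into image2's frame
H, _ = cv2.findHomography(pts1, pts2, cv2.RANSAC)
aligned = cv2.warpPerspective(image1, H, (image2.shape[1], image2.shape[0]))

cv2.imshow('result', np.hstack([aligned, image2]))
cv2.waitKey()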

Emgu CV draw rotated rectangle

I've been looking for a few days for a solution to draw a rectangle on an image frame. Basically I'm using the CvInvoke.cvRectangle method to draw the rectangle on the image because I need an antialiased rect.
But the problem is when I need to rotate a given shape by a given angle. I can't find any good solution.
I have tried to draw the rectangle on a separate frame, then rotate the whole frame and apply this new image on top of my base frame. But with this solution there is a problem with antialiasing. It's not working.
I'm working on a simple application that should allow drawing a few kinds of shapes, resizing them, and rotating them by a given angle.
Any idea how to achieve this?
The best way I found to draw a minimum enclosing rectangle on the contour is to use the Polylines() function with the vertices returned by the MinAreaRect() function. There are surely other ways to do it as well. Here is the code walkthrough:
// Find contours
var contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
Mat hierarchy = new Mat();
CvInvoke.FindContours(image, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
// According to your metric, get an index of the contour you want to find the min enclosing rectangle for
int index = 2; // Say, 2nd index works for you.
var rectangle = CvInvoke.MinAreaRect(contours[index]);
Point[] vertices = Array.ConvertAll(rectangle.GetVertices(), Point.Round);
CvInvoke.Polylines(image, vertices, true, new MCvScalar(0, 0, 255), 5);
The result can be visualized in the image below, in red is the minimum enclosing rectangle.
I use C# and EMGU.CV(4.1), and I think this code will not be difficult to transfer to any platform.
Add this function to your helper:
public static Mat DrawRect(Mat input, RotatedRect rect, MCvScalar color = default(MCvScalar),
    int thickness = 1, LineType lineType = LineType.EightConnected, int shift = 0)
{
    var v = rect.GetVertices();
    var prevPoint = v[0];
    var firstPoint = prevPoint;
    var nextPoint = prevPoint;
    var lastPoint = nextPoint;

    for (var i = 1; i < v.Length; i++)
    {
        nextPoint = v[i];
        CvInvoke.Line(input, Point.Round(prevPoint), Point.Round(nextPoint), color, thickness, lineType, shift);
        prevPoint = nextPoint;
        lastPoint = prevPoint;
    }
    CvInvoke.Line(input, Point.Round(lastPoint), Point.Round(firstPoint), color, thickness, lineType, shift);
    return input;
}
This draws a rotated rectangle by its points. The points are rounded with the Point.Round method because RotatedRect stores its points in float coordinates while CvInvoke.Line takes integer points.
Use:
var mat = Mat.Zeros(200, 200, DepthType.Cv8U, 3);
mat.GetValueRange();
var rRect = new RotatedRect(new PointF(100, 100), new SizeF(100, 50), 30);
DrawRect(mat, rRect,new MCvScalar(255,0,0));
var brect = CvInvoke.BoundingRectangle(new VectorOfPointF(rRect.GetVertices()));
CvInvoke.Rectangle(mat, brect, new MCvScalar(0,255,0), 1, LineType.EightConnected, 0);
Result:
You should read the OpenCV documentation.
There is a RotatedRect class that you can use for your task. You can specify the angle by which the rectangle is rotated.
Here is a sample code (taken from the docs) for drawing a rotated rectangle:
Mat image(200, 200, CV_8UC3, Scalar(0));
RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
Point2f vertices[4];
rRect.points(vertices);
for (int i = 0; i < 4; i++)
    line(image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0));
Rect brect = rRect.boundingRect();
rectangle(image, brect, Scalar(255,0,0));
imshow("rectangles", image);
waitKey(0);
Here is the result:

OpenCV: Issues in using color frames with SURF features

I am trying to work with this code so that SURF can be implemented using color frames/images, and then use the Kalman_Color_Object_Track code here to track the detected object by its color value with a Kalman filter. These are the steps that I intend to take, but I am stuck since this SURF detection code does not accept/work with color images:
"book1.png" is the color image
After the rectangle around the image is detected from the incoming frames, the Mat structure is changed to IplImage since the Kalman_Color_Object_Track code is in C++ by
dest_image=cvCloneImage(&(IplImage)image);
mat_frame=cvCloneImage(&(IplImage)frame);
Call the Kalman_Color_Object_Track( mat_frame,dest_image,30); method.
Questions: (A) How can this code be made to work so that SURF features are extracted and detected on color images? (B) What should be passed in the function signature of Kalman_Color_Object_Track()? (C) Where exactly in the object detection module should it be called?
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;

IplImage *mat_dest_image=0;
IplImage *mat_frame=0;

/* Object detection and recognition from video */
int main()
{
    Mat object = imread( "book1.png", 1 );   // fixed: the original had a dangling comma; the flag 1 loads the image in color
    if( !object.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    // Detect the keypoints using the SURF detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    // Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;
    namedWindow("Good Matches");
    namedWindow("Tracking");

    // Get the corners from the object
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    VideoCapture cap("booksvideo.avi");

    for(;;)
    {
        Mat frame;
        cap >> frame;
        imshow("Good Matches", frame);

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<std::vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        //cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) // THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        // Draw only the "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                // Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );
            perspectiveTransform( obj_corners, scene_corners, H);

            // Draw lines between the corners (the mapped object in the scene image)
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );

            mat_dest_image=cvCloneImage(&(IplImage)image);
            mat_frame=cvCloneImage(&(IplImage)frame);

            Kalman_Color_Object_Track( ); // The tracking method
        }

        // Show detected matches
        imshow( "Good Matches", img_matches );
        for( int i = 0; i < good_matches.size(); i++ )
        {
            printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
        }
        waitKey(0);
    }
    return 0;
}
This paper implemented the SIFT descriptor on color images by computing gradient histograms for each channel independently. Perhaps you could try the same approach for SURF features.
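As a rough sketch of that per-channel idea in Python (my illustration, not the paper's code; it assumes an OpenCV build that still ships the non-free SURF implementation in cv2.xfeatures2d):

import cv2
import numpy as np

surf = cv2.xfeatures2d.SURF_create(hessianThreshold=500)

img = cv2.imread('book1.png')             # color image, BGR channel order
keypoints, descriptors = [], []
for channel in cv2.split(img):            # run SURF on B, G and R separately
    kp, des = surf.detectAndCompute(channel, None)
    if des is not None:
        keypoints.extend(kp)
        descriptors.append(des)

# stack the per-channel descriptors into one matrix for matching
des_all = np.vstack(descriptors)
print(len(keypoints), des_all.shape)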

Detect Only Circles using OpenCV

I am using the following code to detect circles only, but it is also detecting other shapes. Please help me with this. I have used HoughCircles, but it does not give good results. My requirement is to detect only the circles.
Mat src, src_gray;

/// Read the image
src = t2;
if( !src.data ) // Check for invalid input
{
    cout << "Could not open or find the image" << std::endl;
    cv::waitKey(5000);
}

/// Convert it to gray
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );

/// Reduce the noise so we avoid false circle detection
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );

Mat src_lines; Mat src_gray_lines;
int thresh_lines = 100;
RNG rng_lines(12345);

Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;

src_gray_lines = src_gray;

/// Detect edges using Threshold
threshold( src_gray_lines, threshold_output, thresh_lines, 255, THRESH_BINARY );

/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

/// Find the rotated rectangles and ellipses for each contour
vector<RotatedRect> minRect( contours.size() );
vector<RotatedRect> minEllipse( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
    minRect[i] = minAreaRect( Mat(contours[i]) );
}

/// Draw contours + rotated rects + ellipses
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( size_t i = 0; i < contours.size(); i++ )
{
    // rotated rectangle
    Point2f rect_points[4];
    minRect[i].points( rect_points );
    for( int j = 0; j < 4; j++ )
        line( src, rect_points[j], rect_points[(j+1)%4], Scalar(255,0,0), 1, 8 );
}
Please let me know if my question is not clear .
You have the contour vectors, so you can easily compute each contour's length (perimeter), and a contour also has an area.
A circle has a specific ratio of area to perimeter (you can compute what this ratio should be), so eliminating the shapes that do not fit this ratio, within some delta, leaves only the circles.
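A minimal sketch of that idea in Python (my illustration; 4·pi·area/perimeter² equals 1 for a perfect circle, and the 0.8 cutoff below is an arbitrary delta to tune):

import cv2
import numpy as np

img = cv2.imread('shapes.png')            # placeholder file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]

cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

for c in cnts:
    area = cv2.contourArea(c)
    perimeter = cv2.arcLength(c, True)
    if perimeter == 0:
        continue
    circularity = 4 * np.pi * area / perimeter ** 2   # 1.0 for a perfect circle
    if circularity > 0.8:                             # keep near-circular shapes only
        cv2.drawContours(img, [c], -1, (0, 255, 0), 2)

cv2.imshow('circles', img)
cv2.waitKey()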
