Measuring an object by reference in an image, considering perspective or angle

I made an algorithm to measure an object using a reference, like this:
The reference is the frame and the other object (AOL) is the one I want to measure. My code produced this result:
But the real AOL is 78.6 cm². This is because of the perspective/angle of the photograph. So in my code I used deep learning to obtain the reference and AOL masks, and I made a simple calculation based on the number of pixels in each mask to obtain the AOL area (cm²), since I know the actual size of the reference; a minimal sketch of that calculation is shown below.
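The sketch assumes the two masks are saved as binary images; the file names and the reference's real-world area (REF_AREA_CM2) are placeholder assumptions.
import cv2

ref_mask = cv2.imread('ref_mask.png', cv2.IMREAD_GRAYSCALE)  # assumed file name
aol_mask = cv2.imread('aol_mask.png', cv2.IMREAD_GRAYSCALE)  # assumed file name

REF_AREA_CM2 = 100.0  # real area of the reference frame in cm^2 (assumed)

# cm^2 per pixel, valid only for a frontal view of a single plane
cm2_per_pixel = REF_AREA_CM2 / cv2.countNonZero(ref_mask)
aol_area_cm2 = cv2.countNonZero(aol_mask) * cm2_per_pixel
print(f'AOL area: {aol_area_cm2:.1f} cm^2')
This pixel ratio is only trustworthy when the camera looks at the plane head-on, which is exactly what the perspective problem below breaks.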
To fix that, I tried to correct the angle/perspective using the reference mask. I calculated the quadrangle vertices from the reference mask and created this code based on Perspective correction in OpenCV using python:
# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import imutils
import cv2
import math
import matplotlib.pyplot as plt

# load the mask image, convert it to grayscale, and blur it slightly
image = cv2.imread("./ref/20210702_114527.png")  # mask image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

# perform edge detection, then a dilation + erosion to
# close gaps between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# find the external contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# sort the contours from left to right and initialize the
# 'pixels per metric' calibration variable
(cnts, _) = contours.sort_contours(cnts)
pixelsPerMetric = None

orig = image.copy()
box = cv2.minAreaRect(min(cnts, key=cv2.contourArea))
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")

# order the points so they appear in top-left, top-right,
# bottom-right, bottom-left order, then draw the outline
# of the rotated bounding box
box = perspective.order_points(box)
cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

# loop over the ordered points and draw them
for (x, y) in box:
    cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
print('Box: ', box)
cv2.imshow('Orig', orig)

img = cv2.imread("./meat/sair/20210702_114527.jpg")  # original image
rows, cols, ch = img.shape
# pts1 = np.float32([[185,9],[304,80],[290, 134],[163,64]])  # looked good for 6e.jpg

### Collect the quadrangle vertices
pts1 = np.float32(box)
### Draw the vertices on the original image
for (x, y) in pts1:
    cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)

ratio = 1.6  # known width/height ratio of the reference
moldH = math.hypot(pts1[2][0] - pts1[1][0], pts1[2][1] - pts1[1][1])
moldW = ratio * moldH
pts2 = np.float32([[pts1[0][0], pts1[0][1]],
                   [pts1[0][0] + moldW, pts1[0][1]],
                   [pts1[0][0] + moldW, pts1[0][1] + moldH],
                   [pts1[0][0], pts1[0][1] + moldH]])

M = cv2.getPerspectiveTransform(pts1, pts2)
print('M:', M)
print('pts1:', pts1)
print('pts2:', pts2)

offsetSize = 320
dsize = (int(moldW + offsetSize), int(moldH + offsetSize))  # (width, height)
dst = cv2.warpPerspective(img, M, dsize)

plt.subplot(121), plt.imshow(img), plt.title('Input')
plt.subplot(122), plt.imshow(dst), plt.title('Output')
plt.show()
And I got this:
No perspective correction was achieved. I have a lot of information, such as the vertices and the correct size of the reference. Is it possible to do a mathematical correction based on the quadrangle vertices, like a regression? It does not necessarily have to correct the image directly, unless there is a good method for correcting the perspective of the image. Or maybe a different approach based on math? Thanks for your patience.
For Christoph:
This is the resulting position too:
pts1: [[ 9. 51.]
[392. 56.]
[388. 336.]
[ 5. 331.]]
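A hedged sketch of such a mathematical correction: instead of warping the image, map the AOL mask contour through the homography defined by the four detected reference corners. The reference's real-world size (16 cm × 10 cm, consistent with the 1.6 ratio) and the pixels-per-cm scale are assumptions.
import cv2
import numpy as np

pts1 = np.float32([[9, 51], [392, 56], [388, 336], [5, 331]])  # detected corners (tl, tr, br, bl)

REF_W_CM, REF_H_CM = 16.0, 10.0  # assumed real size of the reference
SCALE = 10.0                     # pixels per cm in the rectified plane
pts2 = np.float32([[0, 0],
                   [REF_W_CM * SCALE, 0],
                   [REF_W_CM * SCALE, REF_H_CM * SCALE],
                   [0, REF_H_CM * SCALE]])

H = cv2.getPerspectiveTransform(pts1, pts2)

# aol_contour: (N, 1, 2) contour of the AOL mask from cv2.findContours
# corrected = cv2.perspectiveTransform(aol_contour.astype(np.float32), H)
# area_cm2 = cv2.contourArea(corrected) / SCALE ** 2
Because the area is evaluated in the rectified metric plane, the result no longer depends on the camera angle, provided the reference and the AOL lie in the same plane.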

Related

Image processing how to detect specific custom shape in image

I want to detect a specific shape which is a combination of two shapes, a pentagon and a square. However, a side of the square should be around 3 times the side of the pentagon for the shape to match, as shown in the image.
I'm learning different image processing techniques for my requirement.
Contour detection: the issue is that knowing there are 7 corners is not enough, as the x : 3x side ratio also needs to be validated.
Haar features: the issue is that the images contain background and there is text inside these objects, so Haar would not be an optimal method in my opinion.
My idea
I think maybe I can detect lines and corners, and using the side lengths I can do some math and identify the shape. Is there any image processing technique that uses mathematical distance, scale, and location to identify an object?
Original image
matching
not matching
Here's a simple approach:
Obtain binary image. Load image, convert to grayscale, Gaussian blur, then Otsu's threshold.
Filter for the desired contour. We find contours and then filter using a combination of contour approximation + contour area. Observing that the shapes have a large area difference, we can use a predetermined threshold area to distinguish between the two.
Input -> Binary image -> Detected shape
Result
Match
Input -> Binary image -> Detected shape
Result
No Match
Since you didn't specify a language, I implemented it in Python
import cv2
import numpy as np

# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    area = cv2.contourArea(c)
    # Filter using contour approximation and area (removes small noise)
    if len(approx) > 4 and len(approx) < 8 and area > 200:
        # This is the larger version
        if area >= 5500:
            print('Match')
        # Smaller version
        elif area < 5500:
            print('No Match')
        cv2.drawContours(image, [c], -1, (255,0,12), 3)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()
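If you also need to enforce the asker's x : 3x side-length constraint rather than relying on area alone, a hedged helper over the approxPolyDP output could look like this (the 3.0 target ratio and 0.5 tolerance are assumptions):
import numpy as np

def side_ratio_ok(approx, target_ratio=3.0, tol=0.5):
    # approx: closed polygon from cv2.approxPolyDP, shape (N, 1, 2)
    pts = approx.reshape(-1, 2).astype(float)
    # lengths of consecutive sides, including the closing side
    sides = np.linalg.norm(np.roll(pts, -1, axis=0) - pts, axis=1)
    ratio = sides.max() / max(sides.min(), 1e-6)
    return abs(ratio - target_ratio) <= tol
Calling side_ratio_ok(approx) inside the loop above, in addition to the corner-count and area checks, would validate the shape geometry directly.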

How to group nearby contours in OpenCV Python? - Zebra Crossing detection

I want to detect the zebra crossing lines. I have tried to find the coordinates of the zebra crossing in the image using contours, but that gives the output for the distinct white boxes (only the white lines of the zebra crossing), whereas I need the coordinates of the entire zebra crossing.
Please let me know a way to group the contours, or suggest another method to detect the zebra crossing.
Input image
Output image obtained
Expected output
import cv2
import numpy as np

image = cv2.imread('d.jpg', -1)
paper = cv2.resize(image, (500,500))
ret, thresh_gray = cv2.threshold(cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY),
                                 200, 255, cv2.THRESH_BINARY)
# note: this is the OpenCV 3.x signature; OpenCV 4.x returns only two values
image, contours, hier = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for c in contours:
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    # convert all coordinates from floating point values to int
    box = np.int0(box)
    cv2.drawContours(paper, [box], 0, (0, 255, 0), 1)

cv2.imshow('paper', paper)
cv2.imwrite('paper.jpg', paper)
cv2.waitKey(0)
You can use a closing morphological operation to close the gaps.
I can suggest the following stages:
Find contours in thresh_gray.
Erase contours with a very small area (noise).
Erase contours with a low aspect ratio (assuming a zebra line must be long and narrow).
Use morphologyEx to perform a closing morphological operation; closing merges nearby components.
Find contours again in the image after erasing and closing.
At the last stage, ignore small contours.
Here is a working code sample:
import cv2
import numpy as np

image = cv2.imread('d.jpg', -1)
paper = cv2.resize(image, (500,500))
ret, thresh_gray = cv2.threshold(cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY), 200, 255, cv2.THRESH_BINARY)
image, contours, hier = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

# Erase small contours, and contours with a small aspect ratio (close to a square)
for c in contours:
    area = cv2.contourArea(c)
    # Fill very small contours with zero (erase small contours).
    if area < 10:
        cv2.fillPoly(thresh_gray, pts=[c], color=0)
        continue
    # https://stackoverflow.com/questions/52247821/find-width-and-height-of-rotatedrect
    rect = cv2.minAreaRect(c)
    (x, y), (w, h), angle = rect
    aspect_ratio = max(w, h) / min(w, h)
    # Assume a zebra line must be long and narrow (the long side must be at least 1.5 times the narrow side).
    if aspect_ratio < 1.5:
        cv2.fillPoly(thresh_gray, pts=[c], color=0)
        continue

# Use "close" morphological operation to close the gaps between contours
# https://stackoverflow.com/questions/18339988/implementing-imcloseim-se-in-opencv
thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (51,51)))

# Find contours in thresh_gray after closing the gaps
image, contours, hier = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for c in contours:
    area = cv2.contourArea(c)
    # Small contours are ignored.
    if area < 500:
        cv2.fillPoly(thresh_gray, pts=[c], color=0)
        continue
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    # convert all coordinates from floating point values to int
    box = np.int0(box)
    cv2.drawContours(paper, [box], 0, (0, 255, 0), 1)

cv2.imshow('paper', paper)
cv2.imwrite('paper.jpg', paper)
cv2.waitKey(0)
cv2.destroyAllWindows()
thresh_gray before erasing small and squared contours:
thresh_gray after erasing small and squared contours:
thresh_gray after close operation:
Final result:
Remark:
I have some doubts about the benefit of using a morphological operation for closing the gaps.
It might be better to use smart logic based on geometry instead, as sketched below.
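As a hedged example of such geometry-based logic, one could skip the morphological closing entirely, collect the corner points of every long, narrow box, and fit a single rotated rectangle around all of them (the area and aspect-ratio thresholds are assumptions):
import cv2
import numpy as np

def merge_zebra_boxes(contours, min_area=10, min_aspect=1.5):
    # Pool the corner points of every long, narrow contour and fit one
    # rotated rectangle around all of them.
    points = []
    for c in contours:
        if cv2.contourArea(c) < min_area:
            continue
        (x, y), (w, h), angle = cv2.minAreaRect(c)
        if min(w, h) == 0 or max(w, h) / min(w, h) < min_aspect:
            continue
        points.append(cv2.boxPoints(((x, y), (w, h), angle)))
    if not points:
        return None
    all_pts = np.vstack(points).astype(np.float32)
    return cv2.minAreaRect(all_pts)  # one box around the pooled corners
cv2.minAreaRect over the pooled corners approximates the single bounding box of the whole crossing.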

Counting homogeneous objects along the contour

I'm trying to get the number of objects in the frame by finding their contours with OpenCV.
That's a frame after applying the Canny filter:
Then I call the findContours() method and keep only the contours of suitable size.
When I overlay them on the frame I get
the following picture.
It can be seen that we only get objects whose contours are complete.
So the question is:
How can we artificially make the boundaries of the objects whole?
I tried to use dilate and erode (result of that), but after that the borders of the objects are glued together and we can't find their contours any more.
Since the contours are connected together, findContours will detect the connected contours as a single contour instead of individual separated circles. When you have connected contours, a potential approach is to use the watershed algorithm to label and detect each contour. Here are the results:
Input image
Output
Code
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed  # in newer scikit-image: skimage.segmentation
from scipy import ndimage

# Load image, convert to grayscale, and Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Compute the Euclidean distance from every binary pixel
# to the nearest zero pixel, then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
# note: indices=False was removed in newer scikit-image releases
local_max = peak_local_max(distance_map, indices=False, min_distance=20, labels=thresh)

# Perform connected-component analysis, then apply watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)

# Iterate through the unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask for the current label
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and keep the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    color = list(np.random.random(size=3) * 256)
    cv2.drawContours(image, [c], -1, color, 4)

cv2.imshow('image', image)
cv2.waitKey()
Here are some other references:
Image segmentation with Watershed Algorithm
Watershed Algorithm: Marker-based Segmentation
How to define the markers for Watershed
Find contours after watershed
It seems like you have a pattern for your objects, and those objects sometimes overlap.
I'd suggest you convolve your image with an object pattern and then process the resulting scores image.
In more detail:
Suppose for simplicity that your initial image has only one channel, and the object you're looking for is a known pattern of size [W_p, H_p].
First step: construct a new image, scores, where each pixel S in scores is the likelihood that this pixel is the pattern center.
One way to do that: for each pixel P in the original image, "cut" the [W_p, H_p] patch around P (e.g. img(Rect(P - W_p/2, P - H_p/2, W_p, H_p))), subtract the patch from the pattern to find the "distance" between them (e.g. cv::sum(cv::absdiff(patch, pattern)) in OpenCV), and save this sum to S.
Another way: S = P.clone(); pattern = pattern / cv::sum(pattern); and then use cv::filter2D on S with the pattern...
Now that you have a scores image, you should filter out false positives:
1. Take the top 2% of the scores (one way is with cv::calcHist).
2. For each pixel that has a neighbor within [W_p, H_p] with a higher score, set this pixel to zero.
Now you should be left with an image of zeros where only pattern centers have some value. Hurray!
If you don't know in advance what an object will look like, you can find one object using contours, then "cut it out" using the convex hull of its contour (plus its bounding box), and use it as the convolution kernel for finding the rest.
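OpenCV's cv2.matchTemplate implements essentially this scores-image idea (with normalized cross-correlation rather than absolute difference); a hedged sketch, where the file names and the 0.8 score threshold are assumptions:
import cv2
import numpy as np

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)        # assumed file name
pattern = cv2.imread('pattern.png', cv2.IMREAD_GRAYSCALE)  # assumed file name

# scores[y, x] = similarity between the patch at (x, y) and the pattern
scores = cv2.matchTemplate(img, pattern, cv2.TM_CCOEFF_NORMED)

# non-maximum suppression: keep a peak only if no higher score exists
# within a pattern-sized neighborhood
h, w = pattern.shape
local_max = cv2.dilate(scores, np.ones((h, w), np.uint8))
peaks = np.argwhere((scores >= 0.8) & (scores >= local_max))
print('object count:', len(peaks))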

How to filter white bits in a monochrome image that exceed a specified area?

I have this fancy image of "Brainbow image showing map of neuronal circuits of the mouse cerebral cortex"
and I want to count the number of green flashes, so I ran k-means on it with 15 clusters and isolated the two colors that together do the job, but I am left with many green streaks/tails and the edges of yellow flashes.
I was hoping to find some algorithm that thresholds by area and selects only the actual green flashes, or maybe another approach where I would not face this problem. I used the k-means implementation from sklearn.cluster.
You could use color thresholding to isolate the green flashes. The idea is to convert the image to HSV format and define a lower/upper color threshold range. This will give you a binary mask. From here we can do additional processing by performing morphological opening with an elliptical shaped kernel to remove noise and tails. Finally we can find contours and filter using contour area with a defined threshold area to only keep the larger blobs. Here's the result:
Count: 116
Code
import numpy as np
import cv2

# Color threshold
image = cv2.imread('1.jpg')
original = image.copy()
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([42, 67, 0])
upper = np.array([69, 255, 255])
mask = cv2.inRange(hsv, lower, upper)

# Perform morphological opening to remove noise and tails
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)

# Find contours and filter using contour area
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
count = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area < 5:
        # erase small blobs from the mask
        cv2.drawContours(opening, [c], -1, (0,0,0), -1)
    else:
        count += 1

result = cv2.bitwise_and(original, original, mask=opening)
print('Count: {}'.format(count))
cv2.imshow('mask', mask)
cv2.imshow('opening', opening)
cv2.imshow('result', result)
cv2.waitKey()

Horizontal Line detection with OpenCV

I am trying to find horizontal and vertical lines from an image which came from a "document". The documents are scanned pages from contracts and so the lines look like what you would see in a table or in a contract block.
I have been trying OpenCV for the job. The Hough transform implementation in OpenCV seemed useful for the job, but I could not find any combination of parameters that would allow it to cleanly find the vertical and horizontal lines. I tried with and without edge detection. No luck. If anyone has done anything similar I'm interested in knowing how.
Here is an image of my before-and-after experimentation with HoughP in OpenCV. It's the best I could do: http://dl.dropbox.com/u/3787481/Untitled%201.png
So now I'm wondering whether there is another kind of transform I could use which would allow me to reliably find horizontal and vertical lines (and preferably dashed lines too).
I know this problem is solvable because I have Nuance and ABBYY OCR tools which can both reliably extract horizontal and vertical lines and return me the bounding box of the lines.
Thanks!
Patrick.
Have you seen the code sample in the HoughLinesP function documentation?
I think you can use it as a starting point for your algorithm. To pick horizontal and vertical lines you just need to filter out other lines by line angle, as in the sketch below.
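A hedged sketch of that angle filter (the file name, Canny thresholds, and 5-degree tolerance are assumptions); modern OpenCV returns lines with shape (N, 1, 4):
import cv2
import math

img = cv2.imread('page.png')
edges = cv2.Canny(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), 80, 120)
lines = cv2.HoughLinesP(edges, 1, math.pi / 180, 50,
                        minLineLength=30, maxLineGap=5)
if lines is not None:
    for x1, y1, x2, y2 in lines.reshape(-1, 4):
        angle = math.degrees(math.atan2(y2 - y1, x2 - x1)) % 180
        # keep only lines within ~5 degrees of horizontal or vertical
        if min(angle, 180 - angle) < 5 or abs(angle - 90) < 5:
            cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)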
UPDATE:
As I see it, you need to find not the lines but the horizontal and vertical edges on the page. For this task you need to combine several processing steps to get good results.
For your image I'm able to get good results by combining Canny edge detection with HoughLinesP. Here is my code (I've used python, but I think you see the idea):
img = cv2.imread("C:/temp/1.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 80, 120)
lines = cv2.HoughLinesP(edges, 1, math.pi/2, 2, None, 30, 1)
# note: lines[0] matches the old OpenCV 2.x result layout; in modern
# OpenCV, iterate over lines[:, 0] instead
for line in lines[0]:
    pt1 = (line[0], line[1])
    pt2 = (line[2], line[3])
    cv2.line(img, pt1, pt2, (0,0,255), 3)
cv2.imwrite("C:/temp/2.png", img)
Result looks like:
Here's a complete OpenCV solution using morphological operations.
Obtain binary image
Create horizontal kernel and detect horizontal lines
Create vertical kernel and detect vertical lines
Here's a visualization of the process. Using this input image:
Binary image
import cv2
# Load image, convert to grayscale, Otsu's threshold
image = cv2.imread('1.png')
result = image.copy()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
Detected horizontal lines highlighted in green
# Detect horizontal lines
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40,1))
detect_horizontal = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(result, [c], -1, (36,255,12), 2)
Detected vertical lines highlighted in green
# Detect vertical lines
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,10))
detect_vertical = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(result, [c], -1, (36,255,12), 2)
Result
Here's the output using another input image
Input -> Binary -> Detected Horizontal -> Detected Vertical -> Result
Note: depending on the image, you may have to modify the kernel size. For instance, to capture longer horizontal lines, it may be necessary to increase the horizontal kernel from (40, 1) to, say, (80, 1). To detect thicker horizontal lines, you could increase the width of the kernel to, say, (80, 2). In addition, you could increase the number of iterations when performing cv2.morphologyEx(). Similarly, you could modify the vertical kernel to detect more or fewer vertical lines. There is a trade-off when increasing or decreasing the kernel size, as you may capture more or less of the lines. Again, it all varies depending on the input image.
Full code for completeness
import cv2
# Load image, convert to grayscale, Otsu's threshold
image = cv2.imread('1.png')
result = image.copy()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Detect horizontal lines
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40,1))
detect_horizontal = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(result, [c], -1, (36,255,12), 2)
# Detect vertical lines
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,10))
detect_vertical = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(result, [c], -1, (36,255,12), 2)
cv2.imshow('result', result)
cv2.waitKey()
If you just want the "lines" and not the "line segments", I would avoid using Canny, Hough, findContours or any other such function if you want more speed in your code. If your images are not rotated and what you want to find is always vertical or horizontal, I would just use cv::Sobel (once for vertical edges, once for horizontal) and create accumulation arrays for columns and rows. Then you can search for maxima in those accumulations or profiles, for instance by setting a threshold, and you will know the row or column in which there is a vertical or horizontal edge line.
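A minimal sketch of that Sobel-plus-accumulation idea (the file name and the 0.5 threshold factor are assumptions):
import cv2
import numpy as np

gray = cv2.imread('page.png', cv2.IMREAD_GRAYSCALE)  # assumed file name

# strong horizontal gradients mark vertical edges, and vice versa
grad_x = np.abs(cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3))
grad_y = np.abs(cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3))

col_profile = grad_x.sum(axis=0)  # one accumulated value per column
row_profile = grad_y.sum(axis=1)  # one accumulated value per row

# columns/rows whose accumulated gradient exceeds the threshold
vertical_cols = np.where(col_profile > 0.5 * col_profile.max())[0]
horizontal_rows = np.where(row_profile > 0.5 * row_profile.max())[0]
print('vertical lines at columns:', vertical_cols)
print('horizontal lines at rows:', horizontal_rows)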
You might consider leaving the Hough line detection since this method looks for "global" lines, not necessarily line segments. I recently implemented an application that identified "parallelograms" - essentially squares that might be rotated and perspective fore-shortened due to viewing angle. You might consider something similar. My pipeline was:
Convert from RGB to grayscale (cvCvtColor)
Smooth (cvSmooth)
Threshold (cvThreshold)
Detect edges (cvCanny)
Find contours (cvFindContours)
Approximate contours with linear features (cvApproxPoly)
In your application, the resulting contour list will likely be large (depending upon the "aggressiveness" of smoothing and the feature enhancement of the Canny edge detector). You can prune this list by a variety of parameters: the number of points returned from the contour finder, the area of the contour (cvContourArea), etc. From my experience, I would expect "valid" lines in your application to have well-defined area and vertex-count properties. Additionally, you can filter out contours based on the distance between end-points, the angle defined by the line connecting end-points, etc.
Depending upon how much CPU "time" you have, you can always pair the Hough algorithm with an algorithm like that above to robustly identify horizontal and vertical lines.
Don't convert the RGB image to grayscale. Sometimes different colors in RGB are merged to the same grayscale value, so grayscale processing could miss some contours. You should analyze each of the RGB channels separately, for example:
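A hedged sketch of that per-channel idea: run Canny on each channel and OR the edge maps, so an edge lost in one channel can still be found in another (the file name and thresholds are placeholders).
import cv2

img = cv2.imread('page.png')
b, g, r = cv2.split(img)
edges = cv2.Canny(b, 80, 120) | cv2.Canny(g, 80, 120) | cv2.Canny(r, 80, 120)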
Here is an approach that accumulates arrays for columns and rows. Then one can search for maxima in such accumulations (above a certain threshold) and deduce in which row or column there is a vertical or horizontal line.
If you want to quickly test the code, use the following Google Colab Notebook.
Google Colab Notebook
import numpy as np
import cv2
import scipy
from scipy.signal import find_peaks
from matplotlib import pyplot as plt
url = "https://i.stack.imgur.com/S00ap.png"
!wget $url -q -O input.jpg
fileName = 'input.jpg'
img = cv2.imread(fileName)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
tmp = img.copy()
gray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
blurred = cv2.bilateralFilter(gray, 11, 61, 39)
edges = cv2.Canny(blurred, 0, 255)
v_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,3))
h_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7,1))
v_morphed = cv2.morphologyEx(edges, cv2.MORPH_OPEN, v_kernel, iterations=2)
v_morphed = cv2.dilate(v_morphed, None)
h_morphed = cv2.morphologyEx(edges, cv2.MORPH_OPEN, h_kernel, iterations=2)
h_morphed = cv2.dilate(h_morphed, None)
v_acc = cv2.reduce(v_morphed, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
h_acc = cv2.reduce(h_morphed, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
def smooth(y, box_pts):
    box = np.ones(box_pts) / box_pts
    y_smooth = np.convolve(y, box, mode='same')
    return y_smooth
s_v_acc = smooth(v_acc[0,:],9)
s_h_acc = smooth(h_acc[:,0],9)
v_peaks, v_props = find_peaks(s_v_acc, 0.70*np.max(np.max(s_v_acc)))
h_peaks, h_props = find_peaks(s_h_acc, 0.70*np.max(np.max(s_h_acc)))
for peak_index in v_peaks:
    cv2.line(tmp, (peak_index, 0), (peak_index, img.shape[0]), (255, 0, 0), 2)
for peak_index in h_peaks:
    cv2.line(tmp, (0, peak_index), (img.shape[1], peak_index), (0, 0, 255), 2)
v_height = v_props['peak_heights'] #list of the heights of the peaks
h_height = h_props['peak_heights'] #list of the heights of the peaks
def align_axis_x(ax, ax_target):
    """Make x-axis of `ax` aligned with `ax_target` in figure"""
    posn_old, posn_target = ax.get_position(), ax_target.get_position()
    ax.set_position([posn_target.x0, posn_old.y0, posn_target.width, posn_old.height])

def align_axis_y(ax, ax_target):
    """Make y-axis of `ax` aligned with `ax_target` in figure"""
    posn_old, posn_target = ax.get_position(), ax_target.get_position()
    ax.set_position([posn_old.x0, posn_target.y0, posn_old.width, posn_target.height])
fig = plt.figure(constrained_layout=False, figsize=(24,16))
spec = fig.add_gridspec(ncols=4, nrows=2, height_ratios=[1, 1])
ax1 = fig.add_subplot(spec[0,0])
ax1.imshow(tmp)
ax2 = fig.add_subplot(spec[0, 1])
ax2.imshow(v_morphed)
ax3 = fig.add_subplot(spec[0, 2])
ax3.imshow(h_morphed)
ax4 = fig.add_subplot(spec[0, 3], sharey=ax3)
ax4.plot(h_acc[:,0], np.arange(len(h_acc[:,0])), 'y', marker="o", ms=1, mfc="k", mec="k")
ax4.plot(s_h_acc, np.arange(len(s_h_acc)), 'r', lw=1)
ax4.plot(h_height, h_peaks, "x", lw="5")
ax5 = fig.add_subplot(spec[1, 1], sharex=ax2)
ax5.plot(np.arange(len(v_acc[0,:])), v_acc[0,:], 'y', marker="o", ms=1, mfc="k", mec="k")
ax5.plot(np.arange(len(s_v_acc)), s_v_acc, 'r', lw=2)
ax5.plot(v_peaks, v_height, "x", lw="5")
plt.tight_layout()
align_axis_y(ax4,ax3)
align_axis_x(ax5,ax2)
