Differentiating a square from a rectangle in an image without using OpenCV

I have an image that contains rectangles and squares, and I want to distinguish which shapes are rectangles and which are squares. I used the Harris corner detection algorithm to extract the corner points, and from these I can obtain the pixel indices of the corners. The next task is to tell the two apart. I know the condition for a square is height == width, and I want to use this information to perform the differentiation.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as im
from scipy import ndimage

# 1. Before doing any operations convert the image into a grayscale image
img = im.imread('OD6.jpg')
plt.imshow(img)
plt.show()

# split the colour channels
R = img[:, :, 0]
G = img[:, :, 1]
B = img[:, :, 2]
M, N = R.shape
gray_img = np.zeros((M, N), dtype=int)
for i in range(M):
    for j in range(N):
        gray_img[i, j] = (R[i, j] * 0.2989) + (G[i, j] * 0.5870) + (B[i, j] * 0.114)
plt.imshow(gray_img, cmap='gray')
plt.show()

# 2. Apply the Sobel filter to find the gradients in the x and y directions
#    and smooth the products with a Gaussian filter (sigma=1)
imarr = np.asarray(gray_img, dtype=np.float64)
ix = ndimage.sobel(imarr, 0)
iy = ndimage.sobel(imarr, 1)
ix2 = ix * ix
iy2 = iy * iy
ixy = ix * iy
ix2 = ndimage.gaussian_filter(ix2, sigma=1)
iy2 = ndimage.gaussian_filter(iy2, sigma=1)
ixy = ndimage.gaussian_filter(ixy, sigma=1)

c, l = imarr.shape
result = np.zeros((c, l))
r = np.zeros((c, l))
rmax = 0  # maximum value of the Harris response
for i in range(c):
    for j in range(l):
        m = np.array([[ix2[i, j], ixy[i, j]], [ixy[i, j], iy2[i, j]]], dtype=np.float64)
        r[i, j] = np.linalg.det(m) - 0.04 * (np.power(np.trace(m), 2))
        if r[i, j] > rmax:
            rmax = r[i, j]

# 3. Apply non-maximum suppression
for i in range(c - 1):
    for j in range(l - 1):
        if r[i, j] > 0.01 * rmax and r[i, j] > r[i-1, j-1] and r[i, j] > r[i-1, j+1] \
                and r[i, j] > r[i+1, j-1] and r[i, j] > r[i+1, j+1]:
            result[i, j] = 1

xy_coords = np.flip(np.column_stack(np.where(result == 1)), axis=1)
print(xy_coords)
pc, pr = np.where(result == 1)
plt.plot(pr, pc, "b.")
plt.imshow(img, 'gray')
plt.show()
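For completeness, here is a minimal sketch of the square-versus-rectangle check I have in mind, assuming the detected corners have already been grouped into sets of four points per shape and the shapes are roughly axis-aligned (the function name and tolerance value are my own, not part of the code above):

import numpy as np

def classify_quad(corners, tol=0.05):
    # corners: array-like of shape (4, 2) holding the (x, y) corner coordinates
    pts = np.asarray(corners, dtype=float)
    width = pts[:, 0].max() - pts[:, 0].min()   # extent along x
    height = pts[:, 1].max() - pts[:, 1].min()  # extent along y
    # a square has (approximately) equal width and height
    if abs(width - height) <= tol * max(width, height):
        return 'square'
    return 'rectangle'

print(classify_quad([(0, 0), (10, 0), (10, 10), (0, 10)]))   # square
print(classify_quad([(0, 0), (20, 0), (20, 10), (0, 10)]))   # rectangle

Grouping the points in xy_coords into one set of four corners per shape (for example by clustering nearby detections) is still needed before this check.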

Related

How to apply non-maximum suppression for corner detection

Finally I have working code that detects the corners of the rectangles in an image. The problem is that the code detects multiple points at the same corner. I am now trying to introduce non-maximum suppression into my code, but it is not working. I have also tried a suggestion from an earlier question, but that did not work either. How do I carry out this non-maximum suppression properly?
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as im
from scipy import ndimage

# 1. Before doing any operations convert the image into a grayscale image
img = im.imread('OD6.jpg')
plt.imshow(img)
plt.show()

# split the colour channels
R = img[:, :, 0]
G = img[:, :, 1]
B = img[:, :, 2]
M, N = R.shape
gray_img = np.zeros((M, N), dtype=int)
for i in range(M):
    for j in range(N):
        gray_img[i, j] = (R[i, j] * 0.2989) + (G[i, j] * 0.5870) + (B[i, j] * 0.114)
plt.imshow(gray_img, cmap='gray')
plt.show()

# 2. Apply the Sobel filter to find the gradients in the x and y directions
#    and smooth the products with a Gaussian filter (sigma=1)
imarr = np.asarray(gray_img, dtype=np.float64)
ix = ndimage.sobel(imarr, 0)
iy = ndimage.sobel(imarr, 1)
ix2 = ix * ix
iy2 = iy * iy
ixy = ix * iy
ix2 = ndimage.gaussian_filter(ix2, sigma=1)
iy2 = ndimage.gaussian_filter(iy2, sigma=1)
ixy = ndimage.gaussian_filter(ixy, sigma=1)

c, l = imarr.shape
result = np.zeros((c, l))
r = np.zeros((c, l))
rmax = 0  # maximum value of the Harris response
for i in range(c):
    for j in range(l):
        m = np.array([[ix2[i, j], ixy[i, j]], [ixy[i, j], iy2[i, j]]], dtype=np.float64)
        r[i, j] = np.linalg.det(m) - 0.04 * (np.power(np.trace(m), 2))
        if r[i, j] > rmax:
            rmax = r[i, j]

# 3. Apply non-maximum suppression
for i in range(c - 1):
    for j in range(l - 1):
        if r[i, j] > 0.01 * rmax and r[i, j] > r[i-1, j-1] and r[i, j] > r[i-1, j+1] \
                and r[i, j] > r[i+1, j-1] and r[i, j] > r[i+1, j+1]:
            result[i, j] = 1

xy_coords = np.flip(np.column_stack(np.where(result == 1)), axis=1)
print(xy_coords)
pc, pr = np.where(result == 1)
plt.plot(pr, pc, "b.")
plt.imshow(img, 'gray')
plt.show()
There is a lot of material available on corner detection. This has also been solved on Stack Overflow before; please see here.
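For reference, a common way to do the suppression is to keep only pixels that are the local maximum of their neighbourhood, which avoids multiple detections at the same corner. A minimal sketch using scipy.ndimage.maximum_filter on the Harris response r computed above (the window size and threshold value are assumptions):

import numpy as np
from scipy import ndimage

def non_max_suppression(r, window=5, rel_thresh=0.01):
    # a pixel survives only if it equals the maximum of its window ...
    local_max = ndimage.maximum_filter(r, size=window)
    # ... and is above a fraction of the global maximum response
    mask = (r == local_max) & (r > rel_thresh * r.max())
    return mask.astype(int)

# result = non_max_suppression(r)   # replaces the manual suppression loop
# pc, pr = np.where(result == 1)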

Convolution of image with kernel gives white output

I have code that filters an image with a 3x3 Gaussian kernel, but the output is white. The GaussianFilter function works (its output is correct), but there is a problem in the convolution function.
What could the problem be? I checked the code again but couldn't solve this.
import math
import numpy as np
import cv2

path = "funny_hats.jpg"
inputImage = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

def GaussianFilter(img):
    # generate a 3x3 kernel
    kernel = np.ones((3, 3), dtype='float64')
    size = 3
    mean = int(size / 2)
    sigma = 1  # standard deviation is 1
    sumAll = 0
    for i in range(size):
        for j in range(size):
            kernel[i, j] = math.exp(-1 * ((math.pow((i - mean) / sigma, 2.0) + math.pow((j - mean) / sigma, 2.0)) / (2 * math.pow(sigma, 2)))) / (sigma * math.pow(2 * math.pi, 1 / 2))
            sumAll += kernel[i, j]
    # normalize the kernel
    for i in range(size):
        for j in range(size):
            kernel[i, j] /= sumAll
    # filter the image with the created kernel
    img = convolution(img, kernel)  # filtered image
    print(img)
    cv2.imshow('aa', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def convolution(img, dest):
    res = img
    [h, w] = img.shape
    [kh, kw] = dest.shape  # kernel shape
    kr = int(kh / 2)  # kernel radius
    res = np.zeros(img.shape)
    for i in range(0 + kr, h - kr):
        for j in range(0 + kr, w - kr):
            for k in range(-1 * kr, kr + 1):
                for m in range(-1 * kr, kr + 1):
                    res[i, j] += dest[k, m] * img[i + k, j + m]
    res[:, 0] = res[:, 1]
    res[:, w - 1] = res[:, w - 2]
    res[0, :] = res[1, :]
    res[h - 1, :] = res[h - 2, :]
    return res

GaussianFilter(inputImage)
res = img
This is wrong: you must start from an image in which all pixels are zero (black).
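Another thing worth checking is the display step itself: cv2.imshow multiplies floating-point images by 255 (it expects float pixels in [0, 1]), so a float64 result that already holds values up to 255 shows up as solid white. A minimal sketch of that fix, with the filtered result stood in by the input image:

import numpy as np
import cv2

inputImage = cv2.imread("funny_hats.jpg", cv2.IMREAD_GRAYSCALE)
res = inputImage.astype(np.float64)              # stand-in for convolution(img, kernel)
res_u8 = np.clip(res, 0, 255).astype(np.uint8)   # back to 8-bit so imshow scales correctly
cv2.imshow("filtered", res_u8)
cv2.waitKey(0)
cv2.destroyAllWindows()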

Interleaving Coding on a variable array size

I am trying to think of / find an algorithm that allows interleaving on an array whose size isn't a power of two. The current method takes the array size, finds the square root n, and creates an n x n matrix. The rows and columns are then exchanged and the matrix is flattened back into an array.
I'm trying to find an indexing scheme that is flexible with any input array size, still gives a decent distribution of the data, and allows reconstruction of the original array. I have provided some example code explicitly showing how the n x n interleaver works (a more flexible sketch follows the code).
import numpy as np
import math

N = 75
input_stream = np.random.randint(0, 101, size=N)

#############
# Interleaver
#############
mat_dim = int(math.sqrt(len(input_stream)))
interleave_mat = np.zeros((mat_dim, mat_dim), dtype=int)
interleave_out = np.zeros(mat_dim**2, dtype=int)
for i in range(0, mat_dim):
    for j in range(0, mat_dim):
        interleave_mat[i][j] = input_stream[i * mat_dim + j]
for i in range(0, mat_dim):
    for j in range(0, mat_dim):
        interleave_out[i * mat_dim + j] = interleave_mat[j][i]

################
# De-Interleaver
################
deinterleave_mat = np.zeros((mat_dim, mat_dim), dtype=int)
deinterleave_out = np.zeros(mat_dim**2, dtype=int)
for i in range(0, mat_dim):
    for j in range(0, mat_dim):
        deinterleave_mat[i][j] = interleave_out[i * mat_dim + j]
for i in range(0, mat_dim):
    for j in range(0, mat_dim):
        deinterleave_out[i * mat_dim + j] = deinterleave_mat[j][i]
output_stream = deinterleave_out

error_count = sum(1 for a, b in zip(input_stream, output_stream) if a != b)
if len(input_stream) > len(output_stream):
    error_count += len(input_stream) - len(output_stream)
print("Number of errors: {}".format(error_count))
print("input stream: {}".format(input_stream))
print("output stream: {}".format(output_stream))

How to keep the distance between $n$ particles within a certain range?

I am working on a problem in Molecular Dynamics and need to randomly generate a position array for np particles within a box of size [-L,L] x [-L,L]. Specifically, I need to generate the x-array for the x-coordinates with x(1) = 0 and the y-array for the y-coordinates with y(1) = y(2) = 0. I need the particles to be placed such that the distances between neighboring particles lie within some range (e.g. 0.9 <= r <= 1.1), as in the following picture:
However in my code I get something like this:
See how the red lines are larger than what I want.
My code is
REAL, DIMENSION(np)     :: x, y
REAL, DIMENSION(np, np) :: rmatrix
REAL    :: w1, w2, minv, maxv, xij, yij, rij
INTEGER :: i, j, iter0

! Generate random coordinates for the particles
x(1) = 0.0d0
y(1) = 0.0d0
y(2) = 0.0d0
!-------------------------------------------------------------------------
! Translation and rotation of the whole system are frozen (saving 4 degrees
! of freedom):
!   x(1) = 0.0d0; y(1) = 0.0d0  fixes one particle at the origin
!   y(2) = 0.0d0                fixes the second particle on the x-axis
!-------------------------------------------------------------------------
rmatrix = 100.0
minv = 0.0
maxv = 10
iter0 = 0

101 DO WHILE (maxv >= 1.1 .OR. minv <= 0.9)
    iter0 = iter0 + 1
    PRINT *, iter0
    CALL init_random_seed()
    DO i = 2, np
        CALL RANDOM_NUMBER(w1)
        x(i) = 10 * w1 - 5
    END DO
    DO i = 3, np
        CALL RANDOM_NUMBER(w2)
        y(i) = 10 * w2 - 5
    END DO
    ! rmatrix contains the distances between all particles
    DO i = 1, np
        DO j = 1, np
            IF (j .NE. i) THEN
                xij = x(i) - x(j)
                yij = y(i) - y(j)
                rij = SQRT(xij * xij + yij * yij)
                rmatrix(i, j) = rij
            END IF
        END DO
    END DO
    minv = MINVAL(rmatrix)  ! minimum distance between any two particles
                            ! (distances cannot be smaller); this is the
                            ! left endpoint of the range interval
    DO i = 1, np            ! attempt to control the right endpoint of the
        DO j = 1, np        ! range interval (this still needs to be edited)
            IF (j .NE. i) THEN
                maxv = MIN(maxv, rmatrix(i, j))
            END IF
        END DO
        IF (maxv >= 1.1) THEN
            GOTO 101
        END IF
    END DO
END DO

CONTAINS

SUBROUTINE init_random_seed()
    INTEGER :: i, n, clock
    INTEGER, DIMENSION(:), ALLOCATABLE :: seed
    CALL RANDOM_SEED(size = n)
    ALLOCATE(seed(n))
    CALL SYSTEM_CLOCK(COUNT=clock)
    seed = clock + 37 * (/ (i - 1, i = 1, n) /)
    CALL RANDOM_SEED(PUT = seed)
END SUBROUTINE init_random_seed
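As an alternative to regenerating the whole configuration until both bounds hold (which rarely terminates for larger np), the configuration can be grown one particle at a time: place each new particle at a random distance in [0.9, 1.1] from a randomly chosen existing particle, and reject it only if it comes closer than 0.9 to any other particle. A sketch of this idea in Python (the names, box size, and bounds are my own assumptions; the same logic translates directly to Fortran):

import numpy as np

rng = np.random.default_rng()

def place_particles(n, rmin=0.9, rmax=1.1, L=5.0, max_tries=10000):
    # grow a 2-D configuration in which every particle sits within [rmin, rmax]
    # of at least one neighbour and no pair is closer than rmin
    pts = [np.zeros(2)]                      # first particle fixed at the origin
    while len(pts) < n:
        for _ in range(max_tries):
            anchor = pts[rng.integers(len(pts))]
            r = rng.uniform(rmin, rmax)      # neighbour distance within the range
            theta = rng.uniform(0.0, 2.0 * np.pi)
            cand = anchor + r * np.array([np.cos(theta), np.sin(theta)])
            dists = np.linalg.norm(np.array(pts) - cand, axis=1)
            if dists.min() >= rmin and np.all(np.abs(cand) <= L):
                pts.append(cand)
                break
        else:
            raise RuntimeError("could not place a particle; relax the constraints")
    return np.array(pts)

coords = place_particles(20)
x, y = coords[:, 0], coords[:, 1]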

Scipy - A better way to avoid a manual loop when the matrix is sparse

Logistic regression's objective function is

$$f(w) = \frac{1}{2} w^T w + C \sum_{i=1}^{N} \log\left(1 + e^{-y_i w^T x_i}\right)$$

and the gradient is

$$\nabla f(w) = w - C \sum_{i=1}^{N} \frac{y_i}{1 + e^{y_i w^T x_i}} \, x_i,$$

where w is a scipy csr sparse matrix of shape n-by-1.
My question is: given one scipy csr sparse matrix and one numpy array, X_train and y_train respectively (each row of X_train is an x_i, each element of y_train is a y_i), is there a better way to calculate the gradient without a manual for loop?
For further information: I'm implementing large-scale logistic regression, so performance is important.
Thanks.
Update 5/19 (Add my current code)
Thanks to @Jaime's reminder, here is my code. I basically want to see if there is a better way to implement gradient(X, y, w).
import numpy as np
import scipy as sp
from sklearn import datasets
from numpy.linalg import norm
from scipy import sparse

eta = 0.01
xi = 0.1
C = 1

X_train, y_train = datasets.load_svmlight_file('lr/datasets/a9a')
X_test, y_test = datasets.load_svmlight_file('lr/datasets/a9a.t', n_features=X_train.shape[1])

def gradient(X, y, w):
    # w should be a column vector
    summation = w
    for i in range(X.shape[0]):
        exp_i = np.exp(y[i] * X.getrow(i).dot(w)[0, 0])
        summation = summation - (y[i] / (1 + exp_i)) * X.getrow(i).T
    return summation

def hes_mul(X, D, s):
    # w and s should be column vectors; returns a column vector
    return s + C * X.T.dot(D.dot(X.dot(s)))

def cg(X, y, w):
    # gradF is a column vector, so all of these are column vectors
    gradF = gradient(X, y, w)
    s = sparse.csr_matrix(np.zeros(X_train.shape[1])).T
    r = -1 * gradF
    d = r
    D = []
    for i in range(X.shape[0]):
        exp_i = np.exp((-1) * y[i] * w.T.dot(X.getrow(i).T)[0, 0])
        D.append(exp_i / ((1 + exp_i) ** 2))
    D = sparse.diags(D, 0)
    while True:
        r_norm = np.sqrt((r.data ** 2).sum())
        print(r_norm)
        print(np.sqrt((gradF.data ** 2).sum()))
        if r_norm <= xi * np.sqrt((gradF.data ** 2).sum()):
            return s
        hes_mul_d = hes_mul(X, D, d)
        alpha = (r_norm ** 2) / d.T.dot(hes_mul_d)[0, 0]
        s = s + alpha * d
        r = r - alpha * hes_mul_d
        beta = (r.data ** 2).sum() / (r_norm ** 2)
        d = r + beta * d

w = sparse.csr_matrix(np.zeros(X_train.shape[1])).T
s = cg(X_train, y_train, w)
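For the gradient itself, the per-row loop can be replaced by one sparse matrix-vector product in each direction. A sketch assuming w is kept as a dense 1-D numpy array rather than a sparse column vector (this is my own variant of gradient(X, y, w), not a drop-in replacement):

import numpy as np

def gradient_vec(X, y, w):
    # gradient of 0.5*w'w + C*sum_i log(1 + exp(-y_i x_i.w)) with C = 1:
    #   w - sum_i y_i x_i / (1 + exp(y_i x_i.w))
    z = y * X.dot(w)                 # y_i * (x_i . w) for every row at once
    coef = y / (1.0 + np.exp(z))     # per-sample weights
    return w - X.T.dot(coef)         # one sparse mat-vec instead of a Python loop

# Usage with the data loaded above:
# w = np.zeros(X_train.shape[1])
# g = gradient_vec(X_train, y_train, w)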
