Convolution of image with kernel gives white output

I have code that filters an image with a 3x3 Gaussian kernel, but the output is white. The GaussianFilter function works (its output is correct), but there is a problem in the convolution function.
What could the problem be? I have checked the code again but couldn't solve it.
import math
import numpy as np
import cv2
path="funny_hats.jpg"
inputImage = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
def GaussianFilter(img):
    # generating 3x3 kernel
    kernel = np.ones((3,3), dtype='float64')
    size = 3
    mean = int(size/2)
    sigma = 1  # standard deviation is 1
    sumAll = 0
    for i in range(size):
        for j in range(size):
            kernel[i,j] = math.exp(-1* ((math.pow( (i-mean)/sigma, 2.0) + (math.pow((j-mean)/sigma, 2.0)) ) / (2* math.pow(sigma,2)) )) / (sigma * math.pow(2*math.pi, 1/2))
            sumAll += kernel[i,j]
    # normalizing kernel
    for i in range(size):
        for j in range(size):
            kernel[i,j] /= sumAll
    # Filter image with created kernel
    img = convolution(img, kernel)  # filtered image
    print(img)
    cv2.imshow('aa', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def convolution(img, dest):
    res = img
    [h,w] = img.shape
    [kh, kw] = dest.shape  # kernel shape
    kr = int(kh/2)         # kernel radius
    res = np.zeros(img.shape)
    for i in range(0+kr, h-kr):
        for j in range(0+kr, w-kr):
            for k in range(-1*kr, kr+1):
                for m in range(-1*kr, kr+1):
                    res[i,j] += dest[k,m]*img[i+k, j+m]
    # replicate the borders
    res[:,0] = res[:,1]
    res[:,w-1] = res[:,w-2]
    res[0,:] = res[1,:]
    res[h-1,:] = res[h-2,:]
    return res
GaussianFilter(inputImage)

res = img
This is wrong. You must create an image in which all pixels are zero (black).
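A minimal sketch of that idea, using the image path from the question; the box kernel and the uint8 conversion at the end are assumptions on my part (cv2.imshow expects float images scaled to 0-1, so a float64 array in the 0-255 range is rendered as all white):

import numpy as np
import cv2

def convolution(img, kernel):
    h, w = img.shape
    kh, kw = kernel.shape
    kr = kh // 2
    res = np.zeros(img.shape)          # start from an all-black (zero) image
    for i in range(kr, h - kr):
        for j in range(kr, w - kr):
            for k in range(-kr, kr + 1):
                for m in range(-kr, kr + 1):
                    # index the kernel with non-negative offsets
                    res[i, j] += kernel[k + kr, m + kr] * img[i + k, j + m]
    return res

img = cv2.imread("funny_hats.jpg", cv2.IMREAD_GRAYSCALE).astype("float64")
kernel = np.full((3, 3), 1.0 / 9.0)    # simple box kernel, just for the demo
out = convolution(img, kernel)
# convert back to uint8 (or divide by 255) before display,
# otherwise the 0-255 float result saturates to white
cv2.imshow("filtered", np.clip(out, 0, 255).astype("uint8"))
cv2.waitKey(0)
cv2.destroyAllWindows()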

Related

How to properly process images with mixed noise types

The picture with noise is like this.
Noised picture: Image3.bmp
I was doing image processing in MATLAB with some built-in and self-implemented filters.
I have already tried a combination of bilateral, median, and Gaussian filters; the bilateral and gaussian code is at the end of this post.
img3 = double(imread('Image3.bmp')); % this is the noised image
lena = double(imread('lena_gray.jpg')); % this is the original one
img3_com = bilateral(img3, 3, 2, 80);
img3_com = medfilt2(img3_com, [3 3], 'symmetric');
img3_com = gaussian(img3_com, 3, 0.5);
img3_com = bilateral(double(img3_com), 6, 100, 13);
SNR3_com = snr(img3_com,img3_com - lena); % 17.1107
However, the result is not promising, with an SNR of only 17.11.
Filtered image: img3_com
The original picture is like this.
Clean original image: lena_gray.jpg
Could you please give me any ideas about how to process it? For example, what kind of noise generator produced the noised image, and what filtering or image-processing methods I could use to deal with it. Any help is appreciated!
My bilateral function bilateral.m
function img_new = bilateral(img_gray, window, sigmaS, sigmaI)
    imgSize = size(img_gray);
    img_new = zeros(imgSize);
    for i = 1:imgSize(1)
        for j = 1:imgSize(2)
            sum = 0;
            simiSum = 0;
            for a = -window:window
                for b = -window:window
                    x = i + a;
                    y = j + b;
                    p = img_gray(i,j);
                    q = 0;
                    if x < 1 || y < 1 || x > imgSize(1) || y > imgSize(2)
                        % q = 0;
                        continue;
                    else
                        q = img_gray(x,y);
                    end
                    gaussianFilter = exp( - double((a)^2 + (b)^2)/ (2 * sigmaS^2 ) - (double(p-q)^2)/ (2 * sigmaI^2 ));
                    % gaussianFilter = gaussian((a^2 + b^2)^(1/2), sigma) * gaussian(abs(p-q), sigma);
                    sum = sum + gaussianFilter * q;
                    simiSum = simiSum + gaussianFilter;
                end
            end
            img_new(i,j) = sum/simiSum;
        end
    end
    % disp SNR
    lena = double(imread('lena_gray.jpg'));
    SNR1_4_ = snr(img_new, img_new - lena);
    disp(SNR1_4_);
My gaussian implementation gaussian.m
function img_gau = gaussian(img, hsize, sigma)
    h = fspecial('gaussian', hsize, sigma);
    img_gau = conv2(img, h, 'same');
    % disp SNR
    lena = double(imread('lena_gray.jpg'));
    SNR1_4_ = snr(img_gau, img_gau - lena);
    disp(SNR1_4_);
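For reference, a rough Python/OpenCV sketch of the same chain described above (bilateral -> median -> Gaussian); the file names come from the post, but the parameter mapping is an untuned assumption on my part:

import cv2
import numpy as np

img3 = cv2.imread("Image3.bmp", cv2.IMREAD_GRAYSCALE)

# bilateral -> median -> Gaussian, mirroring the MATLAB pipeline above
# (d ~ 2*window+1, sigmaColor ~ sigmaI, sigmaSpace ~ sigmaS)
den = cv2.bilateralFilter(img3, 7, 80, 2)
den = cv2.medianBlur(den, 3)
den = cv2.GaussianBlur(den, (3, 3), 0.5)

# same SNR definition as the MATLAB snr(signal, signal - reference) call
lena = cv2.imread("lena_gray.jpg", cv2.IMREAD_GRAYSCALE).astype(np.float64)
noise = den.astype(np.float64) - lena
snr_db = 10 * np.log10(np.sum(den.astype(np.float64) ** 2) / np.sum(noise ** 2))
print(snr_db)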

How to apply the non-maximum suppression for the corner detection

I finally have working code that detects the corners of the rectangles in an image. The problem is that it detects multiple points at the same corner. I am now trying to introduce non-maximum suppression into my code, but it is not working. I have tried a previous suggestion, but that did not work either. How do I carry out this non-maximum suppression properly?
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as im
from scipy import ndimage

# 1. Before doing any operations convert the image into gray scale image
img = im.imread('OD6.jpg')
plt.imshow(img)
plt.show()

# split
R = img[:,:,0]
G = img[:,:,1]
B = img[:,:,2]
M, N = R.shape
gray_img = np.zeros((M, N), dtype=int)
for i in range(M):
    for j in range(N):
        gray_img[i, j] = (R[i, j]*0.2989) + (G[i, j]*0.5870) + (B[i, j]*0.114)
plt.imshow(gray_img, cmap='gray')
plt.show()

# 2. Applying sobel filter to find the gradients in x and y direction respectively and remove noise
#    using gaussian filter with sigma=1
imarr = np.asarray(gray_img, dtype=np.float64)
ix = ndimage.sobel(imarr, 0)
iy = ndimage.sobel(imarr, 1)
ix2 = ix * ix
iy2 = iy * iy
ixy = ix * iy
ix2 = ndimage.gaussian_filter(ix2, sigma=1)
iy2 = ndimage.gaussian_filter(iy2, sigma=1)
ixy = ndimage.gaussian_filter(ixy, sigma=1)

c, l = imarr.shape
result = np.zeros((c, l))
r = np.zeros((c, l))
rmax = 0  # initialize the maximum value of harris response
for i in range(c):
    for j in range(l):
        m = np.array([[ix2[i, j], ixy[i, j]], [ixy[i, j], iy2[i, j]]], dtype=np.float64)
        r[i, j] = np.linalg.det(m) - 0.04 * (np.power(np.trace(m), 2))
        if r[i, j] > rmax:
            rmax = r[i, j]

# 3. Applying non maximum supression
for i in range(c - 1):
    for j in range(l - 1):
        if r[i, j] > 0.01 * rmax and r[i, j] > r[i-1, j-1] and r[i, j] > r[i-1, j+1] \
                and r[i, j] > r[i+1, j-1] and r[i, j] > r[i+1, j+1]:
            result[i, j] = 1

xy_coords = np.flip(np.column_stack(np.where(result == 1)), axis=1)
print(xy_coords)
pc, pr = np.where(result == 1)
plt.plot(pr, pc, "b.")
plt.imshow(img, 'gray')
plt.show()
There is a lot of material available on corner detection. This has also been solved on Stack Overflow; please see here.
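For illustration, a minimal sketch of window-based non-maximum suppression using scipy.ndimage.maximum_filter; this is one common approach and an assumption on my part, not the code from the linked answer. It keeps a pixel only if it is the maximum of its neighbourhood and above a threshold:

import numpy as np
from scipy import ndimage

def non_max_suppression(response, window=10, rel_threshold=0.01):
    # keep pixels that are the maximum of their window x window neighbourhood
    # and exceed rel_threshold * max(response)
    local_max = ndimage.maximum_filter(response, size=window)
    mask = (response == local_max) & (response > rel_threshold * response.max())
    return np.argwhere(mask)  # array of (row, col) corner coordinates

# usage with the Harris response r computed above (one point per corner):
# corners = non_max_suppression(r, window=10)
# plt.plot(corners[:, 1], corners[:, 0], "b.")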

Interested in differentiating a square and a rectangle in an image without using OpenCV

I have an image that consists of a rectangle and a square, and I want to distinguish which one is the rectangle and which one is the square. I have used the Harris corner detection algorithm to extract the corner points, and from these I can get the indices of the corner pixels. The next task is to differentiate which shape is the rectangle and which is the square. I know the condition for a square is height = width, and I want to use this information to do the differentiation.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as im
from scipy import ndimage

# 1. Before doing any operations convert the image into gray scale image
img = im.imread('OD6.jpg')
plt.imshow(img)
plt.show()

# split
R = img[:,:,0]
G = img[:,:,1]
B = img[:,:,2]
M, N = R.shape
gray_img = np.zeros((M, N), dtype=int)
for i in range(M):
    for j in range(N):
        gray_img[i, j] = (R[i, j]*0.2989) + (G[i, j]*0.5870) + (B[i, j]*0.114)
plt.imshow(gray_img, cmap='gray')
plt.show()

# 2. Applying sobel filter to find the gradients in x and y direction respectively and remove noise
#    using gaussian filter with sigma=1
imarr = np.asarray(gray_img, dtype=np.float64)
ix = ndimage.sobel(imarr, 0)
iy = ndimage.sobel(imarr, 1)
ix2 = ix * ix
iy2 = iy * iy
ixy = ix * iy
ix2 = ndimage.gaussian_filter(ix2, sigma=1)
iy2 = ndimage.gaussian_filter(iy2, sigma=1)
ixy = ndimage.gaussian_filter(ixy, sigma=1)

c, l = imarr.shape
result = np.zeros((c, l))
r = np.zeros((c, l))
rmax = 0  # initialize the maximum value of harris response
for i in range(c):
    for j in range(l):
        m = np.array([[ix2[i, j], ixy[i, j]], [ixy[i, j], iy2[i, j]]], dtype=np.float64)
        r[i, j] = np.linalg.det(m) - 0.04 * (np.power(np.trace(m), 2))
        if r[i, j] > rmax:
            rmax = r[i, j]

# 3. Applying non maximum supression
for i in range(c - 1):
    for j in range(l - 1):
        if r[i, j] > 0.01 * rmax and r[i, j] > r[i-1, j-1] and r[i, j] > r[i-1, j+1] \
                and r[i, j] > r[i+1, j-1] and r[i, j] > r[i+1, j+1]:
            result[i, j] = 1

xy_coords = np.flip(np.column_stack(np.where(result == 1)), axis=1)
print(xy_coords)
pc, pr = np.where(result == 1)
plt.plot(pr, pc, "b.")
plt.imshow(img, 'gray')
plt.show()
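A minimal sketch of the height = width check described above, assuming the detected corner coordinates have already been grouped into one set of four points per shape (the grouping step and the tolerance value are assumptions, not part of the original code):

import numpy as np

def classify_shape(corners, tol=0.05):
    # corners: four (row, col) points belonging to one shape
    # returns 'square' if the bounding-box height and width match within tol
    corners = np.asarray(corners)
    height = corners[:, 0].max() - corners[:, 0].min()
    width = corners[:, 1].max() - corners[:, 1].min()
    if abs(height - width) <= tol * max(height, width):
        return 'square'
    return 'rectangle'

# usage: print(classify_shape([(10, 10), (10, 60), (60, 10), (60, 60)]))  # -> 'square'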

Smoothing Mask using healpy

I am using healpy.sphtfunc.smoothing for apodization of my binary mask, and I am having a problem: if I have a temperature cut of 100 K and make a binary mask corresponding to that cut, then after apodizing the mask with the routine above and applying it to my map, I get values of 120 K or more, i.e. above the 100 K cut. So I am confused: should the apodization be applied to the binary mask, or to (map * Binary_mask)?
def getMapValue(map, ra, dec, theta):
    nSide = hp.pixelfunc.npix2nside(map.size)
    # Extract the region around the source
    vec = hp.pixelfunc.ang2vec(np.pi / 2 - np.deg2rad(dec), np.deg2rad(ra))
    vec = np.array(vec)
    innerPixels = hp.query_disc(nSide, vec, radius=np.radians(theta))
    return innerPixels

def masking_map(map1, nside, npix, limit):
    mask = np.ones(hp.nside2npix(nside), dtype=np.float64)
    index = (map1 > limit)
    mask[index] = 0.0
    for ipix in xrange(0, npix):
        theta1, phi = hp.pixelfunc.pix2ang(nside, ipix)
        if 70. < np.degrees(theta1) < 110.:
            mask[ipix] = 0.0
    inner_pix = getMapValue(map1, 329.6, 17.5, 54.0)
    outer_pix = getMapValue(map1, 329.6, 17.5, 62.0)
    index = np.setdiff1d(outer_pix, inner_pix)
    index1 = []
    for ipix1 in index:
        theta, phi = hp.pixelfunc.pix2ang(nside, ipix1)
        if np.degrees(theta) < 90.0:
            if 0.0 < np.degrees(phi) < 60.0:
                index1.append(ipix1)
            if 320.0 < np.degrees(phi) < 360.0:
                index1.append(ipix1)
    index1 = np.asarray(index1)
    mask[index1] = 0.0
    return mask

def apodiz(mask, theta):
    apodiz_mask = hp.sphtfunc.smoothing(mask, fwhm=np.radians(theta),
                                        iter=3, use_weights=True,
                                        verbose=False)
    index = (apodiz_mask < 0.0)
    apodiz_mask[index] = 0.000
    return apodiz_mask

def main():
    filename = 'haslam408_dsds_Remazeilles2014.fits'
    NSIDE = 512
    input_map = loadMap(fname)
    NPIX = hp.pixelfunc.nside2npix(NSIDE)
    LIMIT = 50  # for 50K cut
    theta_ap = 5.0  # FWHM for Apodization in degrees
    Binary_mask = masking_map(input_map, NSIDE, NPIX, LIMIT)
    imp_map = apodiz(Binary_mask, theta_ap)
    masked_map = input_map * imp_map
    hp.mollview(apoMask, xsize=2000, coord=['G'], unit=r'$T_{B}(K)$', nest=False, title='%s' % key[count])
    hp.mollview(imp_map, xsize=2000, coord=['G'], unit=r'$T_{B}(K)$', nest=False, title='%s' % key[count])
    hp.mollview(masked_map, xsize=2000, coord=['G'], unit=r'$T_{B}(K)$', nest=False, title='408 MHz,%s' % key[count])
    hp.mollview(input_map * Binary_mask, xsize=2000, coord=['G'], unit=r'$T_{B}(K)$', nest=False, title='408 MHz,%s' % key[count])
    count += 1

if __name__ == "__main__":
    main()
This is the resulting map without apodization of the binary mask.
This is the resulting map after 5-degree apodization of the binary mask.
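A side note, which is an assumption on my part rather than something from the thread: hp.sphtfunc.smoothing spreads the 0-1 transition of the mask, so pixels that were fully masked (above the cut) receive non-zero weights, and weight times temperature can then still exceed the cut; the smoothing can also overshoot slightly outside [0, 1]. A minimal sketch that clips the smoothed mask back into the [0, 1] range before applying it:

import numpy as np
import healpy as hp

def apodize_mask(binary_mask, fwhm_deg):
    # smooth the 0/1 mask, then clip it back into [0, 1] so that
    # map * mask never exceeds the unmasked map values
    smoothed = hp.sphtfunc.smoothing(binary_mask, fwhm=np.radians(fwhm_deg))
    return np.clip(smoothed, 0.0, 1.0)

# hypothetical usage with the names from the post:
# apod = apodize_mask(Binary_mask, theta_ap)
# masked_map = input_map * apod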

Scipy - A better way to avoid a manual loop when the matrix is sparse

Logistic regression's objective function is

f(w) = (1/2) * w^T w + C * sum_i log(1 + exp(-y_i * w^T x_i))

and the gradient is

grad f(w) = w - C * sum_i (y_i * x_i) / (1 + exp(y_i * w^T x_i))

where w is a scipy csr sparse matrix of dimension n-by-1.
My question is: given a scipy csr sparse matrix and a numpy array, X_train and y_train respectively (each row of X_train is x_i, each element of y_train is y_i), is there a better way to calculate the gradient without using a manual for loop?
For further information, I'm implementing large-scale logistic regression, so performance is important.
Thanks.
Update 5/19 (adding my current code)
Thanks to @Jaime's reminder, here is my code. I basically want to see whether there is a better way to implement gradient(X, y, w).
import numpy as np
import scipy as sp
from sklearn import datasets
from numpy.linalg import norm
from scipy import sparse

eta = 0.01
xi = 0.1
C = 1

X_train, y_train = datasets.load_svmlight_file('lr/datasets/a9a')
X_test, y_test = datasets.load_svmlight_file('lr/datasets/a9a.t', n_features=X_train.shape[1])

def gradient(X, y, w):
    # w should be a col vector
    summation = w
    for i in range(X.shape[0]):
        exp_i = np.exp( y[i] * X.getrow(i).dot(w)[0, 0] )
        summation = summation - (y[i] / (1 + exp_i)) * X.getrow(i).T
    return summation

def hes_mul(X, D, s):
    # w and s should be a col vector
    # should return a col vector
    return s + C * X.T.dot( D.dot( X.dot(s) ) )

def cg(X, y, w):
    # gradF is col vector, so all of these are col vectors
    gradF = gradient(X, y, w)
    s = sparse.csr_matrix( np.zeros(X_train.shape[1]) ).T
    r = -1 * gradF
    d = r
    D = []
    for i in range(X.shape[0]):
        exp_i = np.exp( (-1) * y[i] * w.T.dot(X.getrow(i).T)[0, 0] )
        D.append(exp_i / ((1 + exp_i) ** 2))
    D = sparse.diags(D, 0)
    while True:
        r_norm = np.sqrt((r.data ** 2).sum())
        print r_norm
        print np.sqrt((gradF.data ** 2).sum())
        if r_norm <= xi * np.sqrt((gradF.data ** 2).sum()):
            return s
        hes_mul_d = hes_mul(X, D, d)
        alpha = (r_norm ** 2) / d.T.dot( hes_mul_d )[0, 0]
        s = s + alpha * d
        r = r - alpha * hes_mul_d
        beta = (r.data ** 2).sum() / (r_norm ** 2)
        d = r + beta * d

w = sparse.csr_matrix( np.zeros(X_train.shape[1]) ).T
s = cg(X_train, y_train, w)
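For what it's worth, a minimal sketch of how the per-row loop in gradient(X, y, w) could be replaced with sparse matrix-vector products, keeping y and w as dense 1-D numpy arrays instead of sparse column vectors (this reformulation is an assumption on my part, not tested against the code above):

import numpy as np

def gradient_vectorized(X, y, w):
    # gradient of 0.5*w'w + C*sum_i log(1 + exp(-y_i * x_i'w)) with C = 1,
    # computed without a Python loop
    # X: csr_matrix (m x n), y: numpy array (m,), w: numpy array (n,)
    margins = y * X.dot(w)                 # y_i * x_i'w for every row at once
    coeff = y / (1.0 + np.exp(margins))    # y_i / (1 + exp(y_i * x_i'w))
    return w - X.T.dot(coeff)              # w - sum_i coeff_i * x_i

With w dense, the conjugate-gradient routine would use np.linalg.norm(r) instead of the (r.data ** 2).sum() trick, since the residual is no longer a sparse matrix.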
