How to convert a numpy array to a greyscale image?

I am trying to convert an 8-by-8 numpy array in binary format (0 represents black and 1 represents white) to an image. This is what I run:
from PIL import Image
data = Image.fromarray(array) # array is an 8x8 binary numpy array
data.save('dummy_pic.png')
But in the output I get a fully black square. Could anyone give me a hand, please?

The black square is probably very dark gray: your array most likely has the uint8 datatype, whose range is 0-255, not 0-1 like a binary array, so a value of 1 is nearly black. Try changing the array's datatype to bool, or scale the values to the 0-255 range.
Here is a code snippet in which a binary array is generated and displayed. If you scale by a smaller value, the circle will become darker.
from PIL import Image
import numpy as np
# Generate a binary array that represents a circle
radius = 0.9
size = 100
x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
f = np.vectorize(lambda x, y: 1.0 if x*x + y*y < radius*radius else 0.0)
array = f(x, y)
# Solution 1:
# convert the array to the bool datatype
array = array.astype(bool)
# Solution 2 (either solution alone is enough; True values also scale to 255):
# scale the values to the uint8 maximum, 255
array = (array * 255).astype(np.uint8)
img = Image.fromarray(array)
display(img)  # display() works in Jupyter; use img.show() in a plain script
Result: a white circle on a black background

Related

How to do data augmentation on an 8 by 8 greyscale image?

I want to do data augmentation on an 8*8-pixel greyscale image with the code below in Keras (the pixel values are only 0 and 1):
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from PIL import Image

datagen = ImageDataGenerator(
    rotation_range=45,      # Random rotation between 0 and 45 degrees
    width_shift_range=0.2,  # Fractional shift
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')    # Also try constant, reflect, wrap

# forming a binary 8*8 array
array = np.array([[0,0,0,0,0,0,0,0],
                  [0,0,0,0,0,0,0,0],
                  [0,0,1,1,1,0,0,0],
                  [0,0,0,1,1,1,0,0],
                  [0,0,0,0,0,0,0,0],
                  [0,0,0,0,0,0,0,0],
                  [0,0,0,0,0,0,1,0],
                  [0,0,0,0,0,0,0,0]])

# scale values to the uint8 maximum 255, and convert it to a greyscale image
array = ((array) * 255).astype(np.uint8)
x = Image.fromarray(array)

i = 0
for batch in datagen.flow(x, batch_size=16,
                          save_to_dir='augmented',
                          save_prefix='aug',
                          save_format='png'):
    i += 1
    if i > 20:
        break  # otherwise the generator would loop indefinitely
But I get this error in the output (when I call the .flow function):
ValueError: ('Input data in `NumpyArrayIterator` should have rank 4. You passed an array with shape', (8, 8))
Could anyone give me a hand, please?
ImageDataGenerator expects a 4-dimensional tensor as input, where the first dimension is the sample index and the last dimension holds the color channels. In your code you should convert the (8, 8) array to a (1, 8, 8, 1) tensor. This can be done with
array = np.expand_dims(array, (0, -1))
Also, you should not convert the array to an image before passing it to the generator, as you did here:
x = Image.fromarray(array)
You should simply pass the array to the generator, as in the sketch below.
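Putting both fixes together, a minimal sketch of the corrected pipeline could look like this (assuming the same datagen and array as in the question, and that the augmented output directory already exists):
import numpy as np

# Scale the binary array to 0-255 and add the sample and channel dimensions: (8, 8) -> (1, 8, 8, 1)
x = (array * 255).astype(np.uint8)
x = np.expand_dims(x, (0, -1))

i = 0
for batch in datagen.flow(x, batch_size=16,
                          save_to_dir='augmented',
                          save_prefix='aug',
                          save_format='png'):
    i += 1
    if i > 20:
        break  # otherwise the generator would loop indefinitely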

Add Gaussian noise to a binary image knowing noise variance or SNR in python

I am using Python OpenCV for image restoration, and in the first step I have to add Gaussian noise to my "binary" image. My image pixel values are integers that can take the values 0 or 1. How can I add Gaussian noise to my image given an SNR or a noise variance?
I came up with this piece of code, which flips each pixel with probability 0.15, but I don't know whether this noise is actually Gaussian, or how I would find its variance and SNR.
def add_noise(im):
    im_noisy = im.copy()
    for i in range(len(im_noisy)):
        for j in range(len(im_noisy[0])):
            r = np.random.rand()
            if r < 0.15:
                im_noisy[i][j] = -im_noisy[i][j] + 1  # converts 0 to 1 and vice versa
    return im_noisy
Since the image is binary, the solution is not well defined: we have to select a threshold such that noise values above it become 1.
The Gaussian noise is supposed to be symmetric, so noise values below the negative threshold become -1.
We may define the threshold in terms of sigmas (for example: above 2 sigma the noise is 1, and below -2 sigma the noise is -1).
When selecting a threshold of 2 sigma, we statistically expect about 2.3% of the pixels to become 1 and about 2.3% to become -1 (see the quick check below).
Select a lower threshold for a higher percentage.
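As a quick sanity check on that percentage (a small sketch, not part of the original answer), you can compare the empirical tail fraction of a standard normal sample with the analytic value from scipy:
import numpy as np
from scipy.stats import norm

# Fraction of standard-normal samples that exceed 2 sigma (one tail)
samples = np.random.normal(0, 1, 1_000_000)
empirical = np.mean(samples > 2)
analytic = norm.sf(2)  # survival function: P(Z > 2)

print(f"empirical: {empirical:.4f}, analytic: {analytic:.4f}")  # both about 0.023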
Create a random normal (Gaussian) noise image with mean = 0 and sigma = 1:
sigma = 1
gauss = np.random.normal(0, sigma, im.shape) # Create random normal (Gaussian) distribution image with mean=0 and sigma=1.
Convert the values to -1, 0, 1: assume noise values above 2 sigma become "1", values below -2 sigma become "-1", and everything else becomes "0" (2 sigma is just an example; we may select another value):
binary_gauss = (gauss > 2*sigma).astype(np.int8)
binary_gauss[gauss < -2*sigma] = -1
After adding binary_gauss to im, clip the result to the range [0, 1]:
noisey_im = (im + binary_gauss).clip(0, 1)
Code sample (the first part reads a sample image and converts it to binary):
import numpy as np
import cv2 # Used only for testing
im = cv2.imread('chelsea.png', cv2.IMREAD_GRAYSCALE) # Read input image (for testing).
im = cv2.threshold(im, 0, 1, cv2.THRESH_OTSU)[1] # Convert image to binary (for testing).
im = im.astype(np.int8) # Convert to type int8 (8-bit signed)
sigma = 1
gauss = np.random.normal(0, sigma, im.shape) # Create random normal (Gaussian) distribution image with mean=0 and sigma=1.
binary_gauss = (gauss > 2*sigma).astype(np.int8) # Convert to binary - assume pixels with value above 2 sigmas are "1".
binary_gauss[gauss < -2*sigma] = -1 # Set all pixels below 2 sigma to "-1".
noisey_im = (im + binary_gauss).clip(0, 1) # Add the noise image, and clip the result to [0, 1].
noisey_im = noisey_im.astype(np.uint8) # Convert to type uint8
# Show images (for testing).
cv2.imshow('im', im*255)
cv2.imshow('binary_gauss', (binary_gauss+1).astype(np.uint8)*127)
cv2.imshow('noisey_im', noisey_im*255)
cv2.waitKey()
cv2.destroyAllWindows()
im (input image after converting to binary):
binary_gauss (noise image after threshold - values are -1, 0, 1):
noisey_im (im + binary_gauss after clipping):

Why is the whole picture turning red?

We've been trying to fix this program for hours, yet nothing seems to work; we just can't figure out what the problem is. It is supposed to make the whole picture black and white, except for the red pixels. (https://imgur.com/a/gxRm3N1 shows how it should look afterwards.)
Here is the result after using the program; the whole picture turns red:
https://imgur.com/a/yWYVoIx
How can I fix this?
from image_helper import *

img = load_rgb_image("ara.jpg")
w, h = img.size
pixels = load_rgb_pixels("ara.jpg")
# img.show()

for i in range(w*h):
    r, g, b = pixels[i]
    new_r = 2*r
    new_g = g // 2
    new_b = b + 10
    pixels[i] = (new_r, new_g, new_b)

new_img = new_rgb_image(w, h, pixels)
new_img.show()
There is an excellent solution implemented in MATLAB.
I was tempted to translate the code from MATLAB to Python (using OpenCV). The steps are:
Convert the image to HSV color space.
Select the "non-red" pixels. In HSV color space, the first channel is the hue; there is a range of hue values for pixels that are considered red.
Set the saturation channel of the selected pixels to 0 (pixels with zero saturation are gray).
Convert the image back from HSV to BGR color space.
Here is the Python code (a conversion of the original MATLAB code):
import cv2
# The following code is a Python conversion to the following MATLAB code:
# Original MATLAB code: https://stackoverflow.com/questions/4063965/how-can-i-convert-an-rgb-image-to-grayscale-but-keep-one-color
img = cv2.imread('roses.jpg') # Load image.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # Convert the image to HSV color space.
h = hsv[:, :, 0] # Get the hue plane. Note: in OpenCV the hue range is [0, 179]; the original MATLAB code uses 360.*hsvImage(:, :, 1), where the hue range is [0, 1].
s = hsv[:, :, 1] # Get the saturation plane.
non_red_idx = (h > 20//2) & (h < 340//2) # Select "non-red" pixels (divide the original MATLAB values by 2 due to the range differences).
s[non_red_idx] = 0 # Set the selected pixel saturations to 0.
hsv[:, :, 1] = s # Update the saturation plane.
new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) # Convert the image back to BGR space
# Show images for testing:
cv2.imshow('img', img)
cv2.imshow('new_img', new_img)
cv2.waitKey()
cv2.destroyAllWindows()
cv2.imwrite('new_img.jpg', new_img)
Result:
Notes:
For the method used for selecting the color range, refer to the original post.
The referenced post also has a simpler solution, using a plain for loop (with inferior results), that more closely resembles your code; a rough sketch of that approach is given after these notes.
Consider using this code as a reference.
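For illustration, here is a rough, untested sketch of what such a per-pixel loop might look like with PIL; the "red" test used here (r > 100 and r > 2*g and r > 2*b) is an arbitrary assumption, not taken from the referenced post:
from PIL import Image

img = Image.open('roses.jpg').convert('RGB')
pixels = img.load()
w, h = img.size

for y in range(h):
    for x in range(w):
        r, g, b = pixels[x, y]
        # Crude "red" test: the thresholds are arbitrary assumptions
        if not (r > 100 and r > 2 * g and r > 2 * b):
            grey = (r + g + b) // 3  # simple average greyscale
            pixels[x, y] = (grey, grey, grey)

img.save('new_img_loop.jpg')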

What's this brightness function

The brightness of an image can be measured by the function below, as mentioned in this paper.
The paper doesn't explain Cr, Cg and Cb. Can anyone explain this function?
Thanks in advance.
Cr: Red channel
Cg: Green Channel
Cb: Blue Channel
The coefficients (0.241, 0.691, 0.068) are weights used to calculate a luminance-like value.
For example:
If you have a color (RGB) image and you want to convert it to greyscale, you extract each channel from the image and compute:
greyscale = (0.2126 * Cr) + (0.7152 * Cg) + (0.0722 * Cb)
Those greyscale coefficients are recommended by ITU-R BT.709 and are the standard for HDTV.
For the perceived-brightness formula in question, however, the accepted coefficients are 0.241, 0.691, and 0.068.
UPDATE:
Check more about new coefficients here.
You can print the brightness values:
import cv2
import numpy as np
# img will be BGR image
img = cv2.imread("samurgut3.jpeg")
# When we square the values, overflow would occur with the uint8 type, so convert to float64 first
img = np.float64(img)
# Extract each channel
Cr = img[:, :, 2]
Cg = img[:, :, 1]
Cb = img[:, :, 0]
# Get the image dimensions (shape[0] is the number of rows, shape[1] the number of columns)
Height = img.shape[0]
Width = img.shape[1]
# I don't think the width and height really belong in this per-pixel formula
brightness = np.sqrt((0.241 * (Cr**2)) + (0.691 * (Cg**2)) + (0.068 * (Cb**2))) / (Width * Height)
#We convert float64 to uint8
brightness =np.uint8(np.absolute(brightness))
print(brightness)
Output:
[[4.42336257e-05 4.09825832e-05 4.09825832e-05 ... 3.44907525e-05
4.13226678e-05 4.13226678e-05]
[4.09825832e-05 4.09825832e-05 4.09825832e-05 ... 3.44907525e-05
4.13226678e-05 4.13226678e-05]
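If, as the comment in the code suspects, the division by Width * Height does not belong in the per-pixel formula, a minimal sketch of the computation without it (an assumption on my part, not confirmed by the paper) would be:
import cv2
import numpy as np

img = cv2.imread("samurgut3.jpeg").astype(np.float64)  # OpenCV loads in BGR order
Cb, Cg, Cr = img[:, :, 0], img[:, :, 1], img[:, :, 2]

# Per-pixel perceived brightness, no division by the image size
brightness = np.sqrt(0.241 * Cr**2 + 0.691 * Cg**2 + 0.068 * Cb**2)

# A single number for the whole image: the mean over all pixels
print(brightness.mean())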
The squaring is probably an attempt to convert from gamma-mapped values to linear values.
A somewhat better exponent would be something around 1/0.45 (~2.22), or 2.2, or 2.4.
An even better operation would be to find out the actual gamma curve and apply that. sRGB is defined piecewise and does not exactly match a simple exponentiation; a sketch of the piecewise conversion is given below.
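For reference, here is a minimal sketch (not from the original answer) of the piecewise sRGB-to-linear conversion alluded to above, compared with a plain power of 2.2:
import numpy as np

def srgb_to_linear(c):
    """Piecewise sRGB EOTF; c holds values in [0, 1]."""
    c = np.asarray(c, dtype=np.float64)
    return np.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)

c = np.array([0.0, 0.04, 0.2, 0.5, 1.0])
print(srgb_to_linear(c))  # piecewise definition
print(c ** 2.2)           # simple power-law approximation: close, but not identical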

how to convert bayerrg8 format image to rgb image

I've got a camera that provides images in Bayer RG8 format.
I'm using skimage for processing images, but I could not find a way to convert the Bayer RG8 format to standard RGB (to display on screen).
Is there any way to do this with skimage?
I did find a reference to opencv conversion, but I'm trying to avoid including opencv in my app (unless it is absolutely necessary).
As you have not provided any input data, I took the greyscale image from here and made it into a raw Bayer8 file with GBRG ordering using ImageMagick as follows:
magick mandi.png -trim -depth 8 gray:bayer.bin
which gives me a 1013x672-pixel file of 680,736 bytes.
Then I read it and made it into an image that skimage can understand, like this:
#!/usr/bin/env python3
import numpy as np
from skimage.io import imsave
# Width and height of the Bayer image (the demosaiced output will be w/2 x h/2)
w, h = 1013, 672
ow, oh = w//2, h//2
# Load in Bayer8 image, assumed raw 8-bit GBRG
bayer = np.fromfile('bayer.bin', dtype=np.uint8).reshape((h,w))
# Pick up raw uint8 samples
R = bayer[1::2, 0::2] # rows 1,3,5,7 columns 0,2,4,6
B = bayer[0::2, 1::2] # rows 0,2,4,6 columns 1,3,5,7
G0 = bayer[0::2, 0::2] # rows 0,2,4,6 columns 0,2,4,6
G1 = bayer[1::2, 1::2] # rows 1,3,5,7 columns 1,3,5,7
# Chop any left-over edges and average the 2 Green values
R = R[:oh,:ow]
B = B[:oh,:ow]
G = G0[:oh,:ow]//2 + G1[:oh,:ow]//2
# Formulate image by stacking R, G and B and save
out = np.dstack((R,G,B))
imsave('result.png',out)
And get this:
Copyright Mathworks, Inc.
Of course, there are more sophisticated methods of interpolating, but this is the most basic and you are welcome to take it and improve it!
OK, I had some time and I tried to do a 2D interpolation of the missing values in the Bayer array. I am not 100% confident of my answer, but I think it should be pretty close.
Basically, I copy the original Bayer array at full resolution, overwrite all green and blue samples with NaN, and call that Red. Then I do a 2D interpolation to replace the NaNs.
Same again for green and blue, which gives this:
#!/usr/bin/env python3
import numpy as np
from skimage.io import imsave
from scipy.interpolate import griddata
def interp2d(im):
    """Interpolate in a 2D array, replacing NaNs with interpolated values"""
    x, y = np.indices(im.shape)
    im[np.isnan(im)] = griddata(
        (x[~np.isnan(im)], y[~np.isnan(im)]),
        im[~np.isnan(im)],
        (x[np.isnan(im)], y[np.isnan(im)]))
    im = np.nan_to_num(im)
    return np.clip(im, 0, 255)
# Width and height of Bayer image
w, h = 1013, 672
# Calculate output width and height as multiples of 4
ow = (w//4) * 4
oh = (h//4) * 4
# Load in Bayer8 image, assumed raw 8-bit GBRG, reshape and make sides multiple of 4
bayer = np.fromfile('bayer.bin', dtype=np.uint8).reshape((h,w)).astype(float)[:oh, :ow]
# In following code you'll see "cell" which is the basic repeating 2x2 cell of a Bayer matrix
#
# cell = G B
# R G
#
# Set everything that is not Red in the bayer array to NaN, then replace the NaNs by interpolation
cell = np.array([[np.nan, np.nan],
                 [1.0,    np.nan]])
R = bayer*np.tile(cell,(oh//2,ow//2))
R = interp2d(R).astype(np.uint8)
# Set everything that is not Green in the bayer array to NaN, then replace the NaNs by interpolation
cell = np.array([[1.0,    np.nan],
                 [np.nan, 1.0]])
G = bayer*np.tile(cell,(oh//2,ow//2))
G = interp2d(G).astype(np.uint8)
# Set everything that is not Blue in the bayer array to NaN, then replace the NaNs by interpolation
cell = np.array([[np.nan, 1.0],
                 [np.nan, np.nan]])
B = bayer*np.tile(cell,(oh//2,ow//2))
B = interp2d(B).astype(np.uint8)
# Form image by stacking R, G and B and save
imsave('result.png',np.dstack((R,G,B)))
Keywords: Python, bayer, bayer8, debayer, de-bayer, de-mosaic, de-mosaicking, image, raw, CFA, skimage, scikit-image, image processing.
