How to convert numpy array into image using PIL?

The following code gives me black images and I can't understand why:
Imports:
import numpy as np
from PIL import Image
Code:
arr2 = np.zeros((200,200), dtype=int)
arr2[80:120,80:120]=1
im = Image.fromarray(arr2,mode="1")
im.save("C:/Users/Admin/Desktop/testImage.jpg")

Passing mode="1" together with a full-sized integer array makes Pillow misread the underlying buffer (mode "1" expects packed 1-bit data), which is why the output comes out black. I think you want something more like this, using Boolean True and False so Pillow picks the 1-bit mode itself:
import numpy as np
from PIL import Image
# Create black 1-bit array
arr2 = np.full((200,200), False, dtype=bool)
# Set some bits white
arr2[80:120,80:120]=True
im = Image.fromarray(arr2)
im.save('a.png')
print(im)
<PIL.Image.Image image mode=1 size=200x200 at 0x103FF2770>
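If you would rather keep working with an integer array, a possible alternative (just a sketch of the same idea) is to scale it to the 0-255 range and let Pillow treat it as an 8-bit grayscale image:
import numpy as np
from PIL import Image

# 0/255 integer array, so Pillow can interpret it as 8-bit grayscale
arr2 = np.zeros((200, 200), dtype=np.uint8)
arr2[80:120, 80:120] = 255
im = Image.fromarray(arr2, mode="L")
im.save("testImage.png")   # PNG keeps the hard edges; JPEG compression softens them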

Related

Trying to create image from appended array

I have a list of images in a directory. I am trying to extract a column from each image (each image is 403 px by 1288 px by 3 bands) and sequentially build an array from these columns using numpy append, which I then want to save as an image. I'm trying to use numpy and Pillow to make an image from this appended array.
I have researched the Pillow and NumPy documentation.
#!/usr/bin/python3
import numpy as np
from numpy import array
from PIL import Image
import os, time, sys, subprocess

savpath = 'C:/data/marsobot/spectral/pushbroom/zwoexperiments/fullsuntheframes/'
os.chdir('C:/data/marsobot/spectral/pushbroom/zwoexperiments/fullsuntheframes/')
toappendarr = np.empty([403, 1288, 3])
for root, dirs, files in os.walk(".", topdown=False):
    for name in files:
        img = Image.open(name)
        arr = array(img)
        value = arr[:, 300, 1]
        toappendarr = np.append(toappendarr, value, axis=1)
        print(toappendarr.shape)
imgout = Image.fromarray(arr)
imgout.save("output.jpg")
I expected an image but instead I got:
ValueError: all the input arrays must have same number of dimensions
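The toappendarr you start with is 3-D (403, 1288, 3) while each extracted column arr[:, 300, 1] is 1-D (403,), and np.append cannot join arrays with different numbers of dimensions along axis 1. A minimal sketch of one way around this, assuming the folder only contains the image frames and every image really is 403 x 1288 x 3 (all three bands of the column are kept so the result can be saved as an RGB image):
import os
import numpy as np
from PIL import Image

folder = 'C:/data/marsobot/spectral/pushbroom/zwoexperiments/fullsuntheframes/'
columns = []                            # one column per image

for name in sorted(os.listdir(folder)):
    img = Image.open(os.path.join(folder, name))
    arr = np.array(img)                 # shape (403, 1288, 3)
    columns.append(arr[:, 300, :])      # column 300, all bands -> (403, 3)

# stack the columns side by side -> (403, number_of_images, 3)
pushbroom = np.stack(columns, axis=1).astype(np.uint8)
Image.fromarray(pushbroom).save('output.jpg')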

How can I convert this image to grayscale in Python 3.6?

I have this code, where myImage is a BGR image:
from skimage import morphology

bPlane = myImage[:, :, 0] - 0.5 * myImage[:, :, 2] - 0.5 * myImage[:, :, 1]
purple = bPlane > 20
purple2 = morphology.remove_small_objects(purple, 400)
How can I convert "purple2" to a grayscale image in Python 3.6?
If your image is already a NumPy array (arr), a possible solution would be:
import numpy as np
from PIL import Image

arr = np.random.rand(100, 100)                       # float values in [0, 1)
im = Image.fromarray((arr * 255).astype(np.uint8))   # scale to 0-255 so no detail is lost
gray = im.convert('L')                               # 8-bit grayscale image
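Applied to a boolean mask like purple2 from the question, a minimal sketch (the mask below is just a stand-in for the real purple2):
import numpy as np
from PIL import Image

purple2 = np.zeros((100, 100), dtype=bool)   # stand-in for the real purple2 mask
purple2[30:60, 30:60] = True

# scale the boolean mask to 0/255 so it becomes an 8-bit grayscale image
gray = Image.fromarray((purple2 * 255).astype(np.uint8), mode='L')
gray.save('purple2_gray.png')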

When resizing an image set, only one image is resized

When I try to resize an image set, only the first image gets resized. How can I resize all the images? This is my code:
import numpy as np
import os
import cv2
pic_num = 1
img = cv2.imread("E:\ele/"+str(pic_num)+'.jpg',cv2.IMREAD_GRAYSCALE)
resized_image = cv2.resize(img,(100,100))
cv2.imwrite("E:\eye/"+str(pic_num)+'.jpg',resized_image)
pic_num += 1
If you are just looping through the files to change them, and you are not worried about speed, you can use a plain Python for loop. For example, if your pictures are numbered 1 to 100, you can do the following:
import cv2

for pic_num in range(1, 101):    # covers pictures 1 to 100 inclusive
    img = cv2.imread("E:/ele/" + str(pic_num) + '.jpg', cv2.IMREAD_GRAYSCALE)
    resized_image = cv2.resize(img, (100, 100))
    cv2.imwrite("E:/eye/" + str(pic_num) + '.jpg', resized_image)

How to select irregular shapes in an image

Using Python code we are able to create image segments as shown in the screenshot. Our requirement is: how do we select a specific segment in the image and apply a different color to it?
The following is our Python snippet:
from skimage import io, measure, restoration
from skimage.segmentation import felzenszwalb, slic, quickshift
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import matplotlib.pyplot as plt
image = img_as_float(io.imread("leaf.jpg"))
segments = quickshift(image, ratio=1.0, kernel_size=20, max_dist=10,return_tree=False, sigma=0, convert2lab=True, random_seed=42)
fig = plt.figure("Superpixels -- %d segments" % (500))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(image, segments))
plt.axis("off")
plt.show()
To color a specific segment, do this:
import numpy as np

seg_num = 64                          # desired segment to be colored
color = np.array([1.0, 0.0, 0.0])     # red, in the 0-1 float range used by img_as_float
image[segments == seg_num] = color    # assign the color to every pixel of that segment
You can also use the OpenCV Python module.
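A rough sketch of the same recoloring with OpenCV, assuming image and segments come from the snippet above and that segment 64 is the one to highlight:
import cv2
import numpy as np

# convert the 0-1 float RGB image to 8-bit BGR, which is what OpenCV expects
bgr = cv2.cvtColor((image * 255).astype(np.uint8), cv2.COLOR_RGB2BGR)
bgr[segments == 64] = (0, 0, 255)        # paint segment 64 red (BGR order)
cv2.imwrite("colored_segment.png", bgr)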

pyplot.imsave() saves image correctly but cv2.imwrite() saved the same image as black

from scipy.misc import imread
from matplotlib import pyplot
import cv2
from cv2 import cv
from SRM import SRM ## Module for Statistical Regional Segmentation
im = imread("lena.png")
im2 = cv2.imread("lena.png")
print type(im), type(im2), im.shape, im2.shape
## Prints <type 'numpy.ndarray'> <type 'numpy.ndarray'> (120, 120, 3) (120, 120, 3)
srm = SRM(im, 256)
segmented = srm.run()
srm2 = SRM(im2, 256)
segmented2 = srm2.run()
pic = segmented/256
pic2 = segmented2/256
pyplot.imshow(pic)
pyplot.imsave("onePic.jpg", pic)
pic = pic.astype('uint8')
cv2.imwrite("onePic2.jpg", pic2)
pyplot.show()
onePic.jpg gives the correct segmented image but onePic2.jpg gives a complete black image.
Converting the datatype to uint8 using pic = pic.astype('uint8') did not help. It still gives a black image!
onePic.jpg using pyplot.imsave():
onePic2.jpg using cv2.imwrite():
Please help!
Before converting pic to uint8, you need to multiply it by 255 to get the correct range.
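For example, a minimal sketch of that fix (the random array below just stands in for the 0-1 float segmentation result):
import cv2
import numpy as np

pic2 = np.random.rand(120, 120, 3)             # stand-in for the 0-1 float result
pic2_uint8 = (255 * pic2).astype(np.uint8)     # scale to the 0-255 range cv2.imwrite expects
cv2.imwrite("onePic2.jpg", pic2_uint8)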
Although I agree with @sansuiso, in my case I found a possible edge case where my images were being shifted either one bit up or one bit down in the scale.
Since we're dealing with unsigned ints, a single shift means a possible underflow/overflow, and this can corrupt the whole image.
I found cv2's convertScaleAbs with an alpha value of 255.0 to yield better results.
import cv2 as cv

def write_image(path, img):
    # img = img * (2**16 - 1)
    # img = img.astype(np.uint16)
    # img = img.astype(np.uint8)
    img = cv.convertScaleAbs(img, alpha=(255.0))
    cv.imwrite(path, img)
This answer goes into more detail.
I encountered a similar situation with face detection. I wonder if there is a better way to do this, but here is my solution as a reference.
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
# import image and output
img_path = "image.jpg"
detected_face = DeepFace.detectFace(img_path, target_size = (128, 128))
plt.imshow(detected_face)
# image color scaling and saving
detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2RGB)
detected_face = cv2.convertScaleAbs(detected_face, alpha=(255.0))
cv2.imwrite("image_thumbnail.jpg", detected_face)
