The average image of multiple images

I'm using this code but it doesn't work. Could you tell me what the problem is?
import glob, cv2
import numpy as np

def read_img(img_list, img):
    n = cv2.imread(img)
    img_list.append(n)
    return img_list

path = glob.glob("02291G0AR/*.bmp")
list_ = []
cv_image = [read_img(list_, img) for img in path]

for img in cv_image:
    cv2.imshow('image', img)
and the error is:
cv2.imshow('image',img)
TypeError: mat is not a numpy array, neither a scalar

I think you'll get on better with something like this:
#!/usr/bin/env python3

import glob, cv2
import numpy as np

# Load an image by name and return as Numpy array
def read_img(name):
    img = cv2.imread(name)
    return img

# Generate list of all image names
names = glob.glob("*.bmp")

# Load all images into list
images = [read_img(name) for name in names]

# Display all images in list
for img in images:
    cv2.imshow('image', img)
    cv2.waitKey()
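Since the question title asks for the average image, here is a minimal sketch of how the loaded list could be averaged, assuming all the .bmp files have identical dimensions (it reuses the images list and the cv2/numpy imports from the code above):

# Minimal sketch: average all loaded images, assuming they share the same shape
stack = np.stack(images).astype(np.float64)    # (N, H, W, C) array of all images
average = stack.mean(axis=0).astype(np.uint8)  # mean over the N images, back to 8-bit

cv2.imshow('average', average)
cv2.waitKey()
cv2.imwrite('average.png', average)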

Related

How to visualize a .pfm file as image?

I have a .pfm file which is a (.PF/.pf) file. I am trying to visualize it but I am unable to do so. Normally .pfm files contain a header of the form:
PF/pf
width height
scale=1
But my file has this header instead, and I am unable to visualize it as an image. Can anyone help me out? Any help is appreciated. This is the code I have tried so far:
Typ=Pic98::TPlane
Lines=750
Columns=1125
FirstLine=0
FirstColumn=0
import re
import numpy as np

file = open("PF file.PF", 'rb')
header = file.readline().rstrip().decode('utf-8')
if header == 'PF':
    raise Exception('Only ONE channel image is supported.')
elif header == 'Typ=Pic98::TPlane<float>':
    color = False
else:
    raise Exception('Not a PFM file.')
dim_match = re.match(r'(^(\w+).(\d+)$)\n(^(\w+).(\d+)\s$)',
                     file.readline().decode('ascii'))
if dim_match:
    width, height = map(int, dim_match.groups())
else:
    raise Exception('Malformed PFM header.')
if header == 'Typ=Pic98::TPlane<float>':
    scale = 1
    endian = '>'
else:
    scale = -scale
    endian = '<'
npImage = np.reshape(npImage, width, height)
npImage = np.flipud(npImage)
if ret_PIL:
    img = Image.fromarray(npImage, 'F')
    return img
return npImage
file.close()
Updated Answer
I have re-written my answer below in a slightly different, hopefully clearer style.
#!/usr/bin/env python3

import re
import cv2
import numpy as np
from PIL import Image

def readPF(filename):
    """Read named PF file into Numpy array"""
    # Slurp entire file into memory as binary 'bytes'
    with open(filename, 'rb') as f:
        data = f.read()
    # Check correct header, return None if incorrect
    if not re.match(b'Typ=Pic98::TPlane<float>', data):
        return None
    # Get Lines and Columns, both must be present, else return None
    L = re.search(b'Lines=(\d+)', data)
    C = re.search(b'Columns=(\d+)', data)
    if not (L and C):
        return None
    height = int(L.groups()[0])
    width = int(C.groups()[0])
    print(f'DEBUG: Height={height}, width={width}')
    # Take the data from the END of the file in case other header lines are added at the start
    na = np.frombuffer(data[-4*height*width:], dtype=np.dtype('<f4')).reshape((height,width))
    # Some debug stuff
    min, max, mean = na.min(), na.max(), na.mean()
    print(f'DEBUG: min={min}, max={max}, mean={mean}')
    return na

################################################################################
# Main
################################################################################
na = readPF('PF file.PF')

################################################################################
# Use either of the following to save the image:
################################################################################

# Save with OpenCV as scaled PNG
u16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', u16)

# Convert to PIL Image and save as TIFF
pi = Image.fromarray(na, mode='F')
pi.save('PIL.tif')
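If the aim is simply to look at the result on screen rather than save it, a minimal sketch for displaying the float array with OpenCV (assuming readPF() returned a valid array) could be:

# Minimal sketch: scale the float data into 0..255 and display it,
# assuming `na` is not None
vis = cv2.normalize(na, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow('PF file', vis)
cv2.waitKey(0)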
Original Answer
Not too sure what image I should be expecting, but here is a rough idea:
#!/usr/bin/env python3

import re
import cv2
import numpy as np
from PIL import Image

file = open("PF file.PF", 'rb')
header = file.readline().rstrip().decode('utf-8')
if header == 'PF':
    raise Exception('Only ONE channel image is supported.')
elif header == 'Typ=Pic98::TPlane<float>':
    color = False
else:
    raise Exception('Not a PFM file.')

while True:
    line = file.readline().decode('ascii')
    match = re.match('(\w+)=(\d+)', line)
    n, v = match.groups()
    if n == 'Lines':
        height = int(v)
        print(f'Height: {height}')
    if n == 'Columns':
        width = int(v)
        print(f'Width: {width}')
        break

# Seek backwards from the end of the file in case any clown has added something to the header
file.seek(-height*width*4, 2)

# Read remainder of file into Numpy array of floats and reshape
na = np.fromfile(file, dtype=np.float32).reshape((height,width))

# Some debug stuff
min, max, mean = na.min(), na.max(), na.mean()
print(f'DEBUG: min={min}, max={max}, mean={mean}')

################################################################################
# Use either of the following to save the image:
################################################################################

# Save with OpenCV as scaled PNG
u16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', u16)

# Convert to PIL Image and save as TIFF
pi = Image.fromarray(na, mode='F')
pi.save('PIL.tif')
The output is as follows:
Height: 750
Width: 1125
DEBUG: min=0.0, max=127881704.0, mean=1618343.625
Another possibility is to use ImageMagick to make it into a PNG. I get the following, and ImageMagick defaults to little-endian, so if this looks correct, your image is little-endian.
magick -define quantum:format=floating-point -depth 32 -size 1125x750+80 gray:"PF file.pf" -auto-level image.png
Keywords: Python, ImageMagick, image processing, float, float32, Numpy, PFM
I am using the cvkit tool https://github.com/roboception/cvkit on Mac and Ubuntu. It works very well to visualize '.pfm' files (e.g., disparity or depth maps saved as .pfm files).

Process depth image message from ROS with openCV

So I am currently writing a Python script that is supposed to receive a ROS image message and then convert it to cv2 so I can do further processing. Right now the program just receives an image, outputs it in a little window, and saves it as a PNG.
Here is my code:
#! /usr/bin/python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2

bridge = CvBridge()

def image_callback(msg):
    print("Received an image!")
    print(msg.encoding)
    try:
        # Convert your ROS Image message to OpenCV2
        # Converting the rgb8 image of the front camera, works fine
        cv2_img = bridge.imgmsg_to_cv2(msg, 'rgb8')
        # Converting the depth images, does not work
        #cv2_img = bridge.imgmsg_to_cv2(msg, '32FC1')
    except CvBridgeError, e:
        print(e)
    else:
        # Save your OpenCV2 image as a png
        cv2.imwrite('camera_image.png', cv2_img)
        cv2.imshow('pic', cv2_img)
        cv2.waitKey(0)

def main():
    rospy.init_node('image_listener')
    # does not work:
    #image_topic = "/pepper/camera/depth/image_raw"
    # works fine:
    image_topic = "/pepper/camera/front/image_raw"
    rospy.Subscriber(image_topic, Image, image_callback)
    rospy.spin()

if __name__ == '__main__':
    main()
So my problem is that my code works perfectly fine if I use the data of the front camera, but does not work for the depth images.
To make sure I get the correct encoding type, I used msg.encoding, which tells me the encoding of the current ROS message.
cv2.imshow works exactly like it should for the front camera pictures and shows me the same as I would get with ROS image_view, but as soon as I try it with the depth image I just get a fully black or white picture, unlike what image_view shows me.
Here is the depth image I get when I use image_view.
Here is the depth image I receive when I use the script and cv2.imshow.
Does anyone have experience working on depth images with cv2 and can help me get it working with the depth images as well?
I really would appreciate any help :)
Best regards
You could try the following way to acquire the depth images:
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy as np
import cv2

def convert_depth_image(ros_image):
    cv_bridge = CvBridge()
    try:
        depth_image = cv_bridge.imgmsg_to_cv2(ros_image, desired_encoding='passthrough')
    except CvBridgeError, e:
        print e
    depth_array = np.array(depth_image, dtype=np.float32)
    np.save("depth_img.npy", depth_array)
    rospy.loginfo(depth_array)
    # To save image as png
    # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    cv2.imwrite("depth_img.png", depth_colormap)
    # Or you use
    # depth_array = depth_array.astype(np.uint16)
    # cv2.imwrite("depth_img.png", depth_array)

def pixel2depth():
    rospy.init_node('pixel2depth', anonymous=True)
    rospy.Subscriber("/pepper/camera/depth/image_raw", Image, callback=convert_depth_image, queue_size=1)
    rospy.spin()

if __name__ == '__main__':
    pixel2depth()
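The reason cv2.imshow shows a fully black or white picture is that cv2.imshow expects 8-bit values in 0..255 (or floats in 0..1), while the raw depth values fall well outside that range, so everything is clipped. A minimal sketch of normalizing the array before display, assuming depth_array is the float32 array obtained in the callback above:

# Minimal sketch: scale the float depth values into 0..255 before displaying,
# assuming `depth_array` is the float32 array from the callback above
display = cv2.normalize(depth_array, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow('depth', display)
cv2.waitKey(1)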

How do I display a count of how many files I've converted?

from PIL import Image
import glob
import os

directory = r'C:\Users\Umar Iqbal\Desktop\\outputimages'

for image in glob.glob('./*.jpg'):
    img = Image.open(image)
    clean_name = os.path.splitext(image)[0]
    img.save(f'{directory}{clean_name}.png', 'png')
    print(f'{clean_name} was converted to PNG!')
glob.glob() returns a list, and you can use the built-in function len() to get the length of any list. So:
file_list = glob.glob('./*.jpg')

for image in file_list:
    img = Image.open(image)
    clean_name = os.path.splitext(image)[0]
    img.save(f'{directory}{clean_name}.png', 'png')
    print(f'{clean_name} was converted to PNG!')

print(f'{len(file_list)} files were converted')
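If you would also like a running count printed as each file is converted, here is a minimal sketch using enumerate, assuming the same file_list and directory as above:

for count, image in enumerate(file_list, start=1):
    img = Image.open(image)
    clean_name = os.path.splitext(image)[0]
    img.save(f'{directory}{clean_name}.png', 'png')
    print(f'{count}/{len(file_list)}: {clean_name} was converted to PNG!')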

Pytorch: load dataset of grayscale images

I want to load a dataset of grayscale images. I used ImageFolder but this doesn't load gray images by default as it converts images to RGB.
I found solutions that load images with ImageFolder and afterwards convert them to grayscale, using:
transforms.Grayscale(num_output_channels=1)
or
ImageOps.grayscale(image)
Is it correct?
How can I load grayscale images without conversion? I tried ImageDataBunch, but I have problems importing fastai.vision.
Assuming the dataset is stored in the "Dataset" folder as given below, set the root directory as "Dataset":
Dataset
    class_1
        img1.png
        img2.png
    class_2
        img1.png
        img2.png
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, random_split
from torchvision import transforms

root = 'Dataset/'

data_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
                                     transforms.ToTensor()])

dataset = ImageFolder(root, transform=data_transform)
For reference, the dataset is split into 70% train and 30% test.
# Split test and train dataset
train_size = int(0.7 * len(dataset))
test_size = len(dataset) - train_size
train_data, test_data = random_split(dataset, [train_size, test_size])
This dataset can be further divided into train and test data loaders as given below to perform operation in batches.
Usually you will see a single batch_size used for both the train and test loaders, but I try to define them separately here. The idea is to pick each batch_size so that it evenly divides the corresponding dataset size; otherwise the last batch will simply be smaller than the rest.
# Set batch size of train data loader
batch_size_train = 20
# Set batch size of test data loader
batch_size_test = 22
# load the split train and test data into batches via DataLoader()
train_loader = DataLoader(train_data, batch_size=batch_size_train, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size_test, shuffle=True)
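As a quick sanity check, here is a minimal sketch, assuming the loaders defined above: the batches coming out of the train loader should have a single channel thanks to the Grayscale transform.

# Minimal sketch: confirm the grayscale transform produced single-channel tensors
images, labels = next(iter(train_loader))
print(images.shape)  # expected: torch.Size([20, 1, H, W]) for batch_size_train = 20
print(labels.shape)  # expected: torch.Size([20])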
Yes, that is correct, and AFAIK Pillow by default loads images in RGB, see e.g. answers to this question. So conversion to grayscale is the only way, though it takes time of course.
Pure pytorch solution (if ImageFolder isn't appropriate)
You can roll your own data loading functionality, and if I were you I wouldn't go the fastai route as it's pretty high level and takes away control from you (you might not need those functionalities anyway).
In principle, all you have to do is to create something like this below:
import pathlib

import torch
from PIL import Image

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, path: pathlib.Path, images_class: int, regex="*.png"):
        self.files = [file for file in path.glob(regex)]
        self.images_class: int = images_class

    def __len__(self):
        # Needed so the datasets can be concatenated and used with DataLoader
        return len(self.files)

    def __getitem__(self, index):
        return Image.open(self.files[index]).convert("LA"), self.images_class

# Assuming you have `png` images, can modify that with regex
final_dataset = (
    ImageDataset(pathlib.Path("/path/to/dogs/images"), 0)
    + ImageDataset(pathlib.Path("/path/to/cats/images"), 1)
    + ImageDataset(pathlib.Path("/path/to/turtles/images"), 2)
)
The above would get you images from the provided paths, and each sample would return the image together with the class you provided.
This gives you more flexibility (a different folder layout than torchvision.datasets.ImageFolder expects) for a few more lines of code.
Of course, you could add more of those or use a loop or whatever else.
You could also apply torchvision.transforms, e.g. to transform the images above to tensors; see the sketch below.
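For illustration, here is a minimal sketch of how a transform could be plugged into such a dataset; the transform argument and the TransformedImageDataset name are my own additions, not part of the original answer:

import pathlib

import torch
from PIL import Image
from torchvision import transforms

# Hypothetical variant of the dataset above that accepts a transform
class TransformedImageDataset(torch.utils.data.Dataset):
    def __init__(self, path: pathlib.Path, images_class: int, transform=None, regex="*.png"):
        self.files = list(path.glob(regex))
        self.images_class = images_class
        self.transform = transform

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        image = Image.open(self.files[index]).convert("L")
        if self.transform is not None:
            image = self.transform(image)
        return image, self.images_class

# Usage: grayscale PIL image -> single-channel tensor
dataset = TransformedImageDataset(
    pathlib.Path("/path/to/dogs/images"), 0, transform=transforms.ToTensor()
)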
torchdata solution
Disclaimer, author here. If you are concerned about the loading times of your data and the grayscale transformation, you could use the torchdata third-party library for PyTorch.
Using it, one could create the same thing as above but use cache or map (to apply torchvision.transforms or other transformations easily) and some other features known e.g. from the tensorflow.data module; see below:
import pathlib

from PIL import Image
import torchdata

# Change inheritance
class ImageDataset(torchdata.Dataset):
    def __init__(self, path: pathlib.Path, images_class: int, regex="*.png"):
        super().__init__()  # And add constructor call and that's it
        self.files = [file for file in path.glob(regex)]
        self.images_class: int = images_class

    def __getitem__(self, index):
        return Image.open(self.files[index]), self.images_class

final_dataset = (
    ImageDataset(pathlib.Path("/path/to/dogs/images"), 0)
    + ImageDataset(pathlib.Path("/path/to/cats/images"), 1)
    + ImageDataset(pathlib.Path("/path/to/turtles/images"), 2)
).cache()  # will cache data in-memory after first pass

# You could apply transformations after caching for possible speed-up
torchvision ImageFolder loader
As correctly pointed out by @jodag in the comments, one can use a loader callable with a single path argument to do customized data opening; e.g. for grayscale it could be:
from PIL import Image
import torchvision

dataset = torchvision.datasets.ImageFolder(
    "/path/to/images", loader=lambda path: Image.open(path).convert("LA")
)
Please note you could also use it for other types of files; they don't have to be images.
Make a custom loader and feed it to ImageFolder:
import numpy as np
from PIL import Image, ImageOps
from torchvision.datasets import ImageFolder

def gray_reader(image_path):
    im = Image.open(image_path)
    im2 = ImageOps.grayscale(im)
    im.close()
    return np.array(im2)  # return np array
    # return im2          # return PIL Image

some_dataset = ImageFolder(image_root_path, loader=gray_reader)
Edit:
The code below is much better than the previous one: get the color image and convert it to grayscale in the transform().
from torchvision import transforms

def get_transformer(h, w):
    valid_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize((h, w)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])])
    return valid_transform
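For illustration, here is a minimal sketch of wiring the two parts of this answer together; the 'Dataset/' root path and the 224x224 target size are assumptions. Note that transforms.ToPILImage() expects an array or tensor, so it pairs with the gray_reader above, which returns a NumPy array:

from torchvision.datasets import ImageFolder

# Minimal sketch: custom grayscale loader combined with the transformer above
# ('Dataset/' and the 224x224 size are assumed for the example)
dataset = ImageFolder('Dataset/', loader=gray_reader, transform=get_transformer(224, 224))
image, label = dataset[0]
print(image.shape)  # expected: torch.Size([1, 224, 224])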

How to load images from a directory on the computer in Python

Hello, I am new to Python and I wanted to know how I can load images from a directory on the computer into a Python variable.
I have a set of images in a folder on disk and I want to display these images in a loop.
You can use PIL (Python Imaging Library) http://www.pythonware.com/products/pil/ to load images.
Then you can make a script to read images from a directory and load them into Python, something like this:
#!/usr/bin/python
from os import listdir
from PIL import Image as PImage

def loadImages(path):
    # return array of images
    imagesList = listdir(path)
    loadedImages = []
    for image in imagesList:
        img = PImage.open(path + image)
        loadedImages.append(img)
    return loadedImages

path = "/path/to/your/images/"

# your images in an array
imgs = loadImages(path)

for img in imgs:
    # you can show every image
    img.show()
pip install ThreadedFileLoader
You can use the ThreadedFileLoader module. It uses threading to load images.
from ThreadedFileLoader.ThreadedFileLoader import *
instance = ThreadedImageLoader("path_to_folder/*.jpg")
instance.start_loading()
images = instance.loaded_objects
print(len(images))
print(images[0].shape)
You can use the glob and imageio Python packages to achieve the same. Below is code in Python 3:
import glob
import imageio

for image_path in glob.glob("<your image directory path>\\*.png"):
    im = imageio.imread(image_path)
    print(im.shape)
    print(im.dtype)
If you have images in your Google Drive and want to load, resize and save the images, then the following code works well.
import os, sys
from os import listdir
from PIL import Image
from google.colab import drive
import matplotlib.pyplot as plt

drive.mount('/content/gdrive')
# need to enter password to access your google drive
from google.colab import files

main_dir = "/content/gdrive/My Drive/Panda/"
files = listdir(main_dir)

# you can change file extension below to read other image types
images_list = [i for i in files if i.endswith('.jpg')]  ## output file names only

for idx, image in enumerate(images_list):
    print(idx)
    img = Image.open(main_dir + image)
    #print(img.size)
    #plt.imshow(img)
    img = img.resize((480, 600))
    img.save(main_dir + image)
