I have a program that can read a single video and convert it into images. However, I actually have many videos, and I would like to write a function that reads the videos one by one and converts each of them into images. I aim to convert the videos into images so as to facilitate further processing. The code for converting a single video is as follows.
import sys
import argparse
import cv2
# --- Convert ONE video into a sequence of numbered JPEG frames ---
import os

video_path = 'E:/DATA/1/12.mp4'
path_out = "E:/DATA_Synth/12"

# cv2.imwrite silently fails when the target folder is missing, so create it up front.
os.makedirs(path_out, exist_ok=True)

vidcap = cv2.VideoCapture(video_path)
success, image = vidcap.read()
count = 0
while success:
    # %03d zero-pads the index so the frame files sort correctly on disk.
    cv2.imwrite(path_out + "/frame%03d.jpg" % count, image)  # save frame as JPEG file
    success, image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
vidcap.release()  # the original never released the capture handle
Any suggestions and comments would be highly appreciated
You should create a loop to iterate through your folder which contains all your videos and pick each video and run your above code.
import os
import sys
import argparse
import cv2
directory = "enter your folder directory for video"
# Iterate over every entry in the folder; a guard clause replaces the
# original's redundant `else: continue` (falling through already continues).
for vidfile in os.listdir(directory):
    if not vidfile.endswith('.mp4'):
        continue  # skip anything that is not an .mp4 video
    vidcap = cv2.VideoCapture(os.path.join(directory, vidfile))
    ## write your code here for converting the video to individual frames
I have updated the codes as follows.
import os
import sys
import argparse
import cv2
directory = "E:/Training/video"
path_out = "E:/DATA_Synth/12"

os.makedirs(path_out, exist_ok=True)  # make sure the shared output folder exists

for vidfile in os.listdir(directory):
    if not vidfile.endswith('.avi'):
        continue  # skip non-video files

    vidcap = cv2.VideoCapture(os.path.join(directory, vidfile))

    # FIX: embed the video's base name in every frame file. With a plain
    # "frame%03d.jpg" the counter restarts at 000 for each video and every
    # video overwrites the previous one's frames in the shared folder --
    # which is why only the last video's frames survived.
    video_name = os.path.splitext(vidfile)[0]

    success, image = vidcap.read()
    count = 0
    while success:
        cv2.imwrite(path_out + "/%s_frame%03d.jpg" % (video_name, count), image)  # save frame as JPEG file
        success, image = vidcap.read()
        count += 1
    vidcap.release()  # free the capture before moving to the next video
However, only the frames from the last video in my directory are saved. How can I modify the code so that the frame filename, as written here
cv2.imwrite(path_out + "/frame%d.jpg" % count, image)
may also contain the name of the corresponding video.
Related
I'm trying to create and save a .gif from a set of 3 images inside a folder, called V_N1.png, V_M2.png and V_M3.png.
I'm using matplotlib.animation, but when I open the saved .gif it only shows the last image — can someone help me?
Thanks in advance!
I'm not using ImageMagickWriter since it says:
MovieWriter ImageMagickWriter unavailable; using Pillow instead.
Below the code.
import matplotlib.animation as animation
import matplotlib.pyplot as plt
nframes = 3
imgFolder = "C:/ftavdf/Image"

def animate(i):
    """Draw animation frame *i* (0-based) from V_M1.png .. V_M3.png."""
    # BUG FIX: the original looped over ALL images here and only *read* them
    # (plt.imread draws nothing), so every frame of the gif showed the same
    # last picture. Each call must display exactly one image, chosen by i.
    plt.axis('off')
    plt.imshow(plt.imread(imgFolder + "/V_M{0}.png".format(i + 1)))

anim = animation.FuncAnimation(plt.gcf(), animate, frames=nframes)
# ImageMagick is unavailable here (per the warning above), and 'ffmpeg' is
# not a GIF writer -- use Pillow, which matplotlib falls back to anyway.
anim.save(imgFolder + "/output.gif", writer='pillow')
I have a .pfm file (with a .PF/.pf extension) that I am trying to visualize, but I am unable to do so. Normally, .pfm files contain a header in the following format.
PF/pf
width height
scale=1
But my file has the following header instead, and I am unable to visualize it as an image. Can anyone help me out? Any help is appreciated.
Typ=Pic98::TPlane
Lines=750
Columns=1125
FirstLine=0
FirstColumn=0
# NOTE(review): this is the asker's (broken) attempt, quoted as-is. It cannot
# run in its current form -- the specific defects are annotated below.
import re
import numpy as np
# 'file' shadows the Python 2 builtin of the same name
file = open("PF file.PF", 'rb')
header = file.readline().rstrip().decode('utf-8')
if header == 'PF':
raise Exception('Only ONE channel image is supported.')
elif header == 'Typ=Pic98::TPlane<float>':
color = False
else:
raise Exception('Not a PFM file.')
# NOTE(review): readline() returns a single line, but this pattern expects two
# '\n'-separated "name value" groups, so dim_match is always None here.
dim_match = re.match(r'(^(\w+).(\d+)$)\n(^(\w+).(\d+)\s$)',
file.readline().decode('ascii'))
if dim_match:
# NOTE(review): the pattern above defines six groups, so this two-name
# unpack would raise ValueError even if the match succeeded.
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
if header == 'Typ=Pic98::TPlane<float>':
scale =1
endian = '>'
else:
# NOTE(review): unreachable (any other header already raised above), and
# 'scale' would be read before assignment on this branch anyway.
scale = -scale
endian = '<'
# NOTE(review): 'npImage' is never defined before this use, and np.reshape
# expects the shape as a single (height, width) tuple, not two arguments.
npImage = np.reshape(npImage, width,height)
npImage = np.flipud(npImage)
# NOTE(review): 'ret_PIL' and 'Image' are undefined in this snippet.
if ret_PIL:
img = Image.fromarray(npImage, 'F')
# NOTE(review): 'return' at module level is a SyntaxError -- this code was
# presumably lifted from inside a function body.
return img
return npImage
# NOTE(review): unreachable after the returns above, so the file leaks.
file.close()
Updated Answer
I have re-written my answer below in a slightly different, hopefully clearer style.
#!/usr/bin/env python3
import re
import cv2
import numpy as np
from PIL import Image
def readPF(filename):
    """Read a named Pic98::TPlane<float> PF file into a Numpy array.

    Returns a (Lines, Columns) float32 array of the pixel data, or None
    when the type header is missing or either dimension line is absent.
    """
    # Slurp entire file into memory as binary 'bytes'
    with open(filename, 'rb') as f:
        data = f.read()

    # Check correct header, return None if incorrect
    if not re.match(rb'Typ=Pic98::TPlane<float>', data):
        return None

    # Get Lines and Columns, both must be present, else return None.
    # rb'...' raw byte-strings keep \d literal and avoid the invalid-escape
    # warnings the original b'...\d+' patterns produce on newer Pythons.
    L = re.search(rb'Lines=(\d+)', data)
    C = re.search(rb'Columns=(\d+)', data)
    if not (L and C):
        return None
    height = int(L.groups()[0])
    width = int(C.groups()[0])
    print(f'DEBUG: Height={height}, width={width}')

    # Take the data from the END of the file in case other header lines added
    # at start; pixels are little-endian 4-byte floats.
    na = np.frombuffer(data[-4*height*width:], dtype=np.dtype('<f4')).reshape((height,width))

    # Some debug stuff -- locals renamed so the builtins min/max stay usable
    lo, hi, mean = na.min(), na.max(), na.mean()
    print(f'DEBUG: min={lo}, max={hi}, mean={mean}')

    return na
################################################################################
# Main
################################################################################
na = readPF('PF file.PF')

################################################################################
# Use either of the following to save the image:
################################################################################

# Option 1: contrast-stretch onto the full 16-bit range and save via OpenCV
png16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', png16)

# Option 2: wrap the raw float data in a PIL Image and save it as a TIFF
tif = Image.fromarray(na, mode='F')
tif.save('PIL.tif')
Original Answer
Not too sure what image I should be expecting, but here is a rough idea:
#!/usr/bin/env python3
import re
import cv2
import numpy as np
from PIL import Image
# Parse the Pic98::TPlane header, then read the raw float32 pixel data.
# 'with' guarantees the handle is closed -- the original leaked it -- and
# 'f' avoids shadowing Python 2's builtin name 'file'.
with open("PF file.PF", 'rb') as f:
    header = f.readline().rstrip().decode('utf-8')
    if header == 'PF':
        raise Exception('Only ONE channel image is supported.')
    elif header == 'Typ=Pic98::TPlane<float>':
        color = False
    else:
        raise Exception('Not a PFM file.')

    # Consume "name=value" header lines until both dimensions are known
    while True:
        line = f.readline().decode('ascii')
        match = re.match(r'(\w+)=(\d+)', line)  # raw string: no invalid-escape warning
        n, v = match.groups()
        if n == 'Lines':
            height = int(v)
            print(f'Height: {height}')
        if n == 'Columns':
            width = int(v)
            print(f'Width: {width}')
            break

    # Seek backwards from the end of the file in case any clown has added
    # something to the header
    f.seek(-height*width*4, 2)

    # Read remainder of file into Numpy array of floats and reshape
    na = np.fromfile(f, dtype=np.float32).reshape((height,width))

# Some debug stuff -- renamed so the builtins min/max aren't shadowed
lo, hi, mean = na.min(), na.max(), na.mean()
print(f'DEBUG: min={lo}, max={hi}, mean={mean}')

################################################################################
# Use either of the following to save the image:
################################################################################

# Save with OpenCV as scaled PNG
u16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', u16)

# Convert to PIL Image and save as TIFF
pi = Image.fromarray(na, mode='F')
pi.save('PIL.tif')
The output is as follows:
Height: 750
Width: 1125
DEBUG: min=0.0, max=127881704.0, mean=1618343.625
Another possibility is to use ImageMagick to make it into a PNG, I get the following, and ImageMagick defaults to little-endian, so if this is correct, your image is little endian.
magick -define quantum:format=floating-point -depth 32 -size 1125x750+80 gray:"PF file.pf" -auto-level image.png
Keywords: Python, ImageMagick, image processing, float, float32, Numpy, PFM
I am using this cvkit tool https://github.com/roboception/cvkit on Mac and Ubuntu. It works very well to visualize '.pfm' files (e.g., disparity or depth maps saved as pfm file).
so i am currently writing a python script that is supposed to receive a ros image message and then convert it to cv2 so i can do further processing. Right now the program just receives an image and then outputs it in a little window as well as saves it as a png.
Here is my code:
#! /usr/bin/python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
bridge = CvBridge()  # one converter instance shared by every callback

def image_callback(msg):
    """Convert an incoming ROS Image message to OpenCV, save it and display it."""
    print("Received an image!")
    print(msg.encoding)
    try:
        # Convert your ROS Image message to OpenCV2
        # Converting the rgb8 image of the front camera, works fine
        cv2_img = bridge.imgmsg_to_cv2(msg, 'rgb8')
        # Converting the depth images, does not work
        #cv2_img = bridge.imgmsg_to_cv2(msg, '32FC1')
    except CvBridgeError as e:
        # FIX: 'except CvBridgeError, e' is Python-2-only syntax; the
        # 'as e' form works on Python 2.6+ and Python 3.
        print(e)
    else:
        # Save your OpenCV2 image as a png
        cv2.imwrite('camera_image.png', cv2_img)
        cv2.imshow('pic', cv2_img)
        cv2.waitKey(0)
def main():
    """Initialise the listener node, subscribe to the camera topic and block."""
    rospy.init_node('image_listener')
    # Depth topic -- currently not handled correctly by the callback:
    #topic = "/pepper/camera/depth/image_raw"
    # Front camera topic -- works fine:
    topic = "/pepper/camera/front/image_raw"
    rospy.Subscriber(topic, Image, image_callback)
    rospy.spin()

if __name__ == '__main__':
    main()
So my problem is that my code works perfectly fine if i use the data of the front camera but does not work for the depth images.
To make sure i get the correct encoding type i used the command msg.encoding which tells me the encoding type of the current ros message.
The cv2.imshow works exactly like it should for the front camera pictures and it shows me the same as i would get if i used ros image_view but as soon as i try it with the depth image i just get a fully black or white picture unlike what image_view shows me
Here the depth image i get when i use image_view
Here the depth image i receive when i use the script and cv2.imshow
Does anyone have experience working on depth images with cv2 and can help me to get it working with the depth images as well?
I really would appreciate any help :)
Best regards
You could try in the following way to acquire the depth images,
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy as np
import cv2
def convert_depth_image(ros_image):
    """Callback: convert a ROS depth Image to float32, save it as .npy and .png."""
    cv_bridge = CvBridge()
    try:
        # 'passthrough' keeps the message's native depth encoding (e.g. 32FC1)
        depth_image = cv_bridge.imgmsg_to_cv2(ros_image, desired_encoding='passthrough')
    except CvBridgeError as e:
        # FIX: 'except E, e' and 'print e' are Python-2-only; use the forms
        # valid on both 2 and 3, and bail out -- the original fell through
        # and hit a NameError on the undefined 'depth_image'.
        print(e)
        return
    depth_array = np.array(depth_image, dtype=np.float32)
    np.save("depth_img.npy", depth_array)
    rospy.loginfo(depth_array)
    #To save image as png
    # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    cv2.imwrite("depth_img.png", depth_colormap)
    #Or you use
    # depth_array = depth_array.astype(np.uint16)
    # cv2.imwrite("depth_img.png", depth_array)
def pixel2depth():
    """Start an anonymous node that feeds Pepper depth frames to the converter."""
    rospy.init_node('pixel2depth', anonymous=True)
    rospy.Subscriber(
        "/pepper/camera/depth/image_raw",
        Image,
        convert_depth_image,
        queue_size=1,
    )
    rospy.spin()

if __name__ == '__main__':
    pixel2depth()
I want to load a dataset of grayscale images. I used ImageFolder but this doesn't load gray images by default as it converts images to RGB.
I found solutions that load images with ImageFolder and after convert images in grayscale, using:
transforms.Grayscale(num_output_channels=1)
or
ImageOps.grayscale(image)
Is it correct?
How can I load grayscale images without conversion? I tried ImageDataBunch, but I have problems importing fastai.vision.
Assuming the dataset is stored in the "Dataset" folder as given below, set the root directory as "Dataset":
Dataset
class_1
img1.png
img2.png
class_2
img1.png
img2.png
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
# Root folder laid out as Dataset/<class_name>/<image files>
root = 'Dataset/'
# Pipeline: force every image to single-channel grayscale, then to a
# float tensor scaled into [0, 1]
data_transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),
transforms.ToTensor()])
# ImageFolder derives each sample's label from its subfolder name
dataset = ImageFolder(root, transform=data_transform)
For reference, train and test dataset are being split into 70% and 30% respectively.
# Split test and train dataset
# random_split draws a fresh random permutation of indices, so the 70/30
# partition differs between runs unless a generator/seed is fixed.
train_size = int(0.7 * len(dataset))
test_size = len(dataset) - train_size
train_data, test_data = random_split(dataset, [train_size, test_size])
This dataset can be further divided into train and test data loaders as given below to perform operation in batches.
Usually you will see the dataset is assigned batch_size once to be used for both train and test loaders. But, I try to define it separately. The idea is to give the batch_size such that it is a factor of the train/test data loader's size, otherwise it will give an error.
# Set batch size of train data loader
batch_size_train = 20
# Set batch size of test data loader
batch_size_test = 22
# load the split train and test data into batches via DataLoader()
# NOTE(review): with the default drop_last=False a non-divisor batch size
# only yields a smaller final batch, not an error -- verify the claim above.
train_loader = DataLoader(train_data, batch_size=batch_size_train, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size_test, shuffle=True)
Yes, that is correct and AFAIK pillow by default loads images in RGB, see e.g. answers to this question. So conversion to grayscale is the only way, though takes time of course.
Pure pytorch solution (if ImageFolder isn't appropriate)
You can roll out your own data loading functionalities and If I were you I wouldn't go fastai route as it's pretty high level and takes away control from you (you might not need those functionalities anyway).
In principle, all you have to do is to create something like this below:
import pathlib
import torch
from PIL import Image
class ImageDataset(torch.utils.data.Dataset):
    """Dataset yielding (grayscale PIL image, class id) pairs for every file
    under *path* that matches *regex*."""

    def __init__(self, path: pathlib.Path, images_class: int, regex: str = "*.png"):
        # list(...) replaces the original pass-through comprehension
        self.files = list(path.glob(regex))
        self.images_class: int = images_class

    def __len__(self) -> int:
        # FIX: DataLoader and random_split require __len__; the original
        # omitted it, making the dataset unusable with either.
        return len(self.files)

    def __getitem__(self, index):
        # "LA" = luminance + alpha, i.e. grayscale with transparency kept
        return Image.open(self.files[index]).convert("LA"), self.images_class
# Assuming you have `png` images, can modify that with regex
# '+' on torch Datasets builds a ConcatDataset, so this is one dataset
# spanning all three class folders with per-folder labels 0/1/2.
final_dataset = (
ImageDataset(pathlib.Path("/path/to/dogs/images"), 0)
+ ImageDataset(pathlib.Path("/path/to/cats/images"), 1)
+ ImageDataset(pathlib.Path("/path/to/turtles/images"), 2)
)
Above would get you images from the paths provided above and each image would return appropriate provided class.
This gives you more flexibility (different folder setting than torchvision.datasets.ImageFolder) for a few more lines.
Ofc, you could add more of those or use loop or whatever else.
You could also apply torchvision.transforms, e.g. transforming images above to tensors, read
torchdata solution
Disclaimer: author here. If you are concerned about the loading times of your data and the grayscale transformation, you could use the torchdata third-party library for PyTorch.
Using it one could create the same thing as above but use cache or map (to use torchvision.transforms or other transformations easily) and some other things known e.g. from tensorflow.data module, see below:
import pathlib
from PIL import Image
import torchdata
# Change inheritance
class ImageDataset(torchdata.Dataset):
    """Same dataset as before, but torchdata-backed so .cache()/.map() work."""

    def __init__(self, path: pathlib.Path, images_class: int, regex="*.png"):
        # torchdata requires the parent constructor to run -- that is the
        # only change versus the plain-torch version.
        super().__init__()
        self.files = list(path.glob(regex))
        self.images_class: int = images_class

    def __getitem__(self, index):
        return Image.open(self.files[index]), self.images_class
# Concatenate the three per-class datasets, then cache decoded samples so
# disk reads and decoding happen only on the first epoch.
final_dataset = (
ImageDataset(pathlib.Path("/path/to/dogs/images"), 0)
+ ImageDataset(pathlib.Path("/path/to/cats/images"), 1)
+ ImageDataset(pathlib.Path("/path/to/turtles/images"), 2)
).cache() # will cache data in-memory after first pass
# You could apply transformations after caching for possible speed-up
torchvision ImageFolder loader
As correctly pointed out by #jodag in the comments, one can use loader callable with single argument path to do customized data opening, e.g. for grayscale it could be:
from PIL import Image
import torchvision
# The 'loader' callable replaces ImageFolder's default RGB loader, so every
# image arrives already converted to grayscale+alpha ("LA") as a PIL image.
dataset = torchvision.datasets.ImageFolder(
"/path/to/images", loader=lambda path: Image.open(path).convert("LA")
)
Please note that you could also use it for other types of files — they don't have to be images.
Make custom loader, feed it to ImageFolder:
import numpy as np
from PIL import Image, ImageOps
def gray_reader(image_path):
    """Open *image_path*, convert it to grayscale and return a numpy array."""
    # The context manager closes the source file, as the explicit
    # im.close() did in the earlier version.
    with Image.open(image_path) as src:
        gray = ImageOps.grayscale(src)
    return np.array(gray)  # return np array
    # return gray          # return PIL Image

some_dataset = ImageFolder(image_root_path, loader=gray_reader)
Edit:
Below code is much better than previous, get color image and convert to grayscale in transform()
def get_transformer(h, w):
    """Build the validation pipeline: PIL -> grayscale -> resize (h, w) ->
    tensor -> normalised to roughly [-1, 1]."""
    steps = [
        transforms.ToPILImage(),
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize((h, w)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
    return transforms.Compose(steps)
I am new to ROS. I need to convert a preexisting video file, or a large amount of images that can be concatenated into a video stream, into a .bag file in ROS. I found this code online: http://answers.ros.org/question/11537/creating-a-bag-file-out-of-a-image-sequence/, but it says it is for camera calibration, so not sure if it fits my purpose.
Could someone with a good knowledge of ROS confirm that I can use the code in the link provided for my purposes, or if anyone actually has the code I'm looking for, could you please post it here?
The following code converts a video file to a bag file, inspired from the code in the link provided.
Little reminder:
this code depends on cv2 (opencv python)
time stamp of ROS message is calculated by frame index and fps. fps will be set to 24 if opencv unable to read it from the video.
import time, sys, os
from ros import rosbag
import roslib, rospy
roslib.load_manifest('sensor_msgs')
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
TOPIC = 'camera/image_raw/compressed'
def CreateVideoBag(videopath, bagname):
    '''Creates a bag file with a video file.

    Every frame of *videopath* is written to *bagname* as a compressed image
    message on TOPIC; timestamps are derived from the frame index and the
    video's FPS (falling back to 24 when OpenCV cannot report it).
    '''
    bag = rosbag.Bag(bagname, 'w')
    cap = cv2.VideoCapture(videopath)
    cb = CvBridge()
    prop_fps = cap.get(cv2.CAP_PROP_FPS)
    # 'prop_fps != prop_fps' is the NaN self-inequality test; also guard
    # against zero/near-zero FPS reports.
    if prop_fps != prop_fps or prop_fps <= 1e-2:
        # FIX: the bare py2 print statement here was a SyntaxError on py3.
        print("Warning: can't get FPS. Assuming 24.")
        prop_fps = 24
    frame_id = 0
    # The 'ret = True' priming flag was redundant: the loop always breaks
    # on the first failed read anyway.
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        stamp = rospy.rostime.Time.from_sec(float(frame_id) / prop_fps)
        frame_id += 1
        image = cb.cv2_to_compressed_imgmsg(frame)
        image.header.stamp = stamp
        image.header.frame_id = "camera"
        bag.write(TOPIC, image, stamp)
    cap.release()
    bag.close()
if __name__ == "__main__":
    # Expect exactly two CLI arguments: the input video and the output bag.
    if len(sys.argv) != 3:
        print("Usage: video2bag videofilename bagfilename")
    else:
        CreateVideoBag(*sys.argv[1:])