How to visualize a .pfm file as an image?

I have a .pfm (.PF/.pf) file that I am trying to visualize, but I am unable to do so. Normally, .pfm files contain a header of the form:
PF/pf
width height
scale=1
But my file has the header below instead, and I am unable to visualize it as an image; my attempt at reading it follows the header. Any help is appreciated.
Typ=Pic98::TPlane
Lines=750
Columns=1125
FirstLine=0
FirstColumn=0
import re
import numpy as np

file = open("PF file.PF", 'rb')

header = file.readline().rstrip().decode('utf-8')
if header == 'PF':
    raise Exception('Only ONE channel image is supported.')
elif header == 'Typ=Pic98::TPlane<float>':
    color = False
else:
    raise Exception('Not a PFM file.')

dim_match = re.match(r'(^(\w+).(\d+)$)\n(^(\w+).(\d+)\s$)',
                     file.readline().decode('ascii'))
if dim_match:
    width, height = map(int, dim_match.groups())
else:
    raise Exception('Malformed PFM header.')

if header == 'Typ=Pic98::TPlane<float>':
    scale = 1
    endian = '>'
else:
    scale = -scale
    endian = '<'

npImage = np.reshape(npImage, width, height)
npImage = np.flipud(npImage)

if ret_PIL:
    img = Image.fromarray(npImage, 'F')
    return img
return npImage
file.close()

Updated Answer
I have re-written my answer below in a slightly different, hopefully clearer style.
#!/usr/bin/env python3

import re
import cv2
import numpy as np
from PIL import Image

def readPF(filename):
    """Read named PF file into Numpy array"""
    # Slurp entire file into memory as binary 'bytes'
    with open(filename, 'rb') as f:
        data = f.read()

    # Check correct header, return None if incorrect
    if not re.match(b'Typ=Pic98::TPlane<float>', data):
        return None

    # Get Lines and Columns, both must be present, else return None
    L = re.search(rb'Lines=(\d+)', data)
    C = re.search(rb'Columns=(\d+)', data)
    if not (L and C):
        return None
    height = int(L.groups()[0])
    width  = int(C.groups()[0])
    print(f'DEBUG: Height={height}, width={width}')

    # Take the data from the END of the file in case other header lines added at start
    na = np.frombuffer(data[-4*height*width:], dtype=np.dtype('<f4')).reshape((height,width))

    # Some debug stuff
    min, max, mean = na.min(), na.max(), na.mean()
    print(f'DEBUG: min={min}, max={max}, mean={mean}')

    return na

################################################################################
# Main
################################################################################
na = readPF('PF file.PF')

################################################################################
# Use either of the following to save the image:
################################################################################
# Save with OpenCV as scaled PNG
u16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', u16)

# Convert to PIL Image and save as TIFF
pi = Image.fromarray(na, mode='F')
pi.save('PIL.tif')
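If you just want a quick on-screen look rather than a saved file, a minimal sketch using matplotlib (an extra dependency, not required by the code above) that reuses the na array returned by readPF() could be:

import matplotlib.pyplot as plt

# Display the float data directly, with a colour bar showing the value range
plt.imshow(na, cmap='gray')
plt.colorbar()
plt.show()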
Original Answer
Not too sure what image I should be expecting, but here is a rough idea:
#!/usr/bin/env python3

import re
import cv2
import numpy as np
from PIL import Image

file = open("PF file.PF", 'rb')

header = file.readline().rstrip().decode('utf-8')
if header == 'PF':
    raise Exception('Only ONE channel image is supported.')
elif header == 'Typ=Pic98::TPlane<float>':
    color = False
else:
    raise Exception('Not a PFM file.')

while True:
    line = file.readline().decode('ascii')
    match = re.match(r'(\w+)=(\d+)', line)
    n, v = match.groups()
    if n == 'Lines':
        height = int(v)
        print(f'Height: {height}')
    if n == 'Columns':
        width = int(v)
        print(f'Width: {width}')
        break

# Seek backwards from the end of the file in case any clown has added something to the header
file.seek(-height*width*4, 2)

# Read remainder of file into Numpy array of floats and reshape
na = np.fromfile(file, dtype=np.float32).reshape((height,width))

# Some debug stuff
min, max, mean = na.min(), na.max(), na.mean()
print(f'DEBUG: min={min}, max={max}, mean={mean}')

################################################################################
# Use either of the following to save the image:
################################################################################
# Save with OpenCV as scaled PNG
u16 = (65535*(na - np.min(na))/np.ptp(na)).astype(np.uint16)
cv2.imwrite('OpenCV.png', u16)

# Convert to PIL Image and save as TIFF
pi = Image.fromarray(na, mode='F')
pi.save('PIL.tif')
The output is as follows:
Height: 750
Width: 1125
DEBUG: min=0.0, max=127881704.0, mean=1618343.625
Another possibility is to use ImageMagick to make it into a PNG, and I get the following. ImageMagick defaults to little-endian, so if this result is correct, your image is little-endian.
magick -define quantum:format=floating-point -depth 32 -size 1125x750+80 gray:"PF file.pf" -auto-level image.png
Keywords: Python, ImageMagick, image processing, float, float32, Numpy, PFM

I am using the cvkit tool https://github.com/roboception/cvkit on Mac and Ubuntu. It works very well for visualizing '.pfm' files (e.g., disparity or depth maps saved as .pfm files).

Related

Rasterio Reprojection

I am trying to create a function to reproject rasters in a dataset. However, I get this error:
RasterioIOError: Attempt to create new tiff file 'E:/MapsForTesting/Reprojected' failed: Permission denied
How do I allow my computer or rasterio to write files? I can't seem to find the answer.
Here is my code:
```
###############################################################################################
import os
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling

# get working directory
os.chdir('E:\MapsForTesting')

# open unprojected raster
dstCrs = 'EPSG:32620'  # the EPSG code for UTM Zone 20

def reproject(infile, outfilepath):
    #kill = rasterio.open(infile) """Attempt to solve issue does not work"""
    with rasterio.open(infile) as srs:  # if input is halifax.jpg or raw .tif will receive north up error
        srs_crs = srs.crs
        transform, width, height = calculate_default_transform(
            srs.crs, dstCrs, srs.width, srs.height, *srs.bounds)  # calculate transform
        kwargs = srs.meta.copy()
        kwargs.update({
            'crs': dstCrs,
            'transform': transform,
            'width': width,
            'height': height
        })
        #kill.close()  # close the rasterio dataset """Attempt to solve issue does not work"""
        #os.remove(infile)  # delete the file """Attempt to solve issue does not work"""
        with rasterio.open(outfilepath.format(), 'w', **kwargs) as dst:
            for i in range(1, srs.count + 1):
                reproject(
                    source = rasterio.band(srs, i),
                    destination = rasterio.band(dst, i),
                    srs_transform = srs.transform,
                    srs_crs = src.crs,
                    dst_transform = transform,
                    dst_crs = dst_crs,
                    resampling = Resampling.nearest)
                dst.write(reproject)
    return(outfilepath)

reproject('E:\MapsForTesting\halifaxCRS1.tif', 'E:\MapsForTesting\Reprojected')
#################################################################################################
```
I have validated that my function works if I overwrite the same file; however, that output is not desirable, and I would like the code to write a new file instead.
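For what it's worth, this particular RasterioIOError often appears when the output path is an existing folder rather than a file name, so one thing to try is handing rasterio.open() an explicit .tif path inside that folder. A minimal sketch of that idea, assuming the reproject() function above and a made-up output filename reprojected.tif:

import os

out_dir = r'E:\MapsForTesting\Reprojected'
os.makedirs(out_dir, exist_ok=True)                 # make sure the folder exists
outfile = os.path.join(out_dir, 'reprojected.tif')  # hypothetical output filename

reproject(r'E:\MapsForTesting\halifaxCRS1.tif', outfile)

Note also that the function is itself named reproject, which shadows rasterio.warp.reproject, so the inner reproject(...) call ends up calling the outer function again; renaming one of the two avoids that.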

Process depth image messages from ROS with OpenCV

So I am currently writing a Python script that is supposed to receive a ROS image message and then convert it to cv2 so I can do further processing. Right now the program just receives an image, outputs it in a little window and saves it as a PNG.
Here is my code:
#! /usr/bin/python

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2

bridge = CvBridge()

def image_callback(msg):
    print("Received an image!")
    print(msg.encoding)
    try:
        # Convert your ROS Image message to OpenCV2
        # Converting the rgb8 image of the front camera, works fine
        cv2_img = bridge.imgmsg_to_cv2(msg, 'rgb8')
        # Converting the depth images, does not work
        #cv2_img = bridge.imgmsg_to_cv2(msg, '32FC1')
    except CvBridgeError as e:
        print(e)
    else:
        # Save your OpenCV2 image as a png
        cv2.imwrite('camera_image.png', cv2_img)
        cv2.imshow('pic', cv2_img)
        cv2.waitKey(0)

def main():
    rospy.init_node('image_listener')
    # does not work:
    #image_topic = "/pepper/camera/depth/image_raw"
    # works fine:
    image_topic = "/pepper/camera/front/image_raw"
    rospy.Subscriber(image_topic, Image, image_callback)
    rospy.spin()

if __name__ == '__main__':
    main()
So my problem is that my code works perfectly fine if I use the data of the front camera, but it does not work for the depth images.
To make sure I use the correct encoding type, I checked msg.encoding, which tells me the encoding of the current ROS message.
cv2.imshow works exactly as it should for the front camera pictures and shows me the same thing I would get with ROS image_view, but as soon as I try it with the depth image I just get a fully black or white picture, unlike what image_view shows me.
Here is the depth image I get when I use image_view.
Here is the depth image I receive when I use the script and cv2.imshow.
Does anyone have experience working with depth images in cv2 and can help me get it working with the depth images as well?
I really would appreciate any help :)
Best regards
You could try acquiring the depth images in the following way:
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy as np
import cv2

def convert_depth_image(ros_image):
    cv_bridge = CvBridge()
    try:
        depth_image = cv_bridge.imgmsg_to_cv2(ros_image, desired_encoding='passthrough')
    except CvBridgeError as e:
        print(e)
    depth_array = np.array(depth_image, dtype=np.float32)
    np.save("depth_img.npy", depth_array)
    rospy.loginfo(depth_array)

    # To save image as png
    # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
    depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
    cv2.imwrite("depth_img.png", depth_colormap)

    # Or you can use
    # depth_array = depth_array.astype(np.uint16)
    # cv2.imwrite("depth_img.png", depth_array)

def pixel2depth():
    rospy.init_node('pixel2depth', anonymous=True)
    rospy.Subscriber("/pepper/camera/depth/image_raw", Image, callback=convert_depth_image, queue_size=1)
    rospy.spin()

if __name__ == '__main__':
    pixel2depth()
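If you also want a live cv2.imshow window to show something sensible for a 32FC1 image (rather than an all-black or all-white frame), one option is to normalize the float depth to the 0-255 range before displaying it. A minimal sketch of a helper you could call from the callback above, assuming it is passed the array returned by imgmsg_to_cv2:

import cv2
import numpy as np

def show_depth(depth_image, window='depth'):
    # Replace NaNs (common in depth maps) so the normalization does not blow up
    depth = np.nan_to_num(depth_image.astype(np.float32))

    # Scale the float range to 0..255 and convert to 8-bit for display
    vis = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

    cv2.imshow(window, vis)
    cv2.waitKey(1)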

Read many videos and convert them into images

I have been able to get a program that reads a single video and converts it into images. However, I actually have many videos, and I would like to write a function that reads the videos one by one and converts them into images, in order to facilitate further processing. The code for converting a single video is as follows.
import sys
import argparse
import cv2

vidcap = cv2.VideoCapture('E:/DATA/1/12.mp4')
path_out = "E:/DATA_Synth/12"

success, image = vidcap.read()
#image = cv2.resize(image, (640, 480))
count = 0
while success:
    cv2.imwrite(path_out + "/frame%03d.jpg" % count, image)
    #cv2.imwrite(path_out + "/frame%d.jpg" % count, image)
    success, image = vidcap.read()
    #image = cv2.resize(image, (640, 480))
    print('Read a new frame: ', success)
    count += 1
Any suggestions and comments would be highly appreciated.
You should create a loop that iterates through the folder containing all your videos, picks each video, and runs your code above.
import os
import sys
import argparse
import cv2

directory = "enter your folder directory for video"

for vidfile in os.listdir(directory):
    if vidfile.endswith('.mp4'):
        vidcap = cv2.VideoCapture(os.path.join(directory, vidfile))
        ## write your code here for converting the video to individual frames
    else:
        continue
I have updated the code as follows.
import os
import sys
import argparse
import cv2

directory = "E:/Training/video"
path_out = "E:/DATA_Synth/12"

for vidfile in os.listdir(directory):
    if vidfile.endswith('.avi'):
        vidcap = cv2.VideoCapture(os.path.join(directory, vidfile))
        ## write your code here for converting the video to individual frames
        success, image = vidcap.read()
        #image = cv2.resize(image, (640, 480))
        count = 0
        while success:
            #vidcap.set(cv2.CAP_PROP_POS_MSEC, (count*1000))
            cv2.imwrite(path_out + "/frame%03d.jpg" % count, image)  # save frame as JPEG file
            #cv2.imwrite(path_out + "/frame%d.jpg" % count, image)
            success, image = vidcap.read()
            #image = cv2.resize(image, (640, 480))
            #print('Read a new frame: ', success)
            count += 1
    else:
        continue
However, only the frames from the last video in my directory are saved, because each video restarts the counter and overwrites the previous frames. How can I modify the code so that the frame name written here
cv2.imwrite(path_out + "/frame%d.jpg" % count, image)
also contains the name of the corresponding video?
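One way to do that is to build the output filename from the video's base name. A minimal sketch of the inner part of the loop, assuming the same vidfile, vidcap, path_out and cv2 from the code above:

import os

name, _ = os.path.splitext(vidfile)   # e.g. '12' from '12.avi'
count = 0
success, image = vidcap.read()
while success:
    # e.g. E:/DATA_Synth/12/12_frame000.jpg
    cv2.imwrite(os.path.join(path_out, "%s_frame%03d.jpg" % (name, count)), image)
    success, image = vidcap.read()
    count += 1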

The average image of multiple images

I'm using this code but it doesn't work. Could you tell me what the problem is?
import glob, cv2
import numpy as np

def read_img(img_list, img):
    n = cv2.imread(img)
    img_list.append(n)
    return img_list

path = glob.glob("02291G0AR/*.bmp")
list_ = []
cv_image = [read_img(list_, img) for img in path]

for img in cv_image:
    cv2.imshow('image', img)
and the error is:
cv2.imshow('image',img)
TypeError: mat is not a numpy array, neither a scalar
I think you'll get on better with something like this:
#!/usr/bin/env python3

import glob, cv2
import numpy as np

# Load an image by name and return as Numpy array
def read_img(name):
    img = cv2.imread(name)
    return img

# Generate list of all image names
names = glob.glob("*.bmp")

# Load all images into list
images = [read_img(name) for name in names]

# Display all images in list
for img in images:
    cv2.imshow('image', img)
    cv2.waitKey()
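Since the title asks for the average image, a minimal sketch of that final step, assuming the images list built above and that all the .bmp files have the same dimensions:

# Stack the images along a new axis, average over it, and convert back to 8-bit
average = np.mean(np.stack(images).astype(np.float32), axis=0).astype(np.uint8)

cv2.imshow('average', average)
cv2.waitKey()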

How do I convert a video or a sequence of images to a bag file?

I am new to ROS. I need to convert a preexisting video file, or a large number of images that can be concatenated into a video stream, into a .bag file in ROS. I found this code online: http://answers.ros.org/question/11537/creating-a-bag-file-out-of-a-image-sequence/, but it says it is for camera calibration, so I am not sure whether it fits my purpose.
Could someone with a good knowledge of ROS confirm that I can use the code in the link provided for my purposes, or if anyone actually has the code I'm looking for, could you please post it here?
The following code converts a video file to a bag file, inspired by the code in the link provided.
A little reminder:
this code depends on cv2 (OpenCV Python)
the time stamp of each ROS message is calculated from the frame index and the fps; the fps is set to 24 if OpenCV is unable to read it from the video.
import time, sys, os
from ros import rosbag
import roslib, rospy
roslib.load_manifest('sensor_msgs')
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2

TOPIC = 'camera/image_raw/compressed'

def CreateVideoBag(videopath, bagname):
    '''Creates a bag file with a video file'''
    bag = rosbag.Bag(bagname, 'w')
    cap = cv2.VideoCapture(videopath)
    cb = CvBridge()
    prop_fps = cap.get(cv2.CAP_PROP_FPS)
    if prop_fps != prop_fps or prop_fps <= 1e-2:
        print("Warning: can't get FPS. Assuming 24.")
        prop_fps = 24
    ret = True
    frame_id = 0
    while(ret):
        ret, frame = cap.read()
        if not ret:
            break
        stamp = rospy.rostime.Time.from_sec(float(frame_id) / prop_fps)
        frame_id += 1
        image = cb.cv2_to_compressed_imgmsg(frame)
        image.header.stamp = stamp
        image.header.frame_id = "camera"
        bag.write(TOPIC, image, stamp)
    cap.release()
    bag.close()

if __name__ == "__main__":
    if len(sys.argv) == 3:
        CreateVideoBag(*sys.argv[1:])
    else:
        print("Usage: video2bag videofilename bagfilename")
