Error in reading video file in MATLAB

Hi, I am getting a strange error while trying to read a video frame by frame in MATLAB. I am doing the following:
xyloObj = VideoReader(vid_name);
fps = xyloObj.FrameRate;
nFrames = xyloObj.NumberOfFrames;
vidHeight = xyloObj.Height;
vidWidth = xyloObj.Width;
% Preallocate movie structure.
mov(1:nFrames) = ...
    struct('cdata', zeros(vidHeight, vidWidth, 3, 'uint8'), ...
           'colormap', []);
index = 1;
for k = 1:nFrames
    mov(index).cdata = read(xyloObj, k);
    index = index + 1;
end
I get the following error:
Error using VideoReader/read (line 80)
The file could not be read.
I haven't found a solution to this error anywhere else.
EDIT: The file format is AVI, something like D:\videos\drunk.avi.

How about using mmread? I used VideoReader on Linux, but in my case the reported number of frames was not correct.
Moreover, since I need the timestamps of the videos, I have switched from VideoReader to mmread.

Related

Open and read a .RAW file

I need to write a MATLAB program that will read this image file Download here.
This picture shows the instructions and the expected result. I have tried the following code, but I couldn't get the expected results.
row=256; col=256;
f=fopen('e6712s4i50.raw','r');
a=fread(f, [col row],'*int16');
Z=a;
imshow(Z)
How do I read this picture correctly?
Check the code below:
fid = fopen('raw.raw');
rawdata = fread(fid, inf, '*uint8');
fclose(fid);
% take every second byte of the 256*256 16-bit samples, reshape to 256x256 and transpose
B = reshape(rawdata(2:2:131072), 256, 256)';
% circularly shift the rows by 240 and the columns by 140 to line the image up
Y = circshift(B, [240 140]);
imshow(Y)
What about this?

Why is MATLAB slow in a for loop with a large number of iterations but fast with a small number of iterations?

I am running a function to extract some information from 100,000+ patient X-ray DICOM files. The files are stored within a VeraCrypt encryption container for security purposes.
When I run the function on a small sample of files it performs really quickly, but when I run it on the entire dataset it is very slow in comparison, going from several files per second to roughly one file per second.
I was wondering why this is happening. I have tried storing the data on an SSD and on a normal hard drive and get the same sort of slowdown when using a larger dataset compared to a small one.
I have added the code below for reference but haven't commented it fully yet; this is for my thesis, so I will do that once I get the extraction finished.
Thanks for any help.
function [ DB, corrupted_files ] = extract_from_dcm( folder_name )
%EXTRACT_FROM_DCM Summary of this function goes here
% Detailed explanation goes here
if nargin == 0
    folder_name = 'I:\Find and Treat\MXU Old Backup\2005';
end
Database_Check = strcat(folder_name, '\DataBase.mat');
if exist(Database_Check, 'file')
    load(Database_Check);
    entry_start = length(DB) + 1;
else
    entry_start = 1;
    [ found_dicoms ] = recursive_search( folder_name );
end
mat_file_location = strcat(folder_name, '\DataBase.mat');
excel_DB_file = strcat(folder_name, '\DataBase.xlsx');
excel_Corrupted_file = strcat(folder_name, '\Corrupted_Files.xlsx');
% the recursive search creates a struct with the path for each
% dcm file found. the list is then recursively used to locate
% the image and extract the relevant information from it.
fprintf('---------------------------------------------\n');
fprintf('Start Patient Data Extraction\n');
tic
h = waitbar(0, '', 'Name', 'Patient Data Extraction');
entry_end = length(found_dicoms);
if entry_end == 0
    % set(handles.info_box, 'String', 'No Dicom Files Found in this Folder or its Subfolders');
else
    % set(handles.info_box, 'String', 'Congratulations Dicom Files have been found Look Through the Data Base using the Buttons Below....Press Save Button to save the Database. (Database Save format is EXCEL SpreadSheet and MAT file');
    for kk = entry_start : entry_end
        progress = kk/entry_end;
        progress_percent = round(progress * 100);
        waitbar(progress, h, sprintf('%d%% %d/%d of images processed', progress_percent, kk, entry_end));
        img_full_path = found_dicoms(kk).name;
        % search_path = folder_name;
        % img_full_path = strrep(img_full_path, search_path, '');
        try %# attempt to read the DICOM header
            dicom_info = dicominfo(img_full_path);
            try %# attempt to read the pixel data (used here as a corruption check)
                dicom_read = dicomread(dicom_info);
                old = dicominfo(img_full_path);
                DB(kk).StudyDate = old.StudyDate;
                DB(kk).StudyTime = old.StudyTime;
                if isfield(old.PatientName, 'FamilyName')
                    DB(kk).Forename = old.PatientName.FamilyName;
                else
                    DB(kk).Forename = 'NA';
                end
                if isfield(old.PatientName, 'GivenName')
                    DB(kk).LastName = old.PatientName.GivenName;
                else
                    DB(kk).LastName = 'NA';
                end
                if isfield(old, 'PatientSex')
                    DB(kk).PatientSex = old.PatientSex;
                else
                    DB(kk).PatientSex = 'NA';
                end
                if isempty(old.PatientBirthDate)
                    DB(kk).PatientBirthDate = '00000000';
                else
                    DB(kk).PatientBirthDate = old.PatientBirthDate;
                end
                if strcmp(old.Manufacturer, 'Philips Medical Systems')
                    DB(kk).Van = '1';
                else
                    DB(kk).Van = '0'; % section to represent organising by different vans
                end
                DB(kk).img_Path = img_full_path;
                save(mat_file_location, 'DB', 'found_dicoms'); % saves the growing DB on every iteration
            catch exception %# Catch the exception
                fprintf('read - file %d corrupt.\n', kk);
                continue %# Pass control to the next loop iteration
            end
        catch exception %# Catch the exception
            fprintf('info - file %d corrupt.\n', kk);
            continue %# Pass control to the next loop iteration
        end
    end
end
[ corrupted_files, DB ] = corruption_check( DB, found_dicoms, folder_name );
toc
fprintf('End Patient Data Extraction\n');
fprintf('---------------------------------------------\n');
fprintf('---------------------------------------------\n');
fprintf('Start Saving Extracted Data \n');
tic
save(mat_file_location, 'DB', 'corrupted_files', 'found_dicoms');
if isempty(DB)
    msg = sprintf('No Dicom Files Found');
    msgbox(strcat(msg));
else
    DB_table = struct2table(DB);
    writetable(DB_table, excel_DB_file);
end
close(h);
toc
fprintf('End Saving Extracted Data \n');
fprintf('---------------------------------------------\n');
end
OK, thanks for all the help.
My problem was the saving at the end of each iteration, but the biggest problem was the line where I run the dicomread function. I changed the saving to occur once for every 20 images processed.
I also removed the preallocation suggested in the comments to see what difference it made without the dicomread and the saving as well; it was considerably slower than with the preallocation.
... I just need to find a solution for dicomread (which I was using as a way to check whether the file was corrupt or not).

PyAudio: how to check volume

I'm currently developing a VoIP tool in Python working as a client-server. My problem is that I'm currently sending the PyAudio input stream as follows even when there is no sound (that is, even when nobody talks and there is no noise, data is still sent):
CHUNK = 1024
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=44100,
                input=True,
                frames_per_buffer=CHUNK)
while 1:
    self.conn.sendVoice(stream.read(CHUNK))
I would like to check the volume to get something like this:
data = stream.read(CHUNK)
if data.volume > 20%:
    self.conn.sendVoice(data)
This way I could avoid sending useless data and spare the connection / increase performance. (Also, I'm looking for some kind of compression, but I think I will have to ask about that in another topic.)
It can be done using the root mean square (RMS).
One way to build your own RMS function in Python is:
import math
import struct

def rms(data):
    count = len(data) // 2              # number of 16-bit samples in the chunk
    format = "%dh" % count              # e.g. "1024h" for CHUNK signed shorts
    shorts = struct.unpack(format, data)
    sum_squares = 0.0
    for sample in shorts:
        n = sample * (1.0 / 32768)      # normalise to -1.0 .. 1.0
        sum_squares += n * n
    return math.sqrt(sum_squares / count)
Another choice is to use audioop to find the RMS:
import audioop

data = stream.read(CHUNK)
rms = audioop.rms(data, 2)  # width=2 because the stream uses paInt16
Now, if you want, you can convert the RMS to a decibel scale: decibel = 20 * log10(rms).
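For completeness, here is a minimal sketch (my own untested illustration, not part of the answer above) of how the chunk reading, the audioop RMS and the gating could fit together; the THRESHOLD value and the final send call are placeholders to adapt to your own client:
import math
import audioop
import pyaudio

CHUNK = 1024
THRESHOLD = 0.2  # "20%" of full scale, i.e. of 32768 for 16-bit samples

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=44100,
                input=True,
                frames_per_buffer=CHUNK)

while True:
    data = stream.read(CHUNK)
    rms = audioop.rms(data, 2)       # width=2 bytes per sample (paInt16)
    level = rms / 32768.0            # normalise to 0.0 .. 1.0
    if level > THRESHOLD:
        db = 20 * math.log10(level)  # level relative to full scale, in dB
        print("chunk above threshold (%.1f dBFS), sending" % db)
        # here you would call your own send method, e.g. self.conn.sendVoice(data)
Working with the normalised level rather than the raw RMS makes the "20%" threshold from the question easy to express, and the same value can be converted to decibels for logging.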

Getting the dimensions of an image in Ruby

To get the image dimensions in Ruby, I tried to use identify. I wanted to retrieve the output of this system call as a string:
str = system('identify -format "%[fx:w]x%[fx:h]" image.png')
output = `ls`
print output
But I'm getting the last lines of output and not the output of this particular system call.
Also, if there is a simpler way to get the image dimensions without external gems or libraries, please suggest it; that would be great!
Since you already use an external library (ImageMagick), you could use its Ruby wrapper RMagick:
require 'RMagick'
img = Magick::Image::read('image.png').first
arr = [img.columns, img.rows]
Here's an example of a very simple PNG parser:
data = File.binread('image.png', 100) # read first 100 bytes

if data[0, 8] == [137, 80, 78, 71, 13, 10, 26, 10].pack("C*")
  # file has a PNG file signature, let's get the image header chunk
  length, chunk_type = data[8, 8].unpack("l>a4")
  raise "unknown format, expecting image header" unless chunk_type == "IHDR"

  chunk_data = data[16, length].unpack("l>l>CCCCC")
  width              = chunk_data[0]
  height             = chunk_data[1]
  bit_depth          = chunk_data[2]
  color_type         = chunk_data[3]
  compression_method = chunk_data[4]
  filter_method      = chunk_data[5]
  interlace_method   = chunk_data[6]

  puts "image size: #{width}x#{height}"
else
  # handle other formats
end
Okay, I finally found a solution after some experiments.
str = `identify -format "%[fx:w]x%[fx:h]" image.png`
arr = str.split('x')
The array arr now contains the dimensions as [width, height].
This worked for me! Please suggest other approaches that might be easier or simpler.

Detect duplicate videos from YouTube

In connection with my M.Tech project, I want to know if there is any algorithm to detect duplicate videos on YouTube.
For example (here are links to two videos):
random user upload
upload by official channel
Of these, the second is the official video, and T-Series holds its copyright.
Is YouTube officially doing something to remove duplicate videos?
Not only videos; duplicate YouTube channels exist as well.
Sometimes the original video has fewer views than the pirated version.
So, while searching, I found this (see page 49 of the PDF).
What I learnt from the given link:
A classifier is used to detect original vs. copyright-infringed videos.
Given a query, first the top k search results are retrieved. Thereafter, three parameters are used to classify the videos:
Number of subscribers
User profile
Username popularity
On the basis of these parameters, the original video is identified as described in the link.
EDIT 1:
There are basically two different objectives:
To identify the original video with the above method
To eliminate the duplicate videos
Obviously, identifying the original video is easier than finding all of the duplicate videos, so I preferred to first find the original video.
The approach I can think of so far to improve the accuracy:
We can first find the original videos with the above method.
Then use the most popular publicized frames (maybe multiple) of that video to search on Google Images. This method therefore retrieves a list of duplicate videos from the Google Images search results.
After getting these duplicate videos, we can once again check frame by frame and reach a level of satisfaction (yes, the retrieved videos were "exact" or "almost" duplicate copies of the original video).
Will this approach work? If not, is there a better algorithm to improve upon the given method?
Please write in the comments section if I am unable to explain my approach clearly.
I will soon add some more details.
I've recently hacked together a small tool for that purpose. It's still a work in progress but usually pretty accurate. The idea is to simply compare the times between brightness maxima in the center of the video, so it should work across different resolutions, frame rates and rotations of the video.
ffmpeg is used for decoding, imageio as a bridge to Python, numpy/scipy for the maxima computation, and some k-nearest-neighbour library (annoy, cyflann, hnsw) for the comparison.
At the moment it's not polished at all, so you should know a little Python to run it, or simply copy the idea.
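To make the idea concrete, here is a minimal sketch of the brightness-time-series step (my own illustration, not the author's tool; the file name, the centre crop and the one-second minimum peak spacing are arbitrary assumptions, and it assumes imageio with its ffmpeg plugin plus numpy/scipy):
import imageio
import numpy as np
from scipy.signal import find_peaks

def brightness_peak_times(path):
    reader = imageio.get_reader(path, 'ffmpeg')
    fps = reader.get_meta_data()['fps']
    brightness = []
    for frame in reader:
        h, w = frame.shape[:2]
        # mean brightness of a centre crop, so borders and rotation matter less
        centre = frame[h // 4: 3 * h // 4, w // 4: 3 * w // 4]
        brightness.append(centre.mean())
    # indices of local brightness maxima, at least ~1 second apart
    peaks, _ = find_peaks(np.asarray(brightness), distance=max(1, int(fps)))
    return peaks / fps  # timestamps of the maxima in seconds
The gaps between these timestamps form a compact per-video signature that a k-nearest-neighbour search (annoy, cyflann, hnsw, ...) can then compare across videos, independently of resolution or frame rate.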
I had the same problem too, so I wrote a program myself.
The problem was that I had videos of various formats and resolutions, so I needed to take a hash of each video frame and compare them.
https://github.com/gklc811/duplicate_video_finder
You can just change the directories at the top and you are good to go:
from os import path, walk, makedirs, rename
from time import perf_counter  # time.clock was removed in Python 3.8, use perf_counter instead
from imagehash import average_hash
from PIL import Image
from cv2 import VideoCapture, CAP_PROP_FRAME_COUNT, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS
from json import dump, load
from multiprocessing import Pool, cpu_count

input_vid_dir = r'C:\Users\gokul\Documents\data\\'
json_dir = r'C:\Users\gokul\Documents\db\\'
analyzed_dir = r'C:\Users\gokul\Documents\analyzed\\'
duplicate_dir = r'C:\Users\gokul\Documents\duplicate\\'

if not path.exists(json_dir):
    makedirs(json_dir)

if not path.exists(analyzed_dir):
    makedirs(analyzed_dir)

if not path.exists(duplicate_dir):
    makedirs(duplicate_dir)


def write_to_json(filename, data):
    file_full_path = json_dir + filename + ".json"
    with open(file_full_path, 'w') as file_pointer:
        dump(data, file_pointer)
    return


def video_to_json(filename):
    # hash every frame of one video and write the result to a JSON file
    file_full_path = input_vid_dir + filename
    start = perf_counter()
    size = round(path.getsize(file_full_path) / 1024 / 1024, 2)
    video_pointer = VideoCapture(file_full_path)
    frame_count = int(video_pointer.get(CAP_PROP_FRAME_COUNT))
    width = int(video_pointer.get(CAP_PROP_FRAME_WIDTH))
    height = int(video_pointer.get(CAP_PROP_FRAME_HEIGHT))
    fps = int(video_pointer.get(CAP_PROP_FPS))
    success, image = video_pointer.read()
    video_hash = {}
    while success:
        # perceptual (average) hash of the frame, keyed by its string form
        frame_hash = average_hash(Image.fromarray(image))
        video_hash[str(frame_hash)] = filename
        success, image = video_pointer.read()
    stop = perf_counter()
    time_taken = stop - start
    print("Time taken for ", file_full_path, " is : ", time_taken)
    data_dict = dict()
    data_dict['size'] = size
    data_dict['time_taken'] = time_taken
    data_dict['fps'] = fps
    data_dict['height'] = height
    data_dict['width'] = width
    data_dict['frame_count'] = frame_count
    data_dict['filename'] = filename
    data_dict['video_hash'] = video_hash
    write_to_json(filename, data_dict)
    return


def multiprocess_video_to_json():
    files = next(walk(input_vid_dir))[2]
    processes = cpu_count()
    print(processes)
    pool = Pool(processes)
    start = perf_counter()
    pool.starmap_async(video_to_json, zip(files))  # each filename passed as a single argument
    pool.close()
    pool.join()
    stop = perf_counter()
    print("Time Taken : ", stop - start)


def key_with_max_val(d):
    max_value = 0
    required_key = ""
    for k in d:
        if d[k] > max_value:
            max_value = d[k]
            required_key = k
    return required_key


def duplicate_analyzer():
    # compare the per-video hash sets and move likely duplicates
    files = next(walk(json_dir))[2]
    data_dict = {}
    for file in files:
        filename = json_dir + file
        with open(filename) as f:
            data = load(f)
        video_hash = data['video_hash']
        count = 0
        duplicate_file_dict = dict()
        for key in video_hash:
            count += 1
            if key in data_dict:
                if data_dict[key] in duplicate_file_dict:
                    duplicate_file_dict[data_dict[key]] = duplicate_file_dict[data_dict[key]] + 1
                else:
                    duplicate_file_dict[data_dict[key]] = 1
            else:
                data_dict[key] = video_hash[key]
        if duplicate_file_dict:
            duplicate_file = key_with_max_val(duplicate_file_dict)
            duplicate_percentage = ((duplicate_file_dict[duplicate_file] / count) * 100)
            if duplicate_percentage > 50:
                file = file[:-5]
                print(file, " is dup of ", duplicate_file)
                src = analyzed_dir + file
                tgt = duplicate_dir + file
                if path.exists(src):
                    rename(src, tgt)
                # else:
                #     print("File already moved")


def mv_analyzed_file():
    files = next(walk(json_dir))[2]
    for filename in files:
        filename = filename[:-5]
        src = input_vid_dir + filename
        tgt = analyzed_dir + filename
        if path.exists(src):
            rename(src, tgt)
        # else:
        #     print("File already moved")


if __name__ == '__main__':
    mv_analyzed_file()
    multiprocess_video_to_json()
    mv_analyzed_file()
    duplicate_analyzer()
