I would like to plot a scatter plot over a background image, but the origin of the image is at the top left corner. I need the bottom left of the image to be the origin, so that the bottom left corresponds to (0, -0.75) and the top right to (14, 1.25).
So I need to scale the image to my data: pH 0 to 14 on the x axis and Eh -0.75 to 1.25 V on the y axis.
Eh = [327.06 561.34 506.82 602.58 745.02 745.04 ...
693.96 682.9 648.46 468 412.18 522.94 459.74]./1e3; % V
pH = [6.4 4.51 5.08 4.98 3.63 4.31 6.24 6.22 4.94 6.44 7.05 5.09 4.63]; %pH
I=imread('Fe_Pourbaix.png');
xImg = linspace(0, 14, size(I, 2));
yImg = linspace(-0.75, 1.25, size(I, 1));
image(xImg, yImg, I, 'CDataMapping', 'scale');
hold on;
plot(pH, Eh,'*','LineWidth',2);
grid on;
Do I need to flip the data? The image below shows the x and y axes I need to produce so that I can overlay my data.
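For comparison, here is a minimal Python/matplotlib sketch of the same overlay (Python is an assumption here, since the code above is MATLAB; the file name and the pH/Eh values are simply reused from the snippet above). The extent argument of imshow anchors the picture to data coordinates, and with the default origin='upper' the picture stays right side up while the y axis still increases upward, so the data itself does not need to be flipped:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

# hypothetical overlay sketch; file name and values taken from the MATLAB code above
img = mpimg.imread('Fe_Pourbaix.png')

Eh_mV = [327.06, 561.34, 506.82, 602.58, 745.02, 745.04, 693.96,
         682.9, 648.46, 468, 412.18, 522.94, 459.74]
Eh = [v / 1e3 for v in Eh_mV]  # convert to V
pH = [6.4, 4.51, 5.08, 4.98, 3.63, 4.31, 6.24, 6.22, 4.94, 6.44, 7.05, 5.09, 4.63]

fig, ax = plt.subplots()
# extent = (left, right, bottom, top) maps the picture onto data coordinates;
# the default origin='upper' keeps the picture right side up while the y axis
# runs from -0.75 at the bottom to 1.25 at the top
ax.imshow(img, extent=(0, 14, -0.75, 1.25), aspect='auto')
ax.plot(pH, Eh, '*', markersize=10)
ax.set_xlabel('pH')
ax.set_ylabel('Eh (V)')
ax.grid(True)
plt.show()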
I have tried an algorithm to rotate the images, but it doesn't straighten them, since an image can have any orientation from 0 to 180/360 degrees.
Example images:
Image 1:
Image 2:
Image 3:
My code:
# import the necessary packages
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
import copy

def plt_imshow(title, image):
    # display the image
    plt.imshow(image)
    plt.title(title)
    plt.grid(False)
    plt.show()

args = {"image": "C:/Users/tskta/Desktop/Images rotation/all rotations/75.png"}

# load the image from disk
image = cv2.imread(args["image"])

# convert the image to greyscale and flip the foreground and background
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# grab the x,y coordinates of all pixel values that are greater than zero
# and form a rotated bounding box
coords = np.column_stack(np.where(thresh > 0))
angle = cv2.minAreaRect(coords)[-1]
print(angle)

if -45 > angle > -89:
    angle = -(90 + angle)
elif angle == -90.0:
    angle = -angle
else:
    angle = -angle
print(angle)

# rotate the image to deskew it
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),
                         flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

# draw the correction angle on the image so we can validate it
cv2.putText(rotated, "Angle: {:.2f} degrees".format(angle),
            (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

# show the output image
print("[INFO] angle: {:.3f}".format(angle))
plt_imshow("Input", image)
plt_imshow("Rotated", rotated)
cv2.imwrite("Downloads/rotated.png", rotated)
Your Python/OpenCV code looks fine except that the angle test does not take into account that the object is going to need to be rotated to portrait mode.
The following does that by computing the direction of the edge with the largest length and then aligning (unrotating) to the vertical (Y) axis.
The code below takes a white rectangle and rotates it between -90 and 90 degrees in 10-degree increments. The assumption is that the documents are scanned so that the top of the bill lies in the top half of the image, which keeps the rotation within that range.
Input:
import cv2
import numpy as np
import math
# unrotation to portrait mode
# read input
img = cv2.imread('vertical_rect.png')
hh, ww = img.shape[:2]
# compute center
cx = ww // 2
cy = hh // 2
# rotated input in range -90 to 90 inclusive
# assume scanned so always in that range
# do unrotation by using the longest edge of rectangle to find the direction
# and unrotate so that it aligns with the vertical (Y) axis upwards
for rotation in range(-90, 100, 10):
    # rotate image
    matrix = cv2.getRotationMatrix2D((cx, cy), rotation, 1.0)
    img_rotated = cv2.warpAffine(img, matrix, (ww, hh), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

    # convert to gray
    gray = cv2.cvtColor(img_rotated, cv2.COLOR_BGR2GRAY)

    # threshold (must be convex, so use morphology to close up if needed)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]

    # find all non-zero pixel coordinates
    # swap x and y for conversion from numpy y,x to opencv x,y ordering via transpose
    coords = np.column_stack(np.where(thresh.transpose() > 0))

    # get minAreaRect and its vertices
    rotrect = cv2.minAreaRect(coords)
    pts = cv2.boxPoints(rotrect)
    #print(pts)

    list = []

    # compute edge lengths and directions (in range -90 to 90) and put into list
    polygon = img_rotated.copy()
    for i in range(0, 4):
        i1 = i
        i2 = i + 1 if i != 3 else 0
        pt1 = pts[i1]
        pt2 = pts[i2]
        pt1x = pt1[0]
        pt2x = pt2[0]
        pt1y = pt1[1]
        pt2y = pt2[1]
        length = math.sqrt((pt2x - pt1x) * (pt2x - pt1x) + (pt2y - pt1y) * (pt2y - pt1y))
        direction = (180 / math.pi) * math.atan2((pt2y - pt1y), (pt2x - pt1x))
        list.append([length, direction, pt1])

        # optional: draw lines around box points on input (rotated)
        # points start at left most point (and top most to break ties)
        # and go clockwise around rectangle
        # first point is blue and second point is green to show direction
        x1 = int(pt1x)
        y1 = int(pt1y)
        x2 = int(pt2x)
        y2 = int(pt2y)
        if i == 0:
            cv2.circle(polygon, (x1, y1), 7, (255, 0, 0), -1)
            cv2.circle(polygon, (x2, y2), 5, (0, 255, 0), -1)
            cv2.line(polygon, (x1, y1), (x2, y2), (0, 0, 255), 2)
        else:
            cv2.line(polygon, (x1, y1), (x2, y2), (0, 0, 255), 2)

    # sort list on length with largest first
    def takeFirst(elem):
        return elem[0]

    list.sort(key=takeFirst, reverse=True)

    # get direction of largest length and correct to -90 to 90 range
    item = list[0]
    dir = item[1]
    if dir < -90:
        dir = dir + 180
    if dir > 90:
        dir = dir - 180

    # correct to portrait mode
    # if dir is negative or zero, then add 90; otherwise subtract 90
    # dir = 0 occurs for both 0 and 90, so both cannot be determined -- pick one
    if dir <= 0:
        unrotate = dir + 90
    else:
        unrotate = dir - 90
    print("initial rotation=", rotation, "edge direction=", dir, "unrotation angle=", unrotate)

    # unrotate image
    M = cv2.getRotationMatrix2D((cx, cy), unrotate, 1.0)
    img_unrotated = cv2.warpAffine(img_rotated, M, (ww, hh), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

    #cv2.imshow('img_rotated', img_rotated)
    cv2.imshow('polygon', polygon)
    cv2.imshow('img_unrotated', img_unrotated)
    cv2.waitKey()

    # optional save output
    #cv2.imwrite("vertical_rect_{0}.png".format(rotation), img_unrotated)
Results:
initial rotation= -90 edge direction= 0.0 unrotation angle= 90.0
initial rotation= -80 edge direction= -10.024489033132815 unrotation angle= 79.97551096686719
initial rotation= -70 edge direction= -19.922231300954746 unrotation angle= 70.07776869904525
initial rotation= -60 edge direction= -30.000354626107335 unrotation angle= 59.99964537389266
initial rotation= -50 edge direction= -39.98688344835102 unrotation angle= 50.01311655164898
initial rotation= -40 edge direction= -50.00064126059898 unrotation angle= 39.99935873940102
initial rotation= -30 edge direction= -60.00891266459192 unrotation angle= 29.991087335408082
initial rotation= -20 edge direction= -70.07776869904525 unrotation angle= 19.92223130095475
initial rotation= -10 edge direction= -79.97551600323786 unrotation angle= 10.024483996762143
initial rotation= 0 edge direction= 90.0 unrotation angle= 0.0
initial rotation= 10 edge direction= 79.97550927006476 unrotation angle= -10.024490729935238
initial rotation= 20 edge direction= 70.07776869904525 unrotation angle= -19.92223130095475
initial rotation= 30 edge direction= 59.99507091100694 unrotation angle= -30.00492908899306
initial rotation= 40 edge direction= 50.013119348261796 unrotation angle= -39.986880651738204
initial rotation= 50 edge direction= 39.99936207636015 unrotation angle= -50.00063792363985
initial rotation= 60 edge direction= 29.991085160541278 unrotation angle= -60.008914839458726
initial rotation= 70 edge direction= 19.922231300954746 unrotation angle= -70.07776869904525
initial rotation= 80 edge direction= 10.02448431018454 unrotation angle= -79.97551568981547
initial rotation= 90 edge direction= 0.0 unrotation angle= 90.0
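If you want to apply the same longest-edge idea to one of your real scans rather than the synthetic rectangle, a minimal sketch could look like the following (the file name scan.png is a placeholder, and the inversion plus Otsu threshold are assumptions borrowed from your own preprocessing, so they may need tuning for real images):
import cv2
import numpy as np
import math

# read the scan and note its size
img = cv2.imread('scan.png')
hh, ww = img.shape[:2]

# invert and threshold so the document pixels become non-zero
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# non-zero coordinates in OpenCV x,y order; float32 keeps minAreaRect happy
coords = np.column_stack(np.where(thresh.transpose() > 0)).astype(np.float32)
pts = cv2.boxPoints(cv2.minAreaRect(coords))

# direction of the longest edge of the rotated bounding box
edges = []
for i in range(4):
    p1, p2 = pts[i], pts[(i + 1) % 4]
    length = math.hypot(p2[0] - p1[0], p2[1] - p1[1])
    direction = math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))
    edges.append((length, direction))
_, direction = max(edges)

# fold into the -90 to 90 range, then unrotate to the vertical (Y) axis
if direction < -90:
    direction += 180
if direction > 90:
    direction -= 180
unrotate = direction + 90 if direction <= 0 else direction - 90

# rotate about the center and save
M = cv2.getRotationMatrix2D((ww // 2, hh // 2), unrotate, 1.0)
deskewed = cv2.warpAffine(img, M, (ww, hh), flags=cv2.INTER_CUBIC,
                          borderMode=cv2.BORDER_REPLICATE)
cv2.imwrite('scan_deskewed.png', deskewed)
The folding and portrait-correction logic is the same as in the loop above, just applied once to a single input.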
I'm trying to get a 10-inch touch display (native resolution: 1280x800) to switch to 1024x768, but everything I try is either ignored or results in an error. The display reportedly supports the resolution, though; xrandr --verbose reports (I'm using the default VESA driver):
xrandr: Failed to get size of gamma for output default
Screen 0: minimum 640 x 480, current 640 x 480, maximum 1280 x 800
default connected 640x480+0+0 (0x180) normal (normal) 0mm x 0mm
Identifier: 0x17d
Timestamp: 635022581
Subpixel: horizontal rgb
Clones:
CRTC: 0
CRTCs: 0
Transform: 1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
filter:
1280x800 (0x17e) 0.0MHz
h: width 1280 start 0 end 0 total 1280 skew 0 clock 0.0KHz
v: height 800 start 0 end 0 total 800 clock 0.0Hz
800x600 (0x17f) 0.0MHz
h: width 800 start 0 end 0 total 800 skew 0 clock 0.0KHz
v: height 600 start 0 end 0 total 600 clock 0.0Hz
640x480 (0x180) 0.0MHz *current
h: width 640 start 0 end 0 total 640 skew 0 clock 0.0KHz
v: height 480 start 0 end 0 total 480 clock 0.0Hz
1024x768 (0x181) 0.0MHz
h: width 1024 start 0 end 0 total 1024 skew 0 clock 0.0KHz
v: height 768 start 0 end 0 total 768 clock 0.0Hz
If I try to change the resolution via xrandr --output default --mode 1024x768, I just get:
xrandr: Failed to get size of gamma for output default
xrandr: Configure crtc 0 failed
As far as I can see, only the second line is relevant to my problem. I don't know why xrandr would want to configure crtc 0, though; I only have the touch screen connected.
Failing that, I tried to configure the mode directly using the following xorg.conf:
Section "InputClass"
Identifier "calibration"
MatchProduct "DIALOGUE INC PenMount USB"
Option "Calibration" "95 911 93 919"
Option "SwapAxes" "0"
EndSection
Section "Monitor"
Identifier "disp0"
Modeline "1024x768_60.00" 63.50 1024 1072 1176 1328 768 771 775 798 -hsync +vsync
Option "PreferredMode" "1024x768_60.00"
EndSection
Section "Device"
Identifier "card0"
Driver "vesa"
EndSection
Section "Screen"
Identifier "src0"
Device "card0"
Monitor "disp0"
SubSection "Display"
Modes "1024x768_60.00" "1024x768"
EndSubSection
EndSection
Unfortunately, this doesn't work either; Xorg.log shows the following:
[634043.694] (II) VESA(0): Not using mode "1024x768_60.00" (no mode of this name)
[634043.694] (II) VESA(0): Not using built-in mode "1024x768" (no mode of this name)
Why doesn't this work? And what else can I try to get the display to switch to 1024x768?
I've uploaded the full logfile to Pastebin.
You can try creating the missing mode manually: generate a modeline with cvt, register it with xrandr --newmode, attach it to the output with --addmode, and then switch to it:
cvt -r 1024 768
xrandr --newmode "1024x768_60.00" 63.50 1024 1072 1176 1328 768 771 775 798 -hsync +vsync
xrandr --addmode default 1024x768_60.00
xrandr --output default --mode 1024x768_60.00
I have written code in MATLAB which allows me to automatically crop regions of interest in one image and perform cross-correlation with a second image. The correlated regions are identified by a quiver plot, which I would like to extend across the two images (they are arranged in a vertical montage). However, the quiver arrows appear only in the upper image.
Would anyone know why this happens (and how to fix it)? Hopefully it's something straightforward. I've included some of my code below. Thanks!
Initial = rgb2gray(imread('img9.png'));
Secondary = rgb2gray(imread('img8.png'));
XC = imcrop(Initial, [0 0 1300 350]);
YC = imcrop(Secondary, [0 0 1300 350]);
Multi = cat(1,XC,YC);
VertLinSpace1 = linspace(0, 300, 7);
HorzLinSpace1 = linspace(0, 1250, 24);
imshow(Multi)
axis( [0 1300 0 700])
axis on
for k1 = 1:length(VertLinSpace1)
    for k2 = 1:length(HorzLinSpace1)
        template = imcrop(Multi, [HorzLinSpace1(k2) VertLinSpace1(k1), 50 50]);
        c = normxcorr2(template,YC);
        [ypeak, xpeak] = find(c==max(c(:)));
        yO = ypeak-size(template,1);
        xO = xpeak-size(template,2);
        x1 = HorzLinSpace1(k2); y1 = VertLinSpace1(k1); x2 = xO+1; y2 = yO+1;
        a = [x1, y1, 0];
        b = [x2, y2, 0];
        Q = [x1 y1
             x2 y2];
        QX = Q(:,1);
        QY = Q(:,2);
        [~,UV] = gradient(Q);
        UVX = [UV(1,1); 0];
        UVY = [UV(1,2); 0];
        figure(1)
        hold on
        quiver(QX, QY, UVX, UVY, 'color','red')
        hold off
    end
end
Initial image
Comparison image
I simplified the arrow XY computation, but the essence was to add 350 so that the arrows point down into the second half of the montage. I added 25 to indicate the middle of the template (the tail of the arrows). This may not be a 100% accurate solution because, for example, the middle of a 50 by 50 pixel square is at 25.5 (25 pixels on either side), but it solves the main issue and you can take it from here.
Initial = rgb2gray(imread('https://i.stack.imgur.com/5fTa1.png'));
Secondary = rgb2gray(imread('https://i.stack.imgur.com/sg1co.png'));
XC = imcrop(Initial, [0 0 1300 350]);
YC = imcrop(Secondary, [0 0 1300 350]);
Multi = cat(1,XC,YC);
VertLinSpace1 = linspace(0, 300, 7);
HorzLinSpace1 = linspace(0, 1250, 24);
imshow(Multi)
axis( [0 1300 0 700])
axis on
hold on
counter = 0;
for kV = 1:length(VertLinSpace1)
    for kH = 1:length(HorzLinSpace1)
        template = imcrop(Multi, [HorzLinSpace1(kH) VertLinSpace1(kV), 50 50]);
        c = normxcorr2(template,YC);
        [ypeak, xpeak] = find(c==max(c(:)));
        fromXY = [HorzLinSpace1(kH)+25, VertLinSpace1(kV)+25];
        addXY = [xpeak-fromXY(1), 350+ypeak-fromXY(2)];
        quiver(fromXY(1), fromXY(2), addXY(1), addXY(2), 'color','red')
        drawnow
    end
end
I'm currently running some data analysis on a lot of pictures, and the code I have running is the following:
close all
clear all
clc
A=imread('Ring_1_frame_120.jpg'); %Load picture
%A01-A010 = xmin ymin width height
%for all vials
A001=imcrop(A,[65 159 95 332]);
A002=imcrop(A,[182 161 95 332]);
A003=imcrop(A,[297 164 95 332]);
A004=imcrop(A,[402 165 90 332]);
A005=imcrop(A,[495 168 90 332]);
A006=imcrop(A,[606 166 90 332]);
A007=imcrop(A,[705 171 90 332]);
A008=imcrop(A,[808 175 90 332]);
A009=imcrop(A,[922 175 90 332]);
A0010=imcrop(A,[1031 175 90 332]);
w = who; % returns the names of all your current variables in a cell.
for i = 1:numel(w)
    % A00 is unique to all the variables you want to process.
    if ~isempty(strfind(w{i}, 'A00'))
        % hard coding greenChannel and extracting the second plane.
        eval(['greenChannel = ',w{i},'(:,:,2)']);
        BW = edge(greenChannel,'Prewitt');
        %figure, imshow(BW);
        %Dilate Lines
        se90 = strel('line', 3, 90);
        se0 = strel('line', 3, 0);
        BWsdil = imdilate(BW, [se90 se0]);
        %figure, imshow(BWsdil), title('dilated gradient mask');
        %Fill Lines
        BWdfill = imfill(BWsdil, 'holes');
        %figure, imshow(BWdfill), title('binary image with filled holes');
        %Clean up borders
        BWnobord = imclearborder(BWdfill, 4);
        %figure, imshow(BWnobord), title('cleared border image');
        %Final cleanup
        seD = strel('diamond',1);
        BWfinal = imerode(BWnobord,seD);
        BWfinal = imerode(BWfinal,seD);
        figure, imshow(BWfinal), title('segmented image');
        L = bwlabel(BWfinal);
        s = regionprops(L,'centroid');
        data(:,:,i) = s; %save the xy coords as data matrix
    end
end
The goal I'm trying to achieve is getting the variable s into a CSV file, but I'm stuck at the last line since it's not working: it keeps overwriting itself. s is a structure ranging from 3x1 to 5x1, and I have also tried to use struct2cell and mat2cell, but that was unsuccessful.
s is a structure, so what you need to do is unpack the structure so that it becomes a matrix, then you can save the matrix to file. s contains a field called Centroid, so you need to access that field.
However, before I address that point: checking to see how many variables are in your workspace so you can determine how many times your loop has to iterate is very bad practice, especially if you are using each variable name as a separate occurrence for processing. I highly recommend you use a structure to encapsulate this, or some sort of cell array.
If I can point to a canonical post, please consult user Adriaan's excellent post on how to avoid dynamic variable names; it sheds light on what I'm about to talk about here.
Something like this would work instead. I'll use a cell array because (at least to me) it is easier. Place your desired coordinates in a 2D matrix where each row is the top-left corner of the location in the image you want to process as well as the width and height (basically suitable for imcrop), then loop over each set of coordinates and place the cropped image as an element in a cell array. Cell array use is important because the dimensions per cropped image are different and so you can't use a normal matrix here:
A=imread('Ring_1_frame_120.jpg'); %Load picture
%A01-A010 = xmin ymin width height
coords = [65 159 95 332; 182 161 95 332; 297 164 95 332; 402 165 90 332;...
495 168 90 332; 606 166 90 332; 705 171 90 332; 808 175 90 332;...
922 175 90 332; 1031 175 90 332];
numImages = size(coords,1);
images = cell(1,numImages);
for ii = 1 : numImages
    images{ii} = imcrop(A,coords(ii,:));
end
images is now a cell array of cropped images taken from the image A. To access the right one, index into the cell array like so:
img = images{ii};
ii is the image number you wish to access. Another comment I'd like to make is about your use of eval: it is really not recommended in your loop either, which is why I decided to change the logic.
Do this instead:
for ii = 1 : numImages
    % hard coding greenChannel and extracting the second plane.
    greenChannel = images{ii}(:,:,2); %// Change for green channel
    %// Now code is the same as before
    BW = edge(greenChannel,'Prewitt');
    %figure, imshow(BW);
    %Dilate Lines
    se90 = strel('line', 3, 90);
    se0 = strel('line', 3, 0);
    BWsdil = imdilate(BW, [se90 se0]);
    %figure, imshow(BWsdil), title('dilated gradient mask');
    %Fill Lines
    BWdfill = imfill(BWsdil, 'holes');
    %figure, imshow(BWdfill), title('binary image with filled holes');
    %Clean up borders
    BWnobord = imclearborder(BWdfill, 4);
    %figure, imshow(BWnobord), title('cleared border image');
    %Final cleanup
    seD = strel('diamond',1);
    BWfinal = imerode(BWnobord,seD);
    BWfinal = imerode(BWfinal,seD);
    figure, imshow(BWfinal), title('segmented image');
    ...
end
Alright, so now how do we get the coordinates of the centroid and save them to file? You simply need to unpack the structure and get the centroid coordinates. Make sure data is declared at the top as a cell array:
data = cell(1, numImages);
The reason you need a cell array (again) is that you don't know how many segmented components there are per cropped image. Now, finally, at the end of your loop:
for ii = 1 : numImages
    %// Your code...
    %//...
    L = bwlabel(BWfinal);
    s = regionprops(L,'centroid');

    %// New code
    data{ii} = reshape([s.Centroid],2,[]).';
end
Now that you have the centroid coordinates stored in a cell array per cropped image, you can either create multiple CSV files, one per cropped image, each containing the centroids of the objects detected in that image, or concatenate all of the centroids into a single matrix.
So, do either:
for ii = 1 : numImages
    csvwrite(sprintf('data%d.csv', ii), data{ii});
end
... or
out = cat(1, data{:});
csvwrite('data.csv', out);
I'm not sure which method you want to use to write to file, but either of those should work.
You need to access struct elements using s(i).Centroid. As a minimal example:
a = imread('circlesBrightDark.png');
bw = a < 100;
s = regionprops(bw,'centroid');
for i = 1:numel(s)
    data(:,:,i) = s(i).Centroid
end