How to color the mask image obtained from a polygon

I am getting a mask out of a polygon; the resulting image is shown below. I can see a white boundary, but I want the area inside the boundary filled white as well, not just the outline.
Here is my code:
sar_polygon = Image.new('L', (int(range_samples), int(azimuth_lines)), 0)
draw = ImageDraw.Draw(sar_polygon)
for vertex in range(len(sar_ver)):
    st = sar_ver[vertex]
    try:
        end = sar_ver[vertex + 1]
    except IndexError:
        end = sar_ver[0]
    draw.line((st[0], st[1], end[0], end[1]), fill=1)
sar_polygon.save('polygon.jpg', 'JPEG')

You are currently drawing lines along the edges. You want the polygon method instead, or perhaps the rectangle method: http://pillow.readthedocs.io/en/5.2.x/reference/ImageDraw.html#PIL.ImageDraw.PIL.ImageDraw.ImageDraw.polygon
from PIL import Image, ImageDraw
sar_polygon = Image.new('RGB', (500, 500))
draw = ImageDraw.Draw(sar_polygon)
sar_ver = ((100,100),(200,100),(200,200),(100,200))
draw.polygon(sar_ver, fill='#f00')
sar_polygon.save('polygon.jpg', 'JPEG')
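For the single-channel ('L') mask from the question specifically, the same polygon call works with a white fill. A minimal sketch, with placeholder dimensions and vertices standing in for the question's range_samples, azimuth_lines and sar_ver:
from PIL import Image, ImageDraw

range_samples, azimuth_lines = 500, 500                       # placeholder dimensions
sar_ver = [(100, 100), (200, 100), (200, 200), (100, 200)]    # placeholder vertices

sar_polygon = Image.new('L', (int(range_samples), int(azimuth_lines)), 0)
draw = ImageDraw.Draw(sar_polygon)
# fill paints the interior, outline keeps the boundary; 255 is white in 'L' mode
draw.polygon(sar_ver, fill=255, outline=255)
# PNG avoids the JPEG compression artifacts that blur a binary mask
sar_polygon.save('polygon.png', 'PNG')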

Related

Problem with images overlapping in pygame

I'm having problems with blitting images to rect objects in pygame. I have a background image blitted to my main pygame window, and also an image blitted to a rect object on the screen which moves. The problem I am having is that the rect object overlaps my background image when it's moving around. I only want to be able to see the green helicopter shape, not the black outline around it. Sorry if I haven't explained this very well; I will try to include all the files I'm using.
Thanks for any help.
import pygame as pg
import random as r
import time

pg.init()

MAX_X = 1190
MAX_Y = 590
MIN_X = 10
MIN_Y = 10
SIZE = 100
SPEED = 1
COLOR = (0, 255, 0)

move_amount = 0

wn = pg.display.set_mode((1200, 600))
BG_IMG = pg.image.load('bg.png').convert()
BG_IMG = pg.transform.scale(BG_IMG, (1200, 600))

class Wall(pg.Rect):
    def __init__(self, posX, posY):
        self.xcor = posX
        self.ycor = posY
        self.rect = None

class Heli(pg.Rect):
    def __init__(self, posX, posY):
        self.image = pg.image.load('art.png').convert()
        self.rect = self.image.get_rect()
        self.xcor = posX
        self.ycor = posY

# top and bottom constant walls
TOP = pg.Rect(MIN_X, MIN_Y, MAX_X, 3)
BOTTOM = pg.Rect(MIN_X, MAX_Y, MAX_X, 3)

heli = Heli(MIN_X, MAX_Y // 2)

# keep moving walls in a list
moving_walls = [Wall(MAX_X, r.randint((MIN_Y + 10), (MAX_Y - 10)))]

# main loop
while True:
    # fill screen
    wn.fill('black')

    # editing objects to move
    # blitting must happen before everything else
    pg.draw.rect(wn, COLOR, heli.rect)
    wn.blit(BG_IMG, (0, 0))
    wn.blit(heli.image, heli.rect)
    heli.rect.y += move_amount
    heli.rect.y += 1

    # use a variable to control how much movement is happening
    # movement happens continuously
    # if key down it moves, if key up it doesn't
    for wall in moving_walls:
        wall.rect = pg.Rect(wall.xcor, wall.ycor, 3, SIZE)
        pg.draw.rect(wn, COLOR, wall.rect)
        wall.xcor -= SPEED
        if wall.xcor < MIN_X + 10:
            wall.xcor = MAX_X
            wall.ycor = r.randint((MIN_Y), (MAX_Y - SIZE))

    # drawing all objects back to the screen
    pg.draw.rect(wn, COLOR, TOP)
    pg.draw.rect(wn, COLOR, BOTTOM)

    # update window
    pg.display.update()

    # event handling
    for ev in pg.event.get():
        if ev.type == pg.KEYDOWN:
            if ev.key == pg.K_UP:
                move_amount = -3
        if ev.type == pg.KEYUP:
            move_amount = 0
        if ev.type == pg.QUIT:
            pg.quit()

    time.sleep(0.01)
You discard the transparency information of the image when you load it. You have to use convert_alpha instead of convert:
self.image = pg.image.load('art.png').convert()        # before
self.image = pg.image.load('art.png').convert_alpha()  # after
The pygame documentation notes that:
The returned Surface will contain the same color format, colorkey and alpha transparency as the file it came from. You will often want to call convert() with no arguments, to create a copy that will draw more quickly on the screen.
For alpha transparency, like in .png images, use the convert_alpha() method after loading so that the image has per pixel transparency.
See also How can I make an Image with a transparent Background in Pygame?
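If the helicopter image carried no alpha channel at all (e.g. a solid black background baked into the file), a colorkey is a possible alternative to per-pixel alpha. A minimal sketch, assuming 'art.png' from the question uses pure black as its background:
import pygame as pg

pg.init()
wn = pg.display.set_mode((1200, 600))

# Per-pixel alpha: the right choice for PNGs that carry transparency.
image = pg.image.load('art.png').convert_alpha()

# Colorkey alternative: declare one solid color fully transparent.
image_ck = pg.image.load('art.png').convert()
image_ck.set_colorkey((0, 0, 0))  # treat pure black as transparent when blitting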

Batch crop square images with different sizes to a circle

I have lots of images of planets in differing sizes, like this:
They are all positioned exactly in the middle of the square images, but with different heights.
Now I want to crop them and make the black border transparent. I tried with convert (ImageMagick 6.9.10-23) like this:
for i in planet_*.jpg; do
    nr=$(echo ${i/planet_/} | sed s/.jpg//g | xargs)
    convert $i -fuzz 1% -transparent black trans/planet_${nr}.png
done
But this leaves some artifacts like:
Is there a command to crop all images to a circle, so the planet is untouched? (It doesn't have to be ImageMagick.)
I could also imagine a solution where I use a larger -fuzz value and then fill all transparent pixels inside the inner planet circle with black.
These are all the planets I want to convert: download zip
Here is one way using Python OpenCV, based on cv2.minEnclosingCircle.
Input:
import cv2
import numpy as np
import skimage.exposure
# read image
img = cv2.imread('planet.jpg')
h, w, c = img.shape
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# get contour
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# get enclosing circle
center, radius = cv2.minEnclosingCircle(big_contour)
cx = int(round(center[0]))
cy = int(round(center[1]))
rr = int(round(radius))
# draw outline circle over input
circle = img.copy()
cv2.circle(circle, (cx,cy), rr, (0, 0, 255), 1)
# draw white filled circle on black background as mask
mask = np.full((h,w), 0, dtype=np.uint8)
cv2.circle(mask, (cx,cy), rr, 255, -1)
# antialias
blur = cv2.GaussianBlur(mask, (0,0), sigmaX=2, sigmaY=2, borderType = cv2.BORDER_DEFAULT)
mask = skimage.exposure.rescale_intensity(blur, in_range=(127,255), out_range=(0,255))
# put mask into alpha channel to make outside transparent
imgT = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
imgT[:,:,3] = mask
# crop the image
ulx = int(cx-rr+0.5)
uly = int(cy-rr+0.5)
brx = int(cx+rr+0.5)
bry = int(cy+rr+0.5)
print(ulx,brx,uly,bry)
crop = imgT[uly:bry+1, ulx:brx+1]
# write result to disk
cv2.imwrite("planet_thresh.jpg", thresh)
cv2.imwrite("planet_circle.jpg", circle)
cv2.imwrite("planet_mask.jpg", mask)
cv2.imwrite("planet_transparent.png", imgT)
cv2.imwrite("planet_crop.png", crop)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("circle", circle)
cv2.imshow("mask", mask)
cv2.waitKey(0)
Threshold image:
Circle on input:
Mask image:
Transparent image:
Cropped transparent image:
Packages to install:
sudo apt install python3-opencv python3-sklearn python3-skimage
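Since the question asks for a batch workflow, here is a minimal sketch that wraps the steps above into a function and loops over the planet_*.jpg files, writing PNGs into the trans/ directory as the question's shell loop did. The function name, the clamping of the crop bounds, and the cast of the mask to uint8 are my own assumptions added for safety; everything else mirrors the answer's code:
import glob
import os

import cv2
import numpy as np
import skimage.exposure

def crop_planet(in_path, out_path):
    # read image and find the largest bright contour
    img = cv2.imread(in_path)
    h, w = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    big_contour = max(contours, key=cv2.contourArea)

    # enclosing circle -> antialiased mask -> alpha channel
    (cx, cy), radius = cv2.minEnclosingCircle(big_contour)
    cx, cy, rr = int(round(cx)), int(round(cy)), int(round(radius))
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.circle(mask, (cx, cy), rr, 255, -1)
    blur = cv2.GaussianBlur(mask, (0, 0), sigmaX=2, sigmaY=2)
    mask = skimage.exposure.rescale_intensity(
        blur, in_range=(127, 255), out_range=(0, 255)).astype(np.uint8)
    imgT = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
    imgT[:, :, 3] = mask

    # crop to the circle's bounding box (clamped to the image) and save
    crop = imgT[max(cy - rr, 0):cy + rr + 1, max(cx - rr, 0):cx + rr + 1]
    cv2.imwrite(out_path, crop)

os.makedirs('trans', exist_ok=True)
for path in glob.glob('planet_*.jpg'):
    name = os.path.basename(path).replace('.jpg', '.png')
    crop_planet(path, os.path.join('trans', name))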

Fast Radial Symmetry Transform (FRST) implementation (python) results in unusual cross-hair looking artifacts

I am trying to implement FRST in Python to detect the centroids of elliptical objects (e.g. cells in microscopy images), but my implementation does not find the seed points (more or less the center points) of the elliptical objects. This effort comes from duplicating the FRST step of Segmentation of Overlapping Elliptical Objects in Silhouette Images (https://ieeexplore.ieee.org/document/7300433). I don't know why I get these artifacts. Interestingly, the patterns (crosses) all point in the same direction per object. Any pointer in the right direction to reproduce the result from the paper (just finding the seed points) will be most welcome.
Original Paper: A Fast Radial Symmetry Transform for Detecting Points of Interest by Loy and Zelinsky (ECCV 2002)
I have also tried the pre-existing python package for FRST: https://pypi.org/project/frst/. This somehow results in the same artifacts. Weird.
First image: Original Image
Second image: Sobel-operated Image
Third image: Magnitude Projection Image
Fourth image: Magnitude Projection Image with positively affected pixels only
Fifth image: FRST'd image: end-product with original image overlaid (shadowed)
Sixth image: FRST'd image by the pre-existing python package with original image overlaid (shadowed).
from scipy.ndimage import gaussian_filter
import numpy as np
from scipy.signal import convolve

# Get orientation and magnitude projection images
def get_proj_img(image, radius):
    workingDims = tuple((e + 2 * radius) for e in image.shape)
    h, w = image.shape
    ori_img = np.zeros(workingDims)  # orientation projection image
    mag_img = np.zeros(workingDims)  # magnitude projection image

    # Kernels for the Sobel operator
    a1 = np.matrix([1, 2, 1])
    a2 = np.matrix([-1, 0, 1])
    Kx = a1.T * a2
    Ky = a2.T * a1

    # Apply the Sobel operator
    sobel_x = convolve(image, Kx)
    sobel_y = convolve(image, Ky)
    sobel_norms = np.hypot(sobel_x, sobel_y)

    # Distances to afpx, afpy (affected pixels)
    dist_afpx = np.multiply(np.divide(sobel_x, sobel_norms, out=np.zeros(sobel_x.shape), where=sobel_norms != 0), radius)
    dist_afpx = np.round(dist_afpx).astype(int)
    dist_afpy = np.multiply(np.divide(sobel_y, sobel_norms, out=np.zeros(sobel_y.shape), where=sobel_norms != 0), radius)
    dist_afpy = np.round(dist_afpy).astype(int)

    for cords, sobel_norm in np.ndenumerate(sobel_norms):
        i, j = cords
        pos_aff_pix = (i + dist_afpx[i, j], j + dist_afpy[i, j])
        neg_aff_pix = (i - dist_afpx[i, j], j - dist_afpy[i, j])
        ori_img[pos_aff_pix] += 1
        ori_img[neg_aff_pix] -= 1
        mag_img[pos_aff_pix] += sobel_norm
        mag_img[neg_aff_pix] -= sobel_norm

    ori_img = ori_img[:h, :w]
    mag_img = mag_img[:h, :w]

    print("Did it go back to the original image size?")
    print(ori_img.shape == image.shape)

    # try normalizing ori and mag img
    return ori_img, mag_img

def get_sn(ori_img, mag_img, radius, kn, alpha):
    ori_img_limited = np.minimum(ori_img, kn)
    fn = np.multiply(np.divide(mag_img, kn), np.power((np.absolute(ori_img_limited) / kn), alpha))
    # convolve fn with a Gaussian filter
    sn = gaussian_filter(fn, 0.25 * radius)
    return sn

def do_frst(image, radius, kn, alpha, ksize=3):
    ori_img, mag_img = get_proj_img(image, radius)
    sn = get_sn(ori_img, mag_img, radius, kn, alpha)
    return sn
Parameters:
radius = 50
kn = 10
alpha = 2
beta = 0
stdfactor = 0.25
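For reference, a minimal usage sketch calling the code above with the parameters listed; the input filename is a placeholder:
import cv2

# hypothetical input file; any single-channel microscopy image will do
image = cv2.imread('cells.png', cv2.IMREAD_GRAYSCALE).astype(float)

sn = do_frst(image, radius=50, kn=10, alpha=2)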

Remove ink mark from paper using OpenCV

I have an image like this:
As you can see, there is a pen mark in the image. I want to remove that mark. How do I do it in OpenCV?
I tried converting the image to HSV, creating a mask for the blue range, and removing it using this code:
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
mask = cv2.inRange(hsv, lower_blue , upper_blue )
res = cv2.bitwise_and(img, img, mask= mask)
It is not working as needed: all the text gets removed as well. How do I fix this?
You can threshold the first channel (the blue channel) of the image. It looks like this:
Here the difference in pixel values between the ink mark and the letters is clearly visible. After thresholding it looks like:
The ink mark can now be removed via morphological closing. However, closing reduces the size of the letters as well, so an erosion is performed, followed by a bitwise OR with the threshold image, to obtain our mask without the ink mark.
If, however, you want the letters to look like the original image, you can store the mask in a numpy array of 255s and perform a bitwise OR with the original image.
The full code I have used is:
import cv2
import numpy as np

img = cv2.imread('ink_mark.png')
wimg = img[:, :, 0]  # blue channel
ret, thresh = cv2.threshold(wimg, 100, 255, cv2.THRESH_BINARY)
kernel = np.ones((7, 7), np.uint8)
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)  # closing removes the thin ink strokes
erosion = cv2.erode(closing, kernel, iterations=1)           # erode before recombining, per the steps above
mask = cv2.bitwise_or(erosion, thresh)                       # combine with the original threshold
# broadcast the mask into all three channels of a white image
white = np.ones(img.shape, np.uint8) * 255
white[:, :, 0] = mask
white[:, :, 1] = mask
white[:, :, 2] = mask
result = cv2.bitwise_or(img, white)
cv2.imshow('image', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Try using inpaint. First create a mask of the ink:
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100,50,50])
upper_blue = np.array([150,255,255])
kernel = np.ones((5,5),np.uint8)
mask = cv2.inRange(hsv, lower_blue, upper_blue)
mask = cv2.dilate(mask,kernel,iterations = 4)
Use the inpaint function to paint in the areas where the mask is white. OpenCV will throw away the original pixels there and guess which pixels should go in their place.
dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
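Put together, a minimal runnable sketch of the inpainting approach; the input and output filenames are placeholders:
import cv2
import numpy as np

img = cv2.imread('ink_mark.png')  # hypothetical filename

# Mask the blue ink in HSV space and grow it to cover anti-aliased edges
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([100, 50, 50])
upper_blue = np.array([150, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
kernel = np.ones((5, 5), np.uint8)
mask = cv2.dilate(mask, kernel, iterations=4)

# Reconstruct the masked region from its surroundings
dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
cv2.imwrite('ink_mark_removed.png', dst)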

ROI is written in lighter colors than original picture

I'm trying to locate an object (here a PWB) in a picture.
First I do this by finding the largest contour. Then I want to write just this object into a new picture, so that in the future I can work on smaller pictures.
The problem, however, is that when I write out this ROI, the picture comes out in a lighter colour than the original one.
CODE:
Original = cv2.imread(picture_location)
image = cv2.imread(mask_location)
img = cv2.medianBlur(image, 29)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
dst = cv2.bitwise_and(Original, image)
roi = cv2.add(dst, Original)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

area = 0
max_x = 0
max_y = 0
min_x = Original.shape[1]
min_y = Original.shape[0]

for i in contours:
    new_area = cv2.contourArea(i)
    if new_area > area:
        area = new_area
        cnt = i

x, y, w, h = cv2.boundingRect(cnt)
min_x = min(x, min_x)
min_y = min(y, min_y)
max_x = max(x + w, max_x)
max_y = max(y + h, max_y)

roi = roi[min_y - 10:max_y + 10, min_x - 10:max_x + 10]
Original = cv2.rectangle(Original, (x - 10, y - 10), (x + w + 10, y + h + 10), (0, 255, 0), 2)

# Writing down the images
cv2.imwrite('Pictures/PCB1/LocatedPCB.jpg', roi)
cv2.imwrite('Pictures/PCB1/LocatedPCBContour.jpg', Original)
Since I don't have 10 reputation yet, I cannot post the pictures. I can, however, provide the links:
Original
Region of Interest
The main question is: how do I get the software to write the ROI in exactly the same colours as the original picture?
I'm an electromechanical engineer, so I'm fairly new to this; remarks on the way I wrote my code would also be appreciated.
The problem is that you first set roi = cv2.add(dst, Original)
and finally crop from this lightened picture here:
roi = roi[min_y-10:max_y+10, min_x-10:max_x+10]
If you want to crop the original image, you should do:
roi = Original[min_y-10:max_y+10, min_x-10:max_x+10]
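A quick way to see why cv2.add brightens the picture: it saturates at 255 rather than wrapping around, so adding an image on top of itself pushes values toward white:
import cv2
import numpy as np

a = np.full((1, 1, 3), 200, np.uint8)
print(cv2.add(a, a))  # [[[255 255 255]]] -- saturates, the result gets lighter
print(a + a)          # [[[144 144 144]]] -- plain NumPy uint8 wraps around instead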
You can perhaps perform edge detection after blurring your image.
How to select the best parameters for Canny edge detection? SEE HERE
lower = 46
upper = 93
edged = cv2.Canny(img, lower, upper)  #--- perform Canny edge detection on the blurred image

kernel = np.ones((5, 5), np.uint8)
dilate = cv2.morphologyEx(edged, cv2.MORPH_DILATE, kernel, 3)  #--- morphological dilation

#--- finds all parent contours; does not find child contours (i.e. contours within another contour)
_, contours, _ = cv2.findContours(dilate, cv2.RETR_EXTERNAL, 1)

max_area = 0
cc = 0
for i in range(len(contours)):  #--- find the contour with the maximum area
    if cv2.contourArea(contours[i]) > max_area:
        max_area = cv2.contourArea(contours[i])
        cc = i

cv2.drawContours(img, contours[cc], -1, (0, 255, 0), 2)  #--- draw the contour having the maximum area
cv2.imshow('Contour of PCB', img)

x, y, w, h = cv2.boundingRect(contours[cc])  #--- straight bounding rectangle for the max-area contour
crop_img = img1[y:y+h, x:x+w]  #--- crop the ROI using the bounding rectangle (img1 is the unblurred original)
cv2.imshow('cropped PCB', crop_img)
