import cv2
from moviepy.video.io.VideoFileClip import VideoFileClip

def process_image(img):
    out = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return out

video_output = 'output_videos/Entry_02.mp4'
video_input = VideoFileClip('input_videos/Entry_02.mp4')
processed_video = video_input.fl_image(process_image)
%time processed_video.write_videofile(video_output, audio=False)
I am exploring VideoFileClip from moviepy to do some video processing, but I am not sure why it generates the frame duplicated in a 3x3 grid in the output video.
Apparently, write_videofile expects an RGB image (3 channels) rather than the single-channel grayscale image that cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) returns. You can get the desired grayscale output by copying out into all three channels:
import numpy as np

def process_image(img):
    image_height = img.shape[0]
    image_width = img.shape[1]
    out = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # copy the grayscale result into each of the three channels (frames must be uint8)
    img = np.zeros((3, image_height, image_width), dtype=np.uint8)
    img[0] = out
    img[1] = out
    img[2] = out
    # move the channel axis to the end: (height, width, 3)
    return np.moveaxis(img, 0, -1)
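As a side note, a shorter equivalent sketch (still assuming the same cv2 import) lets OpenCV replicate the channel for you:
def process_image(img):
    out = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # GRAY2RGB copies the single grayscale channel into three identical channels
    return cv2.cvtColor(out, cv2.COLOR_GRAY2RGB)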
This is my code; it shows all color spaces except YCBCR.
Notation: ConvertLayer converts the color space of the image.
fig = plt.figure(figsize=(9, 9))
for images, labels in train_ds.take(1):
    for i in range(16):
        plt.subplot(5, 5, i + 1)
        color_space = COLOR_SPACES[i % 5]
        image = tf.expand_dims(images[i // 5], axis=0)
        image = augmentation(NormalLayer(color_space)(tf.cast(image, tf.float32)))
        if color_space == "YCBCR":
            image = tf.cast(image, tf.uint8)
        image = ConvertLayer(color_space)(image)
        # image = tf.cast(image * 255.0, tf.uint8)
        lbl = "{}({})".format(class_names[labels[i // 4].numpy().argmax()], color_space)
        plt.imshow(tf.squeeze(image))
        plt.title(lbl)
        plt.axis("off")
    break
I need to visualize it just in matplotlib.
With the following code I was able to visualize the image in matplotlib.
import matplotlib.pyplot as plt
import numpy as np

def rgb2ycbcr(im):
    xform = np.array([[.299, .587, .114], [-.1687, -.3313, .5], [.5, -.4187, -.0813]])
    ycbcr = im.dot(xform.T)
    ycbcr[:, :, [1, 2]] += 128
    return np.uint8(ycbcr)

x = plt.imread("/content/car.jpeg")
y = rgb2ycbcr(x)

fig = plt.figure(figsize=(10, 7))
rows = 1
columns = 2

fig.add_subplot(rows, columns, 1)
plt.imshow(x)
plt.axis('off')
plt.title("RGB")

fig.add_subplot(rows, columns, 2)
plt.imshow(y)
plt.axis('off')
plt.title("YCBCR")
Output:
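Note that plt.imshow treats any 3-channel array as RGB, so the YCBCR panel is really a false-colour view of the converted channels. If you only want to inspect the luma channel, a minimal sketch (reusing y from above) is:
plt.figure()
plt.imshow(y[:, :, 0], cmap='gray')  # Y (luma) channel only
plt.axis('off')
plt.title("Y channel")
plt.show()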
I use this Python code to convert a given image path to a new Tensor. How can I do the same in TensorFlow.js with Node.js?
def process_image(image_path):
    # Read in the image file
    image = tf.io.read_file(image_path)
    # Turn the jpeg image into a numerical Tensor with 3 colour channels (Red, Green, Blue)
    image = tf.image.decode_jpeg(image, channels=3)
    # Convert the colour channel values from the 0-255 range to the 0-1 range
    image = tf.image.convert_image_dtype(image, tf.float32)
    # Resize the image to our desired size (224, 224)
    image = tf.image.resize(image, size=[IMG_SIZE, IMG_SIZE])
    return image
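For reference, a minimal usage sketch of this function, assuming IMG_SIZE = 224 and a hypothetical path images/car.jpg (plus the usual import tensorflow as tf):
import tensorflow as tf

IMG_SIZE = 224
tensor = process_image("images/car.jpg")  # shape (224, 224, 3), dtype float32, values in 0-1
batch = tf.expand_dims(tensor, axis=0)    # add a batch axis -> shape (1, 224, 224, 3)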
Below is a JS function that does the same processing as the Python one:
const tfnode = require('@tensorflow/tfjs-node')
const fs = require('fs')

function processImage(path) {
    const imageSize = 224
    const imageBuffer = fs.readFileSync(path); // can also use the async readFile instead
    // get a tensor out of the buffer
    let image = tfnode.node.decodeImage(imageBuffer, 3);
    // dtype to float, scaled to the 0-1 range
    image = image.cast('float32').div(255);
    // resize the image
    image = tfnode.image.resizeBilinear(image, [imageSize, imageSize]); // can also use tfnode.image.resizeNearestNeighbor
    // add the leftmost axis of size 1 (batch axis)
    image = image.expandDims();
    return image
}
I want to find the center x,y coordinates of all the Turkish delight (lokum) pieces in this picture (marshmallow-like pieces on a tray).
I tried a lot of things but I can't get complete rectangles.
This is the result image I get; the target of my project is shown below it.
import cv2
import numpy as np

img = cv2.imread('yl4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# morphology
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.erode(gray, kernel, iterations=1)
kernel = np.ones((17, 17), np.uint8)
opening = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)

# get binary
gray_blur = cv2.GaussianBlur(opening, (21, 21), 0)
thresh = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
kernel = np.ones((3, 3), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((7, 7), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

# filter small dotted regions
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, None, None, None, 4, cv2.CV_32S)
sizes = stats[1:, -1]  # get the CC_STAT_AREA component
img2 = np.zeros((labels.shape), np.uint8)
for i in range(0, nlabels - 1):
    if sizes[i] >= 275:
        img2[labels == i + 1] = 255

cv2.imshow('frame', img2)
cv2.imshow('gray', gray)
cv2.imwrite("outyl3.jpg", img2)
cv2.waitKey(0)
I want to use this code:
import cv2

fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
cap = cv2.VideoCapture('drunker-1.mp4')

while True:
    grabbed, img = cap.read()
    if not grabbed:
        break
    ori = img.copy()
    gray = cv2.cvtColor(ori, cv2.COLOR_BGR2GRAY)
    img = fgbg.apply(gray)
    ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
to extract the human body from this video:
https://www.youtube.com/watch?v=Xvj4Ud-RKrM
but I got a result like this:
This is completely messy, and I think it is caused by lighting and shadow changes, so how can I reduce this noise? Thanks in advance!
You can try to blur the image using GaussianBlur before background subtraction:
Imgproc.GaussianBlur(resize_blur_Img, resize_blur_Img, new Size(9, 9), 2, 2);
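Since the question's code uses Python and cv2, the equivalent there (a sketch; blur the gray frame before passing it to the subtractor) would be roughly:
gray = cv2.GaussianBlur(gray, (9, 9), 2)
img = fgbg.apply(gray)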
I wish to make a figure (and some text) appear as if they were printed on a page of an open book. Is it possible to transform a jpg image programmatically, or in matplotlib, to achieve such an effect?
You can use a background axis along with an open-source book image to do something like this:
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax2 = fig.add_axes([0.2, 0.3, 0.25, 0.3])
#Plot page from a book
im = plt.imread("./book_page.jpg")
implot = ax1.imshow(im, origin='lower')
# Plot a graph and set background to transparent
x = np.linspace(0,4.*np.pi,40)
y = np.sin(x)
ax2.plot(x,y,'-ro',alpha=0.5)
ax2.set_ylim([-1.1,1.1])
ax2.patch.set_alpha(0.0)
from matplotlib import rc
rc('text', usetex=True)
margin = im.shape[0]*0.075
ytext = im.shape[1]/2.+10
ax1.text(margin, ytext, "The following text is an example")
ax1.text(margin, 90, "Figure 1. Showing a sine function")
plt.show()
Which looks like this,
where I used the following book image.
UPDATE: Added a non-affine transformation based on the scikit-image warp example, but with a Maxwell distribution. The solution saves the matplotlib line plot as an image in order to apply a pointwise transform. Mapping the vector graphics directly may be possible, but I think it would be more complicated...
import numpy as np
import matplotlib.pyplot as plt

def maxwellian_transform_image(image):
    from skimage.transform import PiecewiseAffineTransform, warp
    rows, cols = image.shape[0], image.shape[1]
    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add a Maxwellian offset to the row coordinates
    x = np.linspace(0, 3., src.shape[0])
    dst_rows = src[:, 1] + (np.sqrt(2/np.pi)*x**2 * np.exp(-x**2/2)) * 50
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.0 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = int(image.shape[0] - 1.5 * 50)
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    return out
#Create the new figure
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#Plot page from a book
im = plt.imread("./book_page.jpg")
implot = ax.imshow(im, origin='lower')
# Plot and save graph as image, will need some manipulation of location
temp, at = plt.subplots()
margin = im.shape[0]*0.1
x = np.linspace(margin,im.shape[0]/2.,40)
y = im.shape[1]/3. + 0.1*im.shape[1]*np.sin(12.*np.pi*x/im.shape[0])
at.plot(x,y,'-ro',alpha=0.5)
temp.savefig("lineplot.png",transparent=True)
#Read in plot as an image and apply transform
plot = plt.imread("./lineplot.png")
out = maxwellian_transform_image(plot)
ax.imshow(out, extent=[0,im.shape[1],0,im.shape[0]])
plt.show()
The figure now looks like,