Unable to upload dynamic image frames of a video into S3

I need to detect faces in a video and store the cropped image frames in S3.
I am stuck at s3_client.upload_file: how can I upload the dynamically generated frames of the video?
import numpy as np
import boto3
import cv2

count = 0
loop = 0
cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
cap = cv2.VideoCapture('https://s3-us-west-2.amazonaws.com/test1/news.mp4')
s3_client = boto3.client('s3')

while cap.isOpened():
    ret, frame = cap.read()
    faces = faceCascade.detectMultiScale(
        frame,
        scaleFactor=2.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    print('------', faces)
    # print('------', loop)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        crop_img = frame[y:y + h, x:x + w]
        cv2.imwrite('frame%d.jpg' % count, crop_img)
        s3_client.upload_file('frame%d.jpg', 'test1', 'frame%d.jpg')
        print('---storing in S3----------')
        count += 1

cap.release()
cv2.destroyAllWindows()
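A likely fix, sketched here rather than presented as a tested solution: upload_file receives the literal string 'frame%d.jpg' because the file name is never formatted with count, so every frame overwrites the same S3 key. Formatting the name once and reusing it for both the local write and the S3 key should be enough; alternatively, the crop can be encoded in memory and sent with put_object so no temporary file is needed. The bucket name 'test1' is taken from the question, and it would also be safer to break out of the while loop when cap.read() returns ret == False.
# Inside the for-loop over detected faces, build the file name once and
# reuse it for the local write and for the S3 key.
filename = 'frame%d.jpg' % count
cv2.imwrite(filename, crop_img)
s3_client.upload_file(filename, 'test1', filename)

# Alternatively, skip the temporary file: encode the crop in memory and
# push the bytes straight to S3 with put_object.
ok, buf = cv2.imencode('.jpg', crop_img)
if ok:
    s3_client.put_object(Bucket='test1', Key=filename, Body=buf.tobytes())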

Related

How to convert a Julia plot (via Plots.jl) into a Matrix{RGB{N0f8}}

Matplotlib can convert a plot/figure into a RGB array as follows:
import matplotlib.pyplot as plt
import numpy as np
import io
fig, ax = plt.subplots()
n=256
I, J = np.indices((n, n))
im = ax.imshow((I | J) % 19, interpolation='none')
fig.colorbar(im, ax=ax)
#Convert fig to a RGB array
io_buf = io.BytesIO()
fig.savefig(io_buf, format='raw')
io_buf.seek(0)
fig_arr = np.reshape(np.frombuffer(io_buf.getvalue(), dtype=np.uint8),
                     newshape=(int(fig.bbox.bounds[3]), int(fig.bbox.bounds[2]), -1))
print(f"The shape of the rgb array: {fig_arr.shape}")
plt.show()
It displays:
The shape of the rgb array: (480, 640, 4)
Is it possible to convert similarly a Plots plot into a Matrix{RGB{N0f8}}?
The first part:
using Plots
n = 255
I = [i for i in 0:n, j in 0:n]
h = heatmap(mod.((I .| I'), 19), c= :deep, yflip=true, size=(400, 400), aspect_ratio=:equal)
I searched for a Julia equivalent of numpy.frombuffer, but found no results.
With h holding the plot, as in the code in the OP, the following:
using FileIO
io = IOBuffer()
show(io, MIME("image/png"), h);
strm = Stream(format"PNG", io)
img = load(strm)
leaves img holding the Matrix{RGB{...}}.

Pillow black-and-white image is saved as a black square

I get an image from a PySide6 canvas.
I can display it in the browser properly, but when I save it I get a black square.
When I convert the image into a NumPy array, the values in the array look correct (screenshot of the NumPy array omitted here).
I can't understand what goes wrong while saving the image. I expected it to save properly, but I can't find a way to make it do so.
The class code:
import numpy as np
from PIL import Image, ImageOps
from numpy.typing import NDArray
from typing import Tuple, Union

THRESHOLD = 100
FINAL_SIZE = 28

class Utils:
    @classmethod
    def get_formatted_image(cls, pil_im: Image.Image, as_ndarray=True) -> Union[Image.Image, NDArray]:
        """Converts an image of the number into a NumPy array."""
        pil_im = cls._get_cropped_image(cls._get_gscale_image(pil_im))
        pil_im = pil_im.resize((FINAL_SIZE, FINAL_SIZE), resample=1)
        pil_im = cls._get_gscale_image(pil_im, inverse=False)
        return np.array(pil_im) if as_ndarray else pil_im

    @staticmethod
    def _get_gscale_image(img: Image.Image, inverse=True) -> Image.Image:
        """Converts the image into a black-and-white one."""
        converted_image = img.convert('L')
        return ImageOps.invert(converted_image) if inverse else converted_image

    @classmethod
    def _get_cropped_image(cls, img: Image.Image) -> Image.Image:
        """Crops the image (converts the image into a square one)."""
        img_np = np.array(img)
        boundaries = cls._get_boundaries(img_np)
        img_np = img_np[boundaries[0]:boundaries[2], boundaries[1]:boundaries[3]]
        return Image.fromarray(img_np)

    @staticmethod
    def _get_boundaries(np_img: NDArray, inverted_image=True) -> Tuple:
        """Gets boundaries of the image."""
        x = {k: v for k, v in enumerate(np_img.sum(axis=1) / np_img.shape[1])}
        y = {k: v for k, v in enumerate(np_img.sum(axis=0) / np_img.shape[0])}
        if inverted_image:
            x = list(filter(lambda z: z[1] > 0, x.items()))
            y = list(filter(lambda z: z[1] > 0, y.items()))
        else:
            x = list(filter(lambda z: z[1] < 255, x.items()))
            y = list(filter(lambda z: z[1] < 255, y.items()))
        return min(x)[0], min(y)[0], max(x)[0], max(y)[0]
Everything relevant is given above; there is nothing else I can add.
processed_own_image.show()
gives the expected result, but
processed_own_image.save('temp.png', 'PNG')
gives a black square.
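A hedged diagnostic sketch, since the failing image itself is not shown: .show() and .save() can disagree when the pixel data is not 8-bit, for example when the image ends up in a float or 32-bit integer mode, which a PNG writer then renders as (almost) black. Checking the mode and value range, and forcing 8-bit grayscale before saving, is a reasonable first step. The name processed_own_image is taken from the question; np and Image come from the snippet above.
# Hypothetical checks before saving (processed_own_image is the OP's image)
print(processed_own_image.mode)          # expect 'L' for 8-bit grayscale
print(processed_own_image.getextrema())  # expect values spread over 0..255

# If the data turns out to be float or 32-bit, normalise to uint8 and rebuild
arr = np.array(processed_own_image, dtype=np.float64)
arr = (255 * (arr - arr.min()) / max(arr.max() - arr.min(), 1)).astype(np.uint8)
Image.fromarray(arr, mode='L').save('temp.png', 'PNG')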

I'm not getting the matrix output for camera calibration. How can I get the camera matrix and the other results?

!pip install numpy
!pip install matplotlib
!pip install opencv-python

import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import numpy as np
import glob
import os
import pickle

# Import images: join all .jpg files in the sample chessboard directory into a list
images = glob.glob(r'C:\Users\tamim\Downloads\Checkerboard8x11-20220609T003559Z-001\Checkerboard8x11/*.jpg')

# Define the chessboard rows and columns
ChessboardSize = (11, 8)

# Set the termination criteria for the corner sub-pixel algorithm
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 25, 0.001)

# Prepare the object points: (0,0,0), (1,0,0), (2,0,0), ... They are the same for all images
objectPoints = np.zeros((ChessboardSize[0] * ChessboardSize[1], 3), np.float32)
objectPoints[:, :2] = np.mgrid[0:ChessboardSize[0], 0:ChessboardSize[1]].T.reshape(-1, 2)

# Create the arrays to store the object points and the image points
objectPointsArray = []
imgPointsArray = []

for image in images:
    print(image)
    img = cv2.imread(image)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, ChessboardSize, None)

    # If found, add object points and image points (after refining them)
    if ret:
        objectPointsArray.append(objectPoints)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgPointsArray.append(corners2)

        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, ChessboardSize, corners2, ret)
        cv2.imshow('image', img)
        cv2.waitKey(0)

cv2.destroyAllWindows()

# Calibrate the camera
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectPointsArray, imgPointsArray, gray.shape[::-1], None, None)
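Once calibrateCamera runs without errors, the camera matrix and the other outputs are simply the returned values. A minimal sketch of printing them and saving them with the already-imported pickle module follows; the file name calibration.pkl is just an example, not something from the question.
# Inspect the calibration results returned by cv2.calibrateCamera
print('RMS re-projection error:', ret)
print('Camera matrix:\n', mtx)
print('Distortion coefficients:', dist.ravel())
print('Rotation/translation vectors per image:', len(rvecs), len(tvecs))

# Save everything for later use (example file name)
with open('calibration.pkl', 'wb') as f:
    pickle.dump({'mtx': mtx, 'dist': dist, 'rvecs': rvecs, 'tvecs': tvecs}, f)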

Using an image as marker on a map in Geopandas

I am trying to make a map of a wind farm. I would like to use an image of a wind turbine that I found online as the marker showing the locations of the turbines on the map. I have two problems:
1. Although I was able to import the image using the code below, I cannot visualize it.
2. I do not know how to define it as the marker I would like to use (which, by the way, will require resizing it).
Here is my attempt:
from IPython.display import Image
im = Image('path/turb.png')
display(im)
The output does not show the image as expected; it should look like the turbine picture (not reproduced here).
As for the mapping, I tried the following... without success:
fig, ax = plt.subplots(figsize = (10,10))
turb.plot(ax=ax, marker = im)
plt.show()
I got this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-53-d26dd941b982> in <module>()
1 minx, miny, maxx, maxy = ri.geometry.total_bounds
2 fig, ax = plt.subplots(figsize = (10,10))
----> 3 turb.plot(ax=ax, marker = im)
4 ri.plot(ax=ax)
5 #ax.set_xlim(minx, maxx) # added/substracted value is to give some margin around total bounds
11 frames
/usr/local/lib/python3.7/dist-packages/numpy/core/_asarray.py in asarray(a, dtype, order)
81
82 """
---> 83 return array(a, dtype, copy=False, order=order)
84
85
TypeError: float() argument must be a string or a number, not 'Image'
Here is demo code for the question, together with its resulting plot.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

path = "./images/YCcBa.png"
image = plt.imread(path)[10:10 + 128, 10:10 + 108]

def plot_images(x, y, image, ax):
    for xi, yi in zip(x, y):
        im = OffsetImage(image, zoom=72 / ax.figure.dpi)
        im.image.axes = ax
        # create a bbox for the image at this data point
        ab = AnnotationBbox(im, (xi, yi), frameon=False, pad=0.0)
        ax.add_artist(ab)

x = np.arange(10)
y = np.random.rand(10)

fig, ax = plt.subplots(figsize=(8, 8))
plot_images(x, y, image, ax)
ax.plot(x, y)  # plot lines connecting the (x, y) points
plt.show()
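To apply this to the GeoDataFrame from the question, the point coordinates can be pulled out of the geometry column and passed to plot_images. A sketch under the assumption that turb is a GeoDataFrame of Point geometries (as in the question) and image is the turbine picture loaded as in the demo above:
# turb, image and plot_images are assumed from the question / the demo above
fig, ax = plt.subplots(figsize=(10, 10))
xs = turb.geometry.x               # x coordinate of each turbine point
ys = turb.geometry.y               # y coordinate of each turbine point
turb.plot(ax=ax, alpha=0)          # invisible plot, only to set sensible axis limits
plot_images(xs, ys, image, ax)     # draw the turbine picture at each location
plt.show()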

matplotlib: jpg image special effect/transformation as if its on a page of an open book

I would like a figure (and some text) to appear as if printed on a page of an open book. Is it possible to transform a jpg image programmatically, or within matplotlib, to get such an effect?
You can use a background axis along with an open-source book image to do something like this:
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax2 = fig.add_axes([0.2, 0.3, 0.25, 0.3])
#Plot page from a book
im = plt.imread("./book_page.jpg")
implot = ax1.imshow(im, origin='lower')
# Plot a graph and set background to transparent
x = np.linspace(0,4.*np.pi,40)
y = np.sin(x)
ax2.plot(x,y,'-ro',alpha=0.5)
ax2.set_ylim([-1.1,1.1])
ax2.patch.set_alpha(0.0)
from matplotlib import rc
rc('text', usetex=True)
margin = im.shape[0]*0.075
ytext = im.shape[1]/2.+10
ax1.text(margin, ytext, "The following text is an example")
ax1.text(margin, 90, "Figure 1. Showing a sine function")
plt.show()
Which looks like this,
where I used the following book image.
UPDATE: Added a non-affine transformation based on the scikit-image warp example, but with a Maxwell distribution. The solution saves the matplotlib line plot as an image in order to apply a pointwise transform. A mapping for vector graphics may be possible, but I think it would be more complicated...
import numpy as np
import matplotlib.pyplot as plt
def maxwellian_transform_image(image):
    from skimage.transform import PiecewiseAffineTransform, warp

    rows, cols = image.shape[0], image.shape[1]
    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add a Maxwellian to the row coordinates
    x = np.linspace(0, 3., src.shape[0])
    dst_rows = src[:, 1] + (np.sqrt(2 / np.pi) * x**2 * np.exp(-x**2 / 2)) * 50
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.0 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 50
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    return out
#Create the new figure
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#Plot page from a book
im = plt.imread("./book_page.jpg")
implot = ax.imshow(im, origin='lower')
# Plot and save graph as image, will need some manipulation of location
temp, at = plt.subplots()
margin = im.shape[0]*0.1
x = np.linspace(margin,im.shape[0]/2.,40)
y = im.shape[1]/3. + 0.1*im.shape[1]*np.sin(12.*np.pi*x/im.shape[0])
at.plot(x,y,'-ro',alpha=0.5)
temp.savefig("lineplot.png",transparent=True)
#Read in plot as an image and apply transform
plot = plt.imread("./lineplot.png")
out = maxwellian_transform_image(plot)
ax.imshow(out, extent=[0,im.shape[1],0,im.shape[0]])
plt.show()
The figure now looks like this:
