Keras provides an ImageDataGenerator class for real-time augmentation, but it does not include contrast adjustment or the addition of noise.
How can we apply a random level of noise and a random contrast adjustment during training? Could these functions be passed via the preprocessing_function parameter of the data generator?
Thank you.
You could indeed add noise with preprocessing_function.
Example script:
import random
import numpy as np
def add_noise(img):
    '''Add random Gaussian noise to an image'''
    VARIABILITY = 50
    deviation = VARIABILITY * random.random()
    noise = np.random.normal(0, deviation, img.shape)
    img += noise
    img = np.clip(img, 0., 255.)  # clip returns a new array; assign it back
    return img
# Prepare data-augmenting data generator
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    zoom_range=0.2,
    preprocessing_function=add_noise,
)
# Load a single image as our example
from keras.preprocessing import image
img_path = 'cat_by_irene_mei_flickr.png'
img = image.load_img(img_path, target_size=(299,299))
# Generate distorted images
images = [img]
img_arr = image.img_to_array(img)
img_arr = img_arr.reshape((1,) + img_arr.shape)
for batch in datagen.flow(img_arr, batch_size=1):
    images.append(image.array_to_img(batch[0]))
    if len(images) >= 4:
        break
# Display
import matplotlib.pyplot as plt
f, xyarr = plt.subplots(2,2)
xyarr[0,0].imshow(images[0])
xyarr[0,1].imshow(images[1])
xyarr[1,0].imshow(images[2])
xyarr[1,1].imshow(images[3])
plt.show()
Example images generated by the script:
From the Keras docs:
preprocessing_function: function that will be applied on each input. The function will run before any other modification on it. The function should take one argument: one image (Numpy tensor with rank 3), and should output a Numpy tensor with the same shape.
So, I created a simple function and then used the image augmentation functions from the imgaug module. Note that imgaug requires images to be rank 4.
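A minimal sketch of such a wrapper, assuming imgaug is installed (the imgaug_preprocess name and the specific augmenters are illustrative choices, not the original answer's code):
import numpy as np
import imgaug.augmenters as iaa
from keras.preprocessing.image import ImageDataGenerator

# Random noise plus random contrast, as asked in the question.
augmenter = iaa.Sequential([
    iaa.AdditiveGaussianNoise(scale=(0, 0.05 * 255)),
    iaa.LinearContrast((0.75, 1.5)),
])

def imgaug_preprocess(img):
    '''Adapt imgaug to the preprocessing_function contract: it receives a
    single rank-3 image, while imgaug's batch API expects rank 4.'''
    batch = np.expand_dims(img.astype(np.uint8), axis=0)
    return augmenter.augment_images(batch)[0].astype(np.float32)

datagen = ImageDataGenerator(preprocessing_function=imgaug_preprocess)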
I found in this blog that you can do something as simple as:
from keras.layers import Dense, Activation, GaussianNoise  # Dense/Activation added for completeness
model.add(Dense(32))
model.add(GaussianNoise(0.1))
model.add(Activation('relu'))
model.add(Dense(32))
...
Unfortunately, I can't find an analogous layer for adjusting/augmenting the contrast. But according to this post, you can augment the brightness with
from keras.preprocessing.image import ImageDataGenerator
ImageDataGenerator(brightness_range=[range_min,range_max])
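Failing a built-in option, a random contrast adjustment can also be written by hand and passed through preprocessing_function, in the same spirit as the noise example above (a sketch; the 0.7-1.3 contrast range is an arbitrary assumption):
import random
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

def random_contrast(img):
    '''Scale each pixel's deviation from the image mean by a random factor.'''
    factor = random.uniform(0.7, 1.3)
    mean = img.mean()
    img = (img - mean) * factor + mean
    return np.clip(img, 0., 255.)

datagen = ImageDataGenerator(preprocessing_function=random_contrast)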
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 18:17:32 2023
#author: avnth
"""
import seaborn as sb
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from sklearn.metrics import silhouette_score
from sklearn.metrics import davies_bouldin_score
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler as sc
from mpl_toolkits import mplot3d
import plotly.express as px
dta=pd.read_csv("D:/XLRI/Term-4/ML/Assignment-2/Prpd_2.csv")
dta.head()
dta1=dta.drop("Cid",axis=1,inplace=False)
#dta1=dta1.iloc[:,1:4]
dta1=pd.DataFrame(dta1)
dta1.head()
dta1.describe()
ncl=[]
for i in range(1,15):
    kn = KMeans(n_clusters=i)
    kn.fit(dta1)
    ncl.append(kn.inertia_)
plt.plot(range(1,15), ncl)
# Silhouette method
sil = []
for n in range(2,15):
    kn1 = KMeans(n_clusters=n)
    kn1.fit(dta1)
    sil.append(silhouette_score(dta1, kn1.labels_, metric='euclidean'))
plt.plot(range(2,15), sil)
# Davies-Bouldin index method
db = []
K1 = range(2,8)
for l in K1:
    kn2 = KMeans(n_clusters=l)
    kn2.fit(dta1)
    db.append(davies_bouldin_score(dta1, kn2.labels_))
plt.plot(range(2,8), db)
sa=sc()
sa.fit(dta1)
tdta1=sa.transform(dta1)
tdta1=pd.DataFrame(tdta1)
kmc=KMeans(n_clusters=6)
kmc.fit(tdta1)
clus=kmc.predict(tdta1)
dta["clus"]=clus
dta.head()
clus4=dta[dta.clus==4]
clus4.describe()
clus0=dta[dta.clus==0]
clus0.describe()
clus5=dta[dta.clus==5]
clus5.describe()
clus3=dta[dta.clus==3]
clus3.describe()
sb.scatterplot("Recency","Frequency",data=dta,hue="clus")
sb.scatterplot("Frequency","Money",data=dta,hue="clus")
# Creating dataset
z = dta.Recency
x = dta.Frequency
y = dta.Money
z.head()
x.head()
y.head()
# Creating figure
#fig = plt.figure()
#ax = fig.add_subplot(111,projection ="3d")
#dta=pd.DataFrame(dta)
#dta.head()
#for a in range(0,5):
# ax.scatter(dta.Frequency[dta.clus==a],dta.Recency[dta.clus==a],dta.Money[dta.clus==a],label=a,hue="clus")
#ax.legend()
#plt.title("simple 3D scatter plot")
#plt.show()
#df = px.data.iris()
#fig = px.scatter_3d(df, x='sepal_length', y='sepal_width', z='petal_width',color='petal_length',symbol='species')
#fig=plt.figure()
Hello Friends,
I am a newbie to Python, just learning. I have taken a dataset and clustered it. Now I want to plot it in a 3D scatter plot, with a fourth dimension, my cluster number, as the color. No new color should appear within a cluster: a data point is plotted by its x, y, z attributes but colored by the fourth column, its cluster number. I know how to do this in 2D with hue, but I cannot find a similar option for a 3D plot. Any help will be appreciated. Attaching my code too.
I tried many libraries from online tutorials, but I am not getting exactly what I am looking for. I have attached a sample of how I want it plotted (taken from plotly.com); this is just a replication of how I want to plot.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
sc = ax.scatter(z, x, y, marker=".", c=dta["clus"], s=50, cmap="RdBu")
ax.legend(*sc.legend_elements(), title="clus")  # plt.legend(clus) would not build per-cluster entries
plt.title("4D scatterplot")
ax.set_xlabel("Recency")
ax.set_ylabel("Frequency")
ax.set_zlabel("Money")
plt.show()
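Since the desired sample came from plotly.com, the same plot can also be produced with plotly.express, which is already imported in the script above (a sketch, assuming the clus column created earlier; casting it to string makes the colors discrete rather than a continuous scale):
fig = px.scatter_3d(dta, x="Frequency", y="Recency", z="Money",
                    color=dta["clus"].astype(str))
fig.show()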
I have an image and I want to filter it to separate the text from the background:
After applying the code below:
from skimage import filters
from skimage.filters import threshold_otsu
from skimage import io as skimage_io # So as not to clash with builtin io
dir = r"image_path/a.jpg"
img = skimage_io.imread(dir, as_gray=True, plugin='imageio')
blurred = filters.gaussian(img, sigma=2.0)
sobel = filters.sobel_h(blurred)
blurred += sobel
blurred += sobel
thresh = threshold_otsu(blurred)
# skimage_io.imshow(blurred)
print(thresh)
binary = img < thresh-0.1
skimage_io.imshow(binary)
The image became:
Is there a way to make the result better?
Yes, you can get a better result:
You do account for the noise, but the Gaussian blur you apply is much too strong; it starts to dull out the features you are interested in (the letters).
As @Ziri pointed out in a comment, you are not accounting for the uneven exposure in your image. There are many ways to do that; I will use a rolling ball filter to smooth out the background and combine it with a global thresholding method. Note that this is currently (August 2020) a PR in skimage, but will hopefully get merged soon.
import numpy as np
import matplotlib.pyplot as plt
from skimage import util
from skimage import filters
from skimage import io
from skimage import exposure
# PR 4851; will hopefully be in the library soon(TM)
from skimage.morphology import rolling_ellipsoid
img = io.imread("test.jpg", as_gray=True)
img_inv = util.invert(util.img_as_float(img))
# blurr the image slightly to remove noise
blurred = filters.gaussian(img_inv, sigma=1.0)
# remove background
background = rolling_ellipsoid(blurred, kernel_size=(50, 50), intensity_vertex=0.1)
normalized = blurred - background
# re-normalize intensity
normalized = exposure.rescale_intensity(normalized)
# binarize
binary = normalized > 0.38
binary = util.invert(binary)
plt.imshow(binary, cmap="gray")
plt.gca().axis("off")
plt.show()
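As a side note for readers on a newer scikit-image: the rolling ball filter has since been merged as skimage.restoration.rolling_ball, so the background-removal step above can be written roughly as follows (the radius value is an assumption you would tune for your image):
from skimage.restoration import rolling_ball

background = rolling_ball(blurred, radius=25)
normalized = blurred - background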
Sidenote: It may not be wise to upload an uncensored picture of a passport to SO where it is freely accessible to anybody.
I was trying out one of the sample Python scripts available from the Scikit-image website. The script demonstrates Otsu segmentation at a local level. It works with pictures loaded using data.page(), but not with io.imread. Any suggestions?
https://scikit-image.org/docs/dev/auto_examples/applications/plot_thresholding.html#sphx-glr-auto-examples-applications-plot-thresholding-py
Picture file
Actual output: the local thresholding window is empty.
As you can see, global thresholding has worked, but local thresholding has failed to produce any results.
Strangely, if I use data.page() then everything works fine.
Script
from skimage import io
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from skimage.filters import threshold_otsu,threshold_local
import matplotlib
from skimage import data
from skimage.util import img_as_ubyte
filename="C:\\Lenna.png"
mypic = img_as_ubyte(io.imread(filename))
#image = data.page() #This works - why not io.imread ?
imagefromfile=io.imread(filename)
image = rgb2gray(imagefromfile)
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 35
local_thresh = threshold_local(image, block_size, offset=10)
binary_local = image > local_thresh
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax = axes.ravel()
plt.gray()
ax[0].imshow(image)
ax[0].set_title('Original')
ax[1].imshow(binary_global)
ax[1].set_title('Global thresholding')
ax[2].imshow(binary_local)
ax[2].set_title('Local thresholding')
for a in ax:
    a.axis('off')
plt.show()
If you load the Lenna.png and print its shape, you will see it is a 4-channel RGBA image rather than a 3-channel RGB image:
print(mypic.shape)
# (512, 512, 4)
I am not sure which parts of your code apply to which image, so I am not sure where to go next, but I guess you want to just get the RGB part and discard the alpha:
RGB = mypic[...,:3]
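With the alpha channel dropped, the grayscale conversion then works as expected (a one-line sketch reusing the names from the script above):
image = rgb2gray(mypic[..., :3])  # grayscale from the RGB part only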
I'm trying to build a simple image classifier using scikit-learn. I'm hoping to avoid having to resize and convert each image before training.
Question
Given two different images that are different formats and sizes (1.jpg and 2.png), how can I avoid a ValueError while fitting the model?
I have one example where I train using only 1.jpg, which fits successfully.
I have another example where I train using both 1.jpg and 2.png and a ValueError is produced.
This example will fit successfully:
import numpy as np
from sklearn import svm
import matplotlib.image as mpimg
target = [1, 2]
images = np.array([
# target 1
[mpimg.imread('./1.jpg'), mpimg.imread('./1.jpg')],
# target 2
[mpimg.imread('./1.jpg'), mpimg.imread('./1.jpg')],
])
n_samples = len(images)
data = images.reshape((n_samples, -1))
model = svm.SVC()
model.fit(data, target)
This example will raise a ValueError.
Observe the different 2.png image in target 2.
import numpy as np
from sklearn import svm
import matplotlib.image as mpimg
target = [1, 2]
images = np.array([
# target 1
[mpimg.imread('./1.jpg'), mpimg.imread('./1.jpg')],
# target 2
[mpimg.imread('./2.png'), mpimg.imread('./1.jpg')],
])
n_samples = len(images)
data = images.reshape((n_samples, -1))
model = svm.SVC()
model.fit(data, target)
# ValueError: setting an array element with a sequence.
1.jpg
2.png
For this, I would really recommend using the tools in Keras that are specifically designed to preprocess images in a highly scalable and efficient way.
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
1 Determine the target size of your new pictures
h,w = 150,150 # desired height and width
batch_size = 32
N_images = 100 #total number of images
Keras works in batches, so batch_size just determines how many pictures at once will be processed (this does not impact your end result, just the speed).
2 Create your Image Generator
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    'Pictures_dir',
    target_size=(h, w),
    batch_size=batch_size,
    class_mode='binary')
The object that is going to do the image extraction is ImageDataGenerator. It has the method flow_from_directory which I believe might be useful for you here. It will read the content of the folder Pictures_dir and expect your images to be in folders by class (eg: Pictures_dir/class0 and Pictures_dir/class1). The generator, when called, will then create images from these folders and also import their label (in this example, 'class0' and 'class1').
There are plenty of other arguments to this generator, you can check them out in the Keras documentation (especially if you want to do data augmentation).
Note: this will take any image, be it PNG or JPG, as you requested
If you want to get the mapping from class names to label indices, do:
train_generator.class_indices
# {'class0': 0, 'class1': 1}
You can check what is going on with
plt.imshow(train_generator[0][0][0])
3 Extract all resized images from the Generator
Now you are ready to extract the images from the ImageGenerator:
def extract_images(generator, sample_count):
    images = np.zeros(shape=(sample_count, h, w, 3))
    labels = np.zeros(shape=(sample_count))
    i = 0
    for images_batch, labels_batch in generator:  # we are looping over batches
        images[i*batch_size : (i+1)*batch_size] = images_batch
        labels[i*batch_size : (i+1)*batch_size] = labels_batch
        i += 1
        if i*batch_size >= sample_count:
            # break once every image has been seen, because generators
            # yield indefinitely in a loop
            break
    return images, labels
images, labels = extract_images(train_generator, N_images)
print(labels[0])
plt.imshow(images[0])
Now you have your images all at the same size in images, and their corresponding labels in labels, which you can then feed into any scikit-learn classifier of your choice.
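To close the loop with the original question, a minimal sketch of feeding these into scikit-learn, flattening each image into a feature vector as in the question's own code:
from sklearn import svm

n_samples = len(images)
data = images.reshape((n_samples, -1))  # flatten each (h, w, 3) image
model = svm.SVC()
model.fit(data, labels)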
It's difficult because of the math operations behind the scenes (the details are out of scope here). Even if you managed to do so, say by building your own algorithm, you still would not get the desired result.
I had this issue once with faces of different sizes; maybe this piece of code gives you a starting point.
from PIL import Image
import face_recognition
def face_detected(file_address=None, prefix='detect_'):
    if file_address is None:
        raise FileNotFoundError('File address required')
    image = face_recognition.load_image_file(file_address)
    face_location = face_recognition.face_locations(image)
    if face_location:
        face_location = face_location[0]
        UP = int(face_location[0] - (face_location[2] - face_location[0]) / 2)
        DOWN = int(face_location[2] + (face_location[2] - face_location[0]) / 2)
        LEFT = int(face_location[3] - (face_location[3] - face_location[2]) / 2)
        RIGHT = int(face_location[1] + (face_location[3] - face_location[2]) / 2)
        if UP - DOWN != LEFT - RIGHT:  # `is not` compared identity, not value
            height = UP - DOWN
            width = LEFT - RIGHT
            delta = width - height
            LEFT -= int(delta / 2)
            RIGHT += int(delta / 2)
        pil_image = Image.fromarray(image[UP:DOWN, LEFT:RIGHT, :])
        pil_image.thumbnail((50, 50), Image.ANTIALIAS)
        pil_image.save(prefix + file_address)
        return True
    pil_image = Image.fromarray(image)
    pil_image.thumbnail((200, 200), Image.ANTIALIAS)
    pil_image.save(prefix + file_address)
    return False
Note: I wrote this a long time ago, so it may not be good practice.
Using Python code we are able to create image segments as shown in the screenshot. Our requirement is: how do we select a specific segment in the image and apply a different color to it?
The following is our python snippet
from skimage import io
from skimage.segmentation import felzenszwalb, slic, quickshift
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float  # single import; was duplicated below
import matplotlib.pyplot as plt
from skimage import measure
from skimage import restoration
image = img_as_float(io.imread("leaf.jpg"))
segments = quickshift(image, ratio=1.0, kernel_size=20, max_dist=10,
                      return_tree=False, sigma=0, convert2lab=True, random_seed=42)
fig = plt.figure("Superpixels -- %d segments" % (500))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(image, segments))
plt.axis("off")
plt.show()
do this:
import numpy as np
seg_num = 64                       # desired segment to be colored
color = np.array([1.0, 0.0, 0.0])  # red; float values, since image is img_as_float
image[segments == seg_num] = color # assign the color to every pixel in that segment
You can use OpenCV python module - example:
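The original example was not included; below is a minimal sketch of the same idea with OpenCV, assuming the leaf.jpg image and a precomputed segments array like the one above:
import cv2

img = cv2.imread("leaf.jpg")   # OpenCV loads images as BGR uint8
mask = segments == 64          # boolean mask of the chosen segment
img[mask] = (0, 0, 255)        # red in BGR channel order
cv2.imwrite("leaf_colored.png", img)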