How can we explain the validation loss below?

I have trained a CNN+LSTM encoder-decoder model with attention, varying the number of layers.
The problem I am facing is very strange to me: the validation loss fluctuates around 3.***, as can be seen from the loss graphs below. Configuration 1: 3 CNN layers + 1-layer BLSTM at the encoder and 1-layer LSTM at the decoder.
Configuration 2: 3 CNN layers + 2-layer BLSTM at the encoder and 1-layer LSTM at the decoder.
I have also tried weight decay values from 0.1 down to 0.000001, but I still get this type of loss graph. Note that the accuracy of the model is increasing on both the validation set and the training set. How is it possible that the validation loss stays around 3 while accuracy increases? Can someone explain this?
Thanks
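For intuition on the loss/accuracy mismatch, here is a minimal sketch (an illustration added for this write-up, not from the actual training runs): with C output classes, cross-entropy stays near ln(C) as long as the softmax remains close to uniform, even when the true class already wins the argmax, so accuracy can rise while the loss plateaus around 3 ≈ ln(20).

import torch
import torch.nn.functional as F

# Hypothetical 20-way classifier: the true class barely wins the argmax,
# so accuracy is 100% while cross-entropy stays near ln(20) ≈ 3.
logits = torch.zeros(1, 20)
logits[0, 7] = 0.1                      # true class has the highest score
target = torch.tensor([7])
print(F.cross_entropy(logits, target))  # ~2.90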
class Encoder(nn.Module):
    def __init__(self, height, width, enc_hid_dim, dec_hid_dim, dropout):
        super().__init__()
        self.height = height
        self.enc_hid_dim = enc_hid_dim
        self.width = width
        self.layer0 = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=(3, 3), stride=(1, 1), padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(8),
            nn.MaxPool2d(2, 2),
        )
        self.layer1 = nn.Sequential(
            nn.Conv2d(8, 32, kernel_size=(3, 3), stride=(1, 1), padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.MaxPool2d(2, 2),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(2, 2)
        )
        self.rnn = nn.LSTM(self.height // 8 * 64, self.enc_hid_dim, bidirectional=True)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)
        self.cnn_dropout = nn.Dropout(p=0.2)

    def forward(self, src, in_data_len, train):
        batch_size = src.shape[0]
        out = self.layer0(src)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.dropout(out)  # torch.Size([batch, channel, h, w])
        out = out.permute(3, 0, 2, 1)  # (width, batch, height, channels)
        out = out.contiguous()  # contiguous() is not in-place; its result must be reassigned
        out = out.reshape(-1, batch_size, self.height // 8 * 64)  # (w, batch, height*channels)
        width = out.shape[0]
        src_len = in_data_len.numpy() * (width / self.width)
        src_len = src_len + 0.999  # round up so no sequence length truncates to 0
        src_len = src_len.astype('int')
        out = pack_padded_sequence(out, src_len.tolist(), batch_first=False)
        outputs, hidden_out = self.rnn(out)
        hidden = hidden_out[0]
        cell = hidden_out[1]
        # outputs: (t, b, f*2); hidden/cell: (2, b, f)
        outputs, output_len = pad_packed_sequence(outputs, batch_first=False)
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)))
        cell = torch.tanh(self.fc(torch.cat((cell[-2, :, :], cell[-1, :, :]), dim=1)))
        return outputs, hidden, cell, output_len
class Decoder(nn.Module):
    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
        super().__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.rnn = nn.LSTM((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        self.dropout_layer = nn.Dropout(dropout)

    def forward(self, input, hidden, cell, encoder_outputs, train):
        input = torch.topk(input, 1)[1]
        embedded = self.embedding(input)
        if train:
            embedded = self.dropout_layer(embedded)
        embedded = embedded.permute(1, 0, 2)
        # embedded = [1, batch size, emb dim]
        a = self.attention(hidden, encoder_outputs)
        # a = [batch size, src len]
        a = a.unsqueeze(1)
        # a = [batch size, 1, src len]
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        # encoder_outputs = [batch size, src len, enc hid dim * 2]
        weighted = torch.bmm(a, encoder_outputs)
        weighted = weighted.permute(1, 0, 2)
        # weighted = [1, batch size, enc hid dim * 2]
        rnn_input = torch.cat((embedded, weighted), dim=2)
        # note the comma: the initial state is passed as an (h_0, c_0) tuple
        output, hidden_out = self.rnn(rnn_input, (hidden.unsqueeze(0), cell.unsqueeze(0)))
        hidden = hidden_out[0]
        cell = hidden_out[1]
        assert (output == hidden).all()
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(0)
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim=1))
        return prediction, hidden.squeeze(0), cell.squeeze(0)

Related

Fitting a Keras model for cat and dog image classification takes 50 minutes per epoch. Any way I can reduce the time?

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

image_size = (180, 180)
batch_size = 32

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "PetImages",
    validation_split=0.2,
    subset="training",
    seed=1337,
    image_size=image_size,
    batch_size=batch_size,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "PetImages",
    validation_split=0.2,
    subset="validation",
    seed=1337,
    image_size=image_size,
    batch_size=batch_size,
)

data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.1),
    ]
)

train_ds = train_ds.prefetch(buffer_size=32)
val_ds = val_ds.prefetch(buffer_size=32)

def make_model(input_shape, num_classes):
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    x = data_augmentation(inputs)
    # Entry block
    x = layers.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    previous_block_activation = x  # Set aside residual
    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
        # Project residual
        residual = layers.Conv2D(size, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual
    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)

model = make_model(input_shape=image_size + (3,), num_classes=2)
keras.utils.plot_model(model, show_shapes=True)

epochs = 50
callbacks = [
    keras.callbacks.ModelCheckpoint("save_at_{epoch}.h5"),
]
model.compile(
    optimizer=keras.optimizers.Adam(1e-3),
    loss="binary_crossentropy",
    metrics=["accuracy"],
)
model.fit(
    train_ds, epochs=epochs, callbacks=callbacks, validation_data=val_ds,
)
So the strategy was to begin the model with the data_augmentation preprocessor, followed by a Rescaling layer, with a dropout layer before the final classification layer, as shown in the make_model function.
For training, as you can see, I set epochs=50 and used buffered prefetching for the input data so it yields data from disk without I/O blocking. The rest of the parameters are fairly standard, nothing too complicated, but when I run the code each epoch takes approximately 40 minutes and I don't know why.
Any suggestions?
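A couple of things worth checking first (assumptions on my part, since the post doesn't say what hardware is used): whether TensorFlow actually sees a GPU, and whether decoded images are cached between epochs. A minimal sketch:

import tensorflow as tf

# If this prints an empty list, training is running on CPU, which by
# itself can explain epochs in the tens of minutes for this model.
print(tf.config.list_physical_devices('GPU'))

# Cache decoded/resized images after the first epoch and let tf.data
# choose the prefetch depth instead of the fixed buffer_size=32.
train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)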

Fit Amplitude (Frequency response) of a capacitor with lmfit

I am trying to fit measured data with lmfit.
My goal is to get the parameters of the capacitor from an equivalent circuit diagram.
So I want to create a model with parameters (C, R1, L1, ...) and fit it to the measured data.
I know that the resonance frequency is at the global minimum, where the impedance should equal R1; C is also known.
So I can fix the parameters C and R1, and from the resonance frequency I can calculate L1 too.
I created the model, but the fit doesn't work right.
Maybe someone could help me with this.
Thanks in advance.
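For reference, a minimal sketch of the L1 estimate mentioned above, assuming freq and data hold the measured frequency axis and the impedance magnitude |Z| (variable names are placeholders): at series resonance f0 = 1/(2*pi*sqrt(L1*C)), so

import numpy as np

f0 = freq[np.argmin(data)]                  # resonance = global minimum of |Z|
C = 220e-9                                  # the capacitance fixed in the fit
L1_guess = 1.0 / ((2 * np.pi * f0)**2 * C)  # solve f0 = 1/(2*pi*sqrt(L1*C)) for L1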
import numpy as np
from lmfit import minimize, Parameters
from lmfit import report_fit

params = Parameters()
params.add('C', value=220e-9, vary=False)
params.add('L1', value=0.00001, min=0, max=0.1)
params.add('R1', value=globalmin, vary=False)
params.add('Rp', value=10000, min=0, max=10e20)
params.add('Cp', value=0.1, min=0, max=0.1)

def get_elements(params, freq, data):
    C = params['C'].value
    L1 = params['L1'].value
    R1 = params['R1'].value
    Rp = params['Rp'].value
    Cp = params['Cp'].value
    XC = 1/(1j*2*np.pi*freq*C)
    XL = 1j*2*np.pi*freq*L1
    XP = 1/(1j*2*np.pi*freq*Cp)
    Z1 = R1 + XC*Rp/(XC+Rp) + XL
    real = np.real(Z1*XP/(Z1+XP))
    imag = np.imag(Z1*XP/(Z1+XP))
    model = np.sqrt(real**2 + imag**2)
    #model = np.sqrt(R1**2 + ((2*np.pi*freq*L1 - 1/(2*np.pi*freq*C))**2))
    #model = (np.arctan((2*np.pi*freq*L1 - 1/(2*np.pi*freq*C))/R1)) * 360/((2*np.pi))
    return data - model

out = minimize(get_elements, params, args=(freq, data))
report_fit(out)
# make reconstruction for plotting
C = out.params['C'].value
L1 = out.params['L1'].value
R1 = out.params['R1'].value
Rp = out.params['Rp'].value
Cp = out.params['Cp'].value
XC = 1/(1j*2*np.pi*freq*C)
XL = 1j*2*np.pi*freq*L1
XP = 1/(1j*2*np.pi*freq*Cp)
Z1 = R1 + XC*Rp/(XC+Rp) + XL
real = np.real(Z1*XP/(Z1+XP))
imag = np.imag(Z1*XP/(Z1+XP))
reconst = np.sqrt(real**2 + imag**2)
reconst_phase = np.arctan(imag/real) * 360/(2*np.pi)

'''
PLOTTING
'''
import matplotlib.pyplot as plt

# plot of fitted model vs measured data (AMPLITUDE)
fig = plt.figure(figsize=(40, 15))
file_title = 'Measured Data'
plt.subplot(311)
plt.xscale('log')
plt.yscale('log')
plt.xlim([min(freq), max(freq)])
plt.ylabel('Amplitude')
plt.xlabel('Frequency in Hz')
plt.grid(True, which="both")
plt.plot(freq, z12_fac, 'g', alpha=0.7, label='data')
# Plot impedance of model in magenta
plt.plot(freq, reconst, 'm', label='Reconstruction (Model)')
plt.legend()
# (PHASE)
plt.subplot(312)
plt.xscale('log')
plt.xlim([min(freq), max(freq)])
plt.ylabel('Phase in °')
plt.xlabel('Frequency in Hz')
plt.grid(True, which="both")
plt.plot(freq, z12_deg, 'g', alpha=0.7, label='data')
# Plot phase of model in magenta
plt.plot(freq, reconst_phase, 'm', label='Reconstruction (Model)')
plt.legend()
plt.savefig(file_title)
plt.close(fig)
[Figure: measured data]
[Figure: equivalent circuit diagram (model)]
Edit 1:
Fit report:
[[Fit Statistics]]
    # fitting method   = leastsq
    # function evals   = 28
    # data points      = 4001
    # variables        = 3
    chi-square         = 1197180.70
    reduced chi-square = 299.444897
    Akaike info crit   = 22816.4225
    Bayesian info crit = 22835.3054
##  Warning: uncertainties could not be estimated:
    L1:  at initial value
    Rp:  at boundary
    Cp:  at initial value
    Cp:  at boundary
[[Variables]]
    C:   2.2e-07 (fixed)
    L1:  1.0000e-05 (init = 1e-05)
    R1:  0.06375191 (fixed)
    Rp:  0.00000000 (init = 10000)
    Cp:  0.10000000 (init = 0.1)
Edit 2:
Data can be found here:
https://1drv.ms/u/s!AsLKp-1R8HlZhcdlJER5T7qjmvfmnw?e=r8G2nN
Edit 3:
I have now simplified my model to a simple RLC series. With another set of data this works pretty well: [Figure: plot with another set of data]
def get_elements(params, freq, data):
    C = params['C'].value
    L1 = params['L1'].value
    R1 = params['R1'].value
    #Rp = params['Rp'].value
    #Cp = params['Cp'].value
    #k = params['k'].value
    #freq = np.log10(freq)
    XC = 1/(1j*2*np.pi*freq*C)
    XL = 1j*2*np.pi*freq*L1
    # XP = 1/(1j*2*np.pi*freq*Cp)
    # Z1 = R1*k + XC*Rp/(XC+Rp) + XL
    # real = np.real(Z1*XP/(Z1+XP))
    # imag = np.imag(Z1*XP/(Z1+XP))
    Z1 = R1 + XC + XL
    real = np.real(Z1)
    imag = np.imag(Z1)
    model = np.sqrt(real**2 + imag**2)
    return np.sqrt(np.real(data)**2 + np.imag(data)**2) - model

out = minimize(get_elements, params, args=(freq, data))
Report (chi-square is really high...):
[[Fit Statistics]]
    # fitting method   = leastsq
    # function evals   = 25
    # data points      = 4001
    # variables        = 2
    chi-square         = 5.0375e+08
    reduced chi-square = 125968.118
    Akaike info crit   = 46988.8798
    Bayesian info crit = 47001.4684
[[Variables]]
    C:   3.3e-09 (fixed)
    L1:  5.2066e-09 +/- 1.3906e-08 (267.09%) (init = 1e-05)
    R1:  0.40753691 +/- 24.5685882 (6028.56%) (init = 0.05)
[[Correlations]] (unreported correlations are < 0.100)
    C(L1, R1) = -0.174
With my original set of data I get this:
[Figure: plot of the original (complex) data]
Which is not bad, but also not good. That's why I want to make my model more detailed, so I can fit in higher frequency regions too...
Report of this one:
[[Fit Statistics]]
    # fitting method   = leastsq
    # function evals   = 25
    # data points      = 4001
    # variables        = 2
    chi-square         = 109156.170
    reduced chi-square = 27.2958664
    Akaike info crit   = 13232.2473
    Bayesian info crit = 13244.8359
[[Variables]]
    C:   2.2e-07 (fixed)
    L1:  2.3344e-08 +/- 1.9987e-10 (0.86%) (init = 1e-05)
    R1:  0.17444702 +/- 0.29660571 (170.03%) (init = 0.05)
Please note: I have also changed the model's input data. Now I give the model complex values and then calculate the amplitude. Find this also here: https://1drv.ms/u/s!AsLKp-1R8HlZhcdlJER5T7qjmvfmnw?e=qnrZk1
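One possible refinement (a suggestion added here, not from the original post): with data spanning several frequency decades, an absolute residual lets the high-impedance regions dominate chi-square. Comparing log-magnitudes instead weights every decade comparably. A sketch based on the simplified RLC series model above:

import numpy as np

def get_elements_log(params, freq, data):
    # Same series RLC model, but the residual is taken between log10
    # magnitudes so the region near resonance still influences the fit.
    C = params['C'].value
    L1 = params['L1'].value
    R1 = params['R1'].value
    Z = R1 + 1/(1j*2*np.pi*freq*C) + 1j*2*np.pi*freq*L1
    return np.log10(np.abs(data)) - np.log10(np.abs(Z))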

ValueError: Input 0 of layer sequential is incompatible with the layer

I am trying to run this model but I keep getting this error. There is some mistake regarding the shape of the input data; I played around with it but I still get these errors.
Error:
ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape (None, 32, 32, 3)
# Image size
img_width = 32
img_height = 32

# Define X as the feature variable and Y as the class label
X = []
Y = []
for features, label in data_set:
    X.append(features)
    Y.append(label)

X = np.array(X).reshape(-1, img_width, img_height, 3)
Y = np.array(Y)
print(X.shape)  # Output: (4943, 32, 32, 3)
print(Y.shape)  # Output: (4943,)

# Normalize the pixels
X = X/255.0

# Build the model
cnn = Sequential()
cnn.add(keras.Input(shape = (32,32,1)))
cnn.add(Conv2D(32, (3, 3), activation = "relu", input_shape = X.shape[1:]))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Conv2D(32, (3, 3), activation = "relu", input_shape = X.shape[1:]))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Conv2D(64, (3, 3), activation = "relu", input_shape = X.shape[1:]))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Flatten())
cnn.add(Dense(activation = "relu", units = 150))
cnn.add(Dense(activation = "relu", units = 50))
cnn.add(Dense(activation = "relu", units = 10))
cnn.add(Dense(activation = 'softmax', units = 1))
cnn.summary()
cnn.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])

# Model fit
cnn.fit(X, Y, epochs = 15)
I tried reading about this issue, but still didn't understand it very well.
Your input shape should be (32, 32, 3). Y is your label matrix; I assume it contains N unique integer values, where N is the number of classes. If N = 2 you can treat this as a binary classification problem. In that case your code for the top layer should be
cnn.add(Dense(1, activation = 'sigmoid'))
and your code for compile should be
cnn.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
If you have more than 2 classes, your code should be
cnn.add(Dense(N, activation = 'softmax'))
cnn.compile(loss = 'sparse_categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
where N is the number of classes.
Change this line (the last dimension):
cnn.add(keras.Input(shape = (32,32,3)))
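Putting the two fixes together, a sketch of the corrected model for this data (assuming Y holds integer labels for N > 2 classes; N is a placeholder):

cnn = Sequential()
cnn.add(keras.Input(shape = (32, 32, 3)))   # matches the (None, 32, 32, 3) data
cnn.add(Conv2D(32, (3, 3), activation = "relu"))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Conv2D(32, (3, 3), activation = "relu"))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Conv2D(64, (3, 3), activation = "relu"))
cnn.add(MaxPooling2D(pool_size = (2, 2)))
cnn.add(Flatten())
cnn.add(Dense(150, activation = "relu"))
cnn.add(Dense(50, activation = "relu"))
cnn.add(Dense(N, activation = "softmax"))   # head size matches the class count
cnn.compile(loss = 'sparse_categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
cnn.fit(X, Y, epochs = 15)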

Tensorflow/Keras: volatile validation loss

I've been training a U-Net for single-class small lesion segmentation and have been getting consistently volatile validation loss. I have about 20k images split 70/30 between training and validation sets, so I don't think the issue is too little data. I've tried shuffling and resplitting the sets a few times with no change in volatility, so I don't think the validation set is unrepresentative. I have tried lowering the learning rate with no effect on volatility, and I have tried a few loss functions (dice coefficient, focal Tversky, weighted binary cross-entropy). I'm using a decent amount of augmentation to avoid overfitting. I've also run through all my data (512x512 float64s with corresponding 512x512 int64 masks, both stored as numpy arrays) to double-check that the value ranges, dtypes, etc. aren't screwy, and I even removed any ROIs in the masks under 35 pixels in area, which I thought might be artifacts messing with the loss.
I'm using keras ImageDataGenerator.flow_from_directory. I was initially using zca_whitening and brightness_range augmentation, but I think this causes issues with flow_from_directory and loses the link between mask and image, so I skipped it.
I've tried validation generators with and without shuffle=True. Batch size is 8.
Here's some of my code, happy to include more if it would help:
# loss
from keras.losses import binary_crossentropy
import keras.backend as K
import tensorflow as tf

epsilon = 1e-5
smooth = 1

def dsc(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score

def dice_loss(y_true, y_pred):
    loss = 1 - dsc(y_true, y_pred)
    return loss

def bce_dice_loss(y_true, y_pred):
    loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss

def confusion(y_true, y_pred):
    smooth = 1
    y_pred_pos = K.clip(y_pred, 0, 1)
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.clip(y_true, 0, 1)
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    prec = (tp + smooth) / (tp + fp + smooth)
    recall = (tp + smooth) / (tp + fn + smooth)
    return prec, recall

def tp(y_true, y_pred):
    smooth = 1
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pos = K.round(K.clip(y_true, 0, 1))
    tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
    return tp

def tn(y_true, y_pred):
    smooth = 1
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tn = (K.sum(y_neg * y_pred_neg) + smooth) / (K.sum(y_neg) + smooth)
    return tn

def tversky(y_true, y_pred):
    y_true_pos = K.flatten(y_true)
    y_pred_pos = K.flatten(y_pred)
    true_pos = K.sum(y_true_pos * y_pred_pos)
    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
    alpha = 0.7
    return (true_pos + smooth) / (true_pos + alpha*false_neg + (1 - alpha)*false_pos + smooth)

def tversky_loss(y_true, y_pred):
    return 1 - tversky(y_true, y_pred)

def focal_tversky(y_true, y_pred):
    pt_1 = tversky(y_true, y_pred)
    gamma = 0.75
    return K.pow((1 - pt_1), gamma)
model = BlockModel((len(os.listdir(os.path.join(imageroot, 'train_ct', 'train'))), 512, 512, 1),
                   filt_num=16, numBlocks=4)
#model.compile(optimizer=Adam(learning_rate=0.001), loss=weighted_cross_entropy)
#model.compile(optimizer=Adam(learning_rate=0.001), loss=dice_coef_loss)
model.compile(optimizer=Adam(learning_rate=0.001), loss=focal_tversky)

train_mask = os.path.join(imageroot, 'train_masks')
val_mask = os.path.join(imageroot, 'val_masks')

model.load_weights(model_weights_path)  # I'm initializing with some pre-trained weights from a similar model

data_gen_args_mask = dict(
    rotation_range=10,
    shear_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=[0.8, 1.2],
    horizontal_flip=True,
    #vertical_flip=True,
    fill_mode='nearest',
    data_format='channels_last'
)
data_gen_args = dict(
    **data_gen_args_mask
)

image_datagen_train = ImageDataGenerator(**data_gen_args)
mask_datagen_train = ImageDataGenerator(**data_gen_args)  #_mask)
image_datagen_val = ImageDataGenerator()
mask_datagen_val = ImageDataGenerator()

seed = 1
BS = 8
steps = int(np.floor((len(os.listdir(os.path.join(train_ct, 'train')))) / BS))
print(steps)
val_steps = int(np.floor((len(os.listdir(os.path.join(val_ct, 'val')))) / BS))
print(val_steps)

train_image_generator = image_datagen_train.flow_from_directory(
    train_ct,
    target_size=(512, 512),
    color_mode="grayscale",
    classes=None,
    class_mode=None,
    seed=seed,
    shuffle=True,
    batch_size=BS)
train_mask_generator = mask_datagen_train.flow_from_directory(
    train_mask,
    target_size=(512, 512),
    color_mode="grayscale",
    classes=None,
    class_mode=None,
    seed=seed,
    shuffle=True,
    batch_size=BS)
val_image_generator = image_datagen_val.flow_from_directory(
    val_ct,
    target_size=(512, 512),
    color_mode="grayscale",
    classes=None,
    class_mode=None,
    seed=seed,
    shuffle=True,
    batch_size=BS)
val_mask_generator = mask_datagen_val.flow_from_directory(
    val_mask,
    target_size=(512, 512),
    color_mode="grayscale",
    classes=None,
    class_mode=None,
    seed=seed,
    shuffle=True,
    batch_size=BS)

train_generator = zip(train_image_generator, train_mask_generator)
val_generator = zip(val_image_generator, val_mask_generator)

# make callback for checkpointing
plot_losses = PlotLossesCallback(skip_first=0, plot_extrema=False)
%matplotlib inline
filepath = os.path.join(versionPath, model_version + "_saved-model-{epoch:02d}-{val_loss:.2f}.hdf5")
if reduce:
    cb_check = [ModelCheckpoint(filepath, monitor='val_loss',
                                verbose=1, save_best_only=False,
                                save_weights_only=True, mode='auto', period=1),
                reduce_lr,
                plot_losses]
else:
    cb_check = [ModelCheckpoint(filepath, monitor='val_loss',
                                verbose=1, save_best_only=False,
                                save_weights_only=True, mode='auto', period=1),
                plot_losses]

# train model
history = model.fit_generator(train_generator, epochs=numEp,
                              steps_per_epoch=steps,
                              validation_data=val_generator,
                              validation_steps=val_steps,
                              verbose=1,
                              callbacks=cb_check,
                              use_multiprocessing=False
                              )
And here's how my loss looks:
[Figure: loss curves]
Another potentially relevant thing: I tweaked the flow_from_directory code a bit (added npy to the whitelist). But the training loss looks fine, so I'm assuming the issue isn't there.
Two suggestions:
1. Switch to the classic validation data format (i.e. a numpy array) instead of using a generator. This ensures you use exactly the same validation data every time; if you still see a different validation curve, then something "random" in the validation generator is giving you different data at different epochs. (See the sketch after this list.)
2. Use a fixed set of samples (100 or 1000 should be enough without any data augmentation) for both training and validation. If everything goes well, you should see your network quickly overfit to this dataset, and your training and validation curves should look very similar. If not, debug your network.
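A minimal sketch of the first suggestion (the .npy file names are placeholders): load the validation images and masks once as arrays and pass them directly, so every epoch scores exactly the same samples.

import numpy as np

x_val = np.load('val_images.npy')  # shape (N, 512, 512, 1), scaled like the training images
y_val = np.load('val_masks.npy')   # shape (N, 512, 512, 1)

history = model.fit_generator(train_generator, epochs=numEp,
                              steps_per_epoch=steps,
                              validation_data=(x_val, y_val),
                              verbose=1, callbacks=cb_check)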

How to model a mixture of 3 Normals in PyMC?

There is a question on CrossValidated on how to use PyMC to fit two Normal distributions to data. The answer of Cam.Davidson.Pilon was to use a Bernoulli distribution to assign data to one of the two Normals:
size = 10
p = Uniform("p", 0, 1)  # this is the fraction that comes from mean1 vs mean2
ber = Bernoulli("ber", p=p, size=size)  # produces 1 with proportion p.
precision = Gamma('precision', alpha=0.1, beta=0.1)
mean1 = Normal("mean1", 0, 0.001)
mean2 = Normal("mean2", 0, 0.001)

@deterministic
def mean(ber=ber, mean1=mean1, mean2=mean2):
    return ber*mean1 + (1 - ber)*mean2
Now my question is: how can I do it with three Normals?
Basically, the issue is that you can't use a Bernoulli distribution and 1 - Bernoulli anymore. But how to do it then?
edit: With CDP's suggestion, I wrote the following code:
import numpy as np
import pymc as mc

n = 3
ndata = 500

dd = mc.Dirichlet('dd', theta=(1,)*n)
category = mc.Categorical('category', p=dd, size=ndata)

precs = mc.Gamma('precs', alpha=0.1, beta=0.1, size=n)
means = mc.Normal('means', 0, 0.001, size=n)

@mc.deterministic
def mean(category=category, means=means):
    return means[category]

@mc.deterministic
def prec(category=category, precs=precs):
    return precs[category]

v = np.random.randint(0, n, ndata)
data = (v == 0)*(50 + np.random.randn(ndata)) \
     + (v == 1)*(-50 + np.random.randn(ndata)) \
     + (v == 2)*np.random.randn(ndata)

obs = mc.Normal('obs', mean, prec, value=data, observed=True)
model = mc.Model({'dd': dd,
                  'category': category,
                  'precs': precs,
                  'means': means,
                  'obs': obs})
The traces with the following sampling procedure look good as well. Solved!
mcmc = mc.MCMC(model)
mcmc.sample(50000, 0)
mcmc.trace('means').gettrace()[-1, :]
There is an mc.Categorical object that does just this.

p = [0.2, 0.3, .5]
t = mc.Categorical('test', p)
t.random()
# array(2, dtype=int32)

It returns an int between 0 and len(p)-1. To model the 3 Normals, make p an mc.Dirichlet object (it accepts a length-k array as the hyperparameters; setting the values in the array to be the same sets the prior probabilities to be equal). The rest of the model is nearly identical.
This is a generalization of the model I suggested above.
Update:
Okay, so instead of having different means, we can collapse them all into one:

means = Normal("means", 0, 0.001, size=3)
...

@mc.deterministic
def mean(categorical=categorical, means=means):
    return means[categorical]
