Predicted image is always black for image segmentation model

I have built an image segmentation model (SegNet) for which my IoU shows a decent value, but when I predict on a test image, I always get a dark image. Below is my model; it would be great if you could point out the problem in my network.
from keras.models import Model, Sequential
from keras.layers import Activation, Dense, BatchNormalization, Dropout, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D, Input, Reshape
from keras.optimizers import Adam, SGD
# Encoding layer
img_input = Input(shape= (256, 256, 3))
x = Conv2D(64, (3, 3), padding='same', name='conv1',strides= (1,1))(img_input)
x = BatchNormalization(name='bn1')(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same', name='conv2')(x)
x = BatchNormalization(name='bn2')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(128, (3, 3), padding='same', name='conv3')(x)
x = BatchNormalization(name='bn3')(x)
x = Activation('relu')(x)
x = Conv2D(128, (3, 3), padding='same', name='conv4')(x)
x = BatchNormalization(name='bn4')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(256, (3, 3), padding='same', name='conv5')(x)
x = BatchNormalization(name='bn5')(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same', name='conv6')(x)
x = BatchNormalization(name='bn6')(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same', name='conv7')(x)
x = BatchNormalization(name='bn7')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(512, (3, 3), padding='same', name='conv8')(x)
x = BatchNormalization(name='bn8')(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='conv9')(x)
x = BatchNormalization(name='bn9')(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='conv10')(x)
x = BatchNormalization(name='bn10')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Conv2D(512, (3, 3), padding='same', name='conv11')(x)
x = BatchNormalization(name='bn11')(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='conv12')(x)
x = BatchNormalization(name='bn12')(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='conv13')(x)
x = BatchNormalization(name='bn13')(x)
x = Activation('relu')(x)
x = MaxPooling2D()(x)
x = Dense(1024, activation = 'relu', name='fc1')(x)
x = Dense(1024, activation = 'relu', name='fc2')(x)
# Decoding Layer
x = UpSampling2D()(x)
x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv1')(x)
x = BatchNormalization(name='bn14')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv2')(x)
x = BatchNormalization(name='bn15')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv3')(x)
x = BatchNormalization(name='bn16')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv4')(x)
x = BatchNormalization(name='bn17')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(512, (3, 3), padding='same', name='deconv5')(x)
x = BatchNormalization(name='bn18')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv6')(x)
x = BatchNormalization(name='bn19')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv7')(x)
x = BatchNormalization(name='bn20')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(256, (3, 3), padding='same', name='deconv8')(x)
x = BatchNormalization(name='bn21')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv9')(x)
x = BatchNormalization(name='bn22')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(128, (3, 3), padding='same', name='deconv10')(x)
x = BatchNormalization(name='bn23')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(64, (3, 3), padding='same', name='deconv11')(x)
x = BatchNormalization(name='bn24')(x)
x = Activation('relu')(x)
x = UpSampling2D()(x)
x = Conv2DTranspose(64, (3, 3), padding='same', name='deconv12')(x)
x = BatchNormalization(name='bn25')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(1, (3, 3), padding='same', name='deconv13')(x)
x = BatchNormalization(name='bn26')(x)
x = Activation('sigmoid')(x)
pred = Reshape((256,256))(x)
Below is the code I use to compile, fit, and predict:
model = Model(inputs=img_input, outputs=pred)
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=[iou, dice_coef, precision, recall, accuracy])
model.summary()
hist = model.fit(train_dataset, epochs=epochs_num,
                 validation_data=valid_dataset,
                 steps_per_epoch=train_steps,
                 validation_steps=valid_steps,
                 batch_size=batch_size, shuffle=False, verbose=1)
#prediction
img_1 = cv2.imread(filelist_testx[61], cv2.IMREAD_COLOR)
img_pred = model.predict(img_1.reshape(1, 256, 256, 3))
plt.imshow(img_pred.reshape(256, 256), plt.cm.binary_r)
plt.title('Predicted Output')
plt.show()
I am quite confused why the predicted image is always black when my IoU is 0.82.
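For what it's worth, one common cause (an assumption here, not a confirmed diagnosis) is a train/predict preprocessing mismatch: if the training images were scaled to [0, 1] but cv2.imread returns uint8 values in [0, 255], the sigmoid saturates and the rendered prediction can look black. A minimal sketch of mirroring that preprocessing at predict time and thresholding the output:

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Assumes training images were normalized to [0, 1]; mirror that here.
img = cv2.imread(filelist_testx[61], cv2.IMREAD_COLOR)
img = cv2.resize(img, (256, 256)).astype(np.float32) / 255.0

pred = model.predict(img.reshape(1, 256, 256, 3))[0]  # shape (256, 256)
mask = (pred > 0.5).astype(np.uint8)  # binarize the sigmoid output

plt.imshow(mask, cmap=plt.cm.binary_r)
plt.title('Predicted Output')
plt.show()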

Related

VGG16 Custom Activation Function used in ResNet function

Here's my code:
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from keras import layers
from keras.datasets import cifar10
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
def main():
    # loading data and image augmentation
    (X_train, Y_train), (X_test, Y_test) = keras.datasets.cifar10.load_data()
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.20, random_state=42)
    X_test, X_train, X_val = X_test.astype("float32"), X_train.astype("float32"), X_val.astype("float32")
    Y_train, Y_test, Y_val = keras.utils.to_categorical(Y_train, 10), keras.utils.to_categorical(Y_test, 10), keras.utils.to_categorical(Y_val, 10)
    datagen = keras.preprocessing.image.ImageDataGenerator(rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
    datagen.fit(X_train)
    X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
    X_test = X_test.reshape(X_test.shape[0], 32, 32, 3)
    X_val = X_val.reshape(X_val.shape[0], 32, 32, 3)
    mean = np.mean(X_train)
    std = np.std(X_train)
    X_test = (X_test - mean) / std
    X_val = (X_val - mean) / std
    X_train = (X_train - mean) / std

    # constructing ResNet function
    def residual_module(layer_in, n_filters, kernel_size, padding, initializer, activation, regularizer, triple=False):
        activation2 = 'linear'
        filters2 = layer_in.shape[-1]
        size2 = 1
        conv1 = layers.Conv2D(n_filters, kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularizer)(layer_in)
        conv1 = layers.Activation(activation)(conv1)
        batch1 = layers.BatchNormalization()(conv1)
        conv2 = layers.Conv2D(filters2, size2, padding='same', kernel_regularizer=regularizer)(batch1)
        conv2 = layers.Activation(activation2)(conv2)
        batch2 = layers.BatchNormalization()(conv2)
        if triple == True:
            activation2 = activation
            filters2 = n_filters
            size2 = kernel_size
            conv3 = layers.Conv2D(layer_in.shape[-1], 1, padding='same', activation='linear', kernel_regularizer=regularizer)(batch2)
            batch3 = layers.BatchNormalization()(conv3)
            layer_out = layers.add([batch3, layer_in])
            layer_out = layers.Activation(activation)(layer_out)
        else:
            layer_out = layers.add([batch2, layer_in])
            layer_out = layers.Activation(activation)(layer_out)
        return layer_out

    # VGG16 model with SIREN
    weight_decay = 0.0005
    model = keras.Sequential()
    input_layer = layers.Input(shape=(32, 32, 3))
    model.add(residual_module(input_layer, n_filters=64, kernel_size=(3, 3), padding='same', initializer="he_uniform", activation=tf.math.sin, regularizer=keras.regularizers.l2(weight_decay)))
    first = model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(residual_module(first, 128, (3, 3), 'same', None, tf.math.sin, keras.regularizers.l2(weight_decay)))
    second = model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(residual_module(second, 256, (3, 3), 'same', None, tf.math.sin, keras.regularizers.l2(weight_decay), triple=True))
    third = model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(residual_module(third, 512, (3, 3), 'same', None, tf.math.sin, keras.regularizers.l2(weight_decay), triple=True))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(4096, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10))
    '''
    model.add(layers.Conv2D(64, (3, 3), padding='same', kernel_initializer="he_uniform", activation=tf.math.sin, input_shape=(32,32,3), kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(64, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(128, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(256, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(512, (3, 3), padding='same', activation=math.sin, kernel_regularizer=keras.regularizers.l2(weight_decay)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(4096, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10))
    '''
    # training model
    lr = 0.001
    loss = keras.losses.CategoricalCrossentropy(from_logits=True)
    decayed_lr = tf.keras.optimizers.schedules.ExponentialDecay(lr, 10000, 0.85, True)
    optim = keras.optimizers.SGD(decayed_lr, momentum=0.9, nesterov=True)
    batch_size = 128
    # optim = keras.optimizers.Adam(decayed_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss=loss, optimizer=optim, metrics=["accuracy"])
    checkpoint_filepath = '/Users/JamesRONewton/Documents/Programming/MachineLearning/SIREN_projects/BrainTumor/checkpoint.hdf5'
    checkpoint = keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath, monitor='accuracy', verbose=2, save_best_only=True, save_weights_only=True, mode='auto', save_freq="epoch")
    try:
        model.load_weights(checkpoint_filepath, custom_objects={"sin": tf.math.sin})
    except Exception as e:
        pass
    model.fit(datagen.flow(X_train, Y_train, batch_size=batch_size), steps_per_epoch=len(X_train) / batch_size, epochs=25, callbacks=[checkpoint], validation_data=(X_val, Y_val))
    model.evaluate(X_test, Y_test, verbose=1)
    # saving model
    model.save("VGG16.h5")

if __name__ == '__main__':
    main()
And here's the error I keep getting:
TypeError: The added layer must be an instance of class Layer. Received: layer=KerasTensor(type_spec=TensorSpec(shape=(None, 32, 32, 3), dtype=tf.float32, name=None), name='activation_2/Sin:0', description="created by layer 'activation_2'") of type <class 'keras.engine.keras_tensor.KerasTensor'>.
The errors so far have mostly been about inputting a custom activation function into the ResNet function I created. For example,
TypeError: The added layer must be an instance of class Layer. Received: layer=KerasTensor(type_spec=TensorSpec(shape=(None, 32, 32, 3), dtype=tf.float32, name=None), name='activation/Sin:0', description="created by layer 'activation'") of type <class 'keras.engine.keras_tensor.KerasTensor'>.
So I thought maybe using
layers.Activation(activation)
instead of just putting the activation in the Conv2D layer would fix it, but that clearly did not work, as you can see. I've also tried defining the custom activation function as a class inheriting from layers.Layer, but that also did not work. I used this code to try that:
class Sin(layers.Layer):
    def __init__(self, **kwargs):
        super(Sin, self).__init__(**kwargs)

    def call(self, inputs):
        return tf.math.sin(inputs)
But alas, it did not work.
Update:
I tried using keras backend, but that failed. I also tried using a lambda layer in my ResNet function. Here's my most recent attempt, which combines both:
# custom sinusoidal activation function
def sin(x):
    return K.sin(x)

# constructing ResNet function
def residual_module(layer_in, n_filters, kernel_size, padding, initializer, regularizer):
    conv1 = layers.Conv2D(n_filters, kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularizer)(layer_in)
    conv1 = layers.Lambda(lambda x: sin(x))(conv1)
    batch1 = layers.BatchNormalization()(conv1)
    conv2 = layers.Conv2D(layer_in.shape[-1], 1, padding='same', activation='linear', kernel_regularizer=regularizer)(batch1)
    batch2 = layers.BatchNormalization()(conv2)
    layer_out = layers.add([batch2, layer_in])
    layer_out = layers.Lambda(lambda x: sin(x))(layer_out)
    return layer_out

def residual_module_triple(layer_in, n_filters, kernel_size, padding, initializer, regularizer):
    conv1 = layers.Conv2D(n_filters, kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularizer)(layer_in)
    conv1 = layers.Lambda(lambda x: sin(x))(conv1)
    batch1 = layers.BatchNormalization()(conv1)
    conv2 = layers.Conv2D(n_filters, kernel_size, padding='same', kernel_regularizer=regularizer)(batch1)
    conv2 = layers.Lambda(lambda x: sin(x))(conv2)
    batch2 = layers.BatchNormalization()(conv2)
    conv3 = layers.Conv2D(layer_in.shape[-1], 1, padding='same', activation='linear', kernel_regularizer=regularizer)(batch2)
    batch3 = layers.BatchNormalization()(conv3)
    layer_out = layers.add([batch3, layer_in])
    layer_out = layers.Lambda(lambda x: sin(x))(layer_out)
    return layer_out
I got it up and working so I can resume training!! Here's my fixed code:
def main():
    import os
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    import tensorflow as tf
    from tensorflow import keras
    from keras import layers
    from keras.datasets import cifar10
    from sklearn.model_selection import train_test_split
    import numpy as np
    import matplotlib.pyplot as plt
    from keras import backend as K

    # loading data and image augmentation
    (X_train, Y_train), (X_test, Y_test) = keras.datasets.cifar10.load_data()
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.20, random_state=42)
    X_test, X_train, X_val = X_test.astype("float32"), X_train.astype("float32"), X_val.astype("float32")
    Y_train, Y_test, Y_val = keras.utils.to_categorical(Y_train, 10), keras.utils.to_categorical(Y_test, 10), keras.utils.to_categorical(Y_val, 10)
    datagen = keras.preprocessing.image.ImageDataGenerator(rotation_range=15, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
    datagen.fit(X_train)
    X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
    X_test = X_test.reshape(X_test.shape[0], 32, 32, 3)
    X_val = X_val.reshape(X_val.shape[0], 32, 32, 3)
    mean = np.mean(X_train)
    std = np.std(X_train)
    X_test = (X_test - mean) / std
    X_val = (X_val - mean) / std
    X_train = (X_train - mean) / std

    # custom sinusoidal activation function
    class sin(layers.Layer):
        def __init__(self, **kwargs):
            super(sin, self).__init__(**kwargs)

        def call(self, inputs, **kwargs):
            return K.sin(inputs)

    # constructing ResNet function
    def residual_module(layer_in, n_filters, kernel_size, padding, initializer, regularizer):
        inputs = layer_in
        conv1 = layers.Conv2D(n_filters, kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularizer)(layer_in)
        conv1 = layers.Lambda(lambda x: sin()(x))(conv1)
        batch1 = layers.BatchNormalization()(conv1)
        conv2 = layers.Conv2D(layer_in.shape[-1], 1, padding='same', activation='linear', kernel_regularizer=regularizer)(batch1)
        batch2 = layers.BatchNormalization()(conv2)
        layer_out = layers.add([batch2, inputs])
        layer_out = layers.Lambda(lambda x: sin()(x))(layer_out)
        return layer_out

    def residual_module_triple(layer_in, n_filters, kernel_size, padding, initializer, regularizer):
        inputs = layer_in
        conv1 = layers.Conv2D(n_filters, kernel_size, padding=padding, kernel_initializer=initializer, kernel_regularizer=regularizer)(layer_in)
        conv1 = layers.Lambda(lambda x: sin()(x))(conv1)
        batch1 = layers.BatchNormalization()(conv1, training=True)
        conv2 = layers.Conv2D(n_filters, kernel_size, padding='same', kernel_regularizer=regularizer)(batch1)
        conv2 = layers.Lambda(lambda x: sin()(x))(conv2)
        batch2 = layers.BatchNormalization()(conv2, training=True)
        conv3 = layers.Conv2D(layer_in.shape[-1], 1, padding='same', activation='linear', kernel_regularizer=regularizer)(batch2)
        batch3 = layers.BatchNormalization()(conv3, training=True)
        layer_out = layers.add([batch3, inputs])
        layer_out = layers.Lambda(lambda x: sin()(x))(layer_out)
        return layer_out

    # VGG16 model with SIREN
    weight_decay = 0.0005
    inputs = layers.Input(shape=(32, 32, 3))
    x = residual_module(inputs, n_filters=64, kernel_size=(3, 3), padding='same', initializer="he_uniform", regularizer=keras.regularizers.l2(weight_decay))
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    x = residual_module(x, 128, (3, 3), 'same', None, keras.regularizers.l2(weight_decay))
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    x = residual_module_triple(x, 256, (3, 3), 'same', None, keras.regularizers.l2(weight_decay))
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    x = residual_module_triple(x, 512, (3, 3), 'same', None, keras.regularizers.l2(weight_decay))
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(4096, activation="relu")(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(4096, activation="relu")(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(10)(x)
    model = keras.Model(inputs, outputs)

    # training model
    lr = 0.001
    loss = keras.losses.CategoricalCrossentropy(from_logits=True)
    decayed_lr = tf.keras.optimizers.schedules.ExponentialDecay(lr, 10000, 0.85, True)
    optim = keras.optimizers.SGD(decayed_lr, momentum=0.9, nesterov=True)
    batch_size = 128
    model.compile(loss=loss, optimizer=optim, metrics=["accuracy"])
    checkpoint_filepath = '/Users/JamesRONewton/Documents/Programming/MachineLearning/SIREN_projects/BrainTumor/checkpoint.hdf5'
    checkpoint = keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath, monitor='accuracy', verbose=2, save_best_only=True, save_weights_only=True, mode='auto', save_freq="epoch")
    try:
        model.load_weights(checkpoint_filepath)
    except Exception as e:
        pass
    model.fit(datagen.flow(X_train, Y_train, batch_size=batch_size), steps_per_epoch=len(X_train) / batch_size, epochs=25, callbacks=[checkpoint], validation_data=(X_val, Y_val))
    model.evaluate(X_test, Y_test, verbose=1)
    # saving model
    model.save("VGG16.h5")

if __name__ == '__main__':
    main()
Two issues with my code were causing the errors:

1. When I added a layer with Sequential.add(), the call didn't return anything (e.g. "first" was just a NoneType), so I instead kept the output tensor of each call in "x" and chained them together with the Model class.
2. I figured out how to modify the "sin" class I defined to accept multiple arguments. Also, I decided to pass a class call to the Lambda layer instead of the plain function, because otherwise it kept passing the Tensor returned from the function rather than the function itself.
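As a minimal sketch of that first point (assuming tensorflow.keras), the functional API returns tensors that can be chained, whereas Sequential.add() returns None:

import tensorflow as tf
from tensorflow.keras import layers

inputs = layers.Input(shape=(32, 32, 3))
x = layers.Conv2D(64, 3, padding='same')(inputs)  # a functional call returns a tensor
x = layers.Lambda(tf.math.sin)(x)                 # wraps the sine op as a Layer
outputs = layers.Dense(10)(x)
model = tf.keras.Model(inputs, outputs)
# By contrast, Sequential().add(...) returns None, so storing its result
# (like "first" above) and passing it onward fails.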

RuntimeError: mat1 and mat2 shapes cannot be multiplied (1280x5 and 6400x4096)?

Defining AlexNet using the following code, I can train successfully. But when I want to see the output of each layer, I get an error: RuntimeError: mat1 and mat2 shapes cannot be multiplied (1280x5 and 6400x4096).
class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 96, 11, 4),
            nn.ReLU(),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(96, 256, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(3, 2),
            nn.Conv2d(256, 384, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(384, 384, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(384, 256, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(3, 2)
        )
        self.fc = nn.Sequential(
            nn.Linear(256 * 5 * 5, 4096),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(4096, 10)
        )

    def forward(self, img):
        feature = self.conv(img)
        output = self.fc(feature.view(img.shape[0], -1))
        return output

X = torch.randn(1, 1, 224, 224)
for name, layer in net.named_children():
    X = layer(X)
    print(name, X.shape)
Could you help me?
You forgot to flatten the output of self.conv in the for loop. You can split it into two loops: one for the convolutional layers and one for the fully connected ones.
X = torch.randn(1, 1, 224, 224)
for name, layer in net.conv.named_children():
    X = layer(X)
    print(name, X.shape)
X = X.flatten()  # or X = X.view(X.shape[0], -1)
for name, layer in net.fc.named_children():
    X = layer(X)
    print(name, X.shape)
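Alternatively, a sketch (assuming you can edit the model) that registers the flatten step as its own module, so a single named_children() loop walks the whole network in order:

import torch
import torch.nn as nn

class AlexNetFlat(nn.Module):
    def __init__(self):
        super().__init__()
        # same blocks as above, with an explicit Flatten registered in between
        self.conv = nn.Sequential(
            nn.Conv2d(1, 96, 11, 4), nn.ReLU(), nn.MaxPool2d(3, 2),
            nn.Conv2d(96, 256, 5, 1, 2), nn.ReLU(), nn.MaxPool2d(3, 2),
            nn.Conv2d(256, 384, 3, 1, 1), nn.ReLU(),
            nn.Conv2d(384, 384, 3, 1, 1), nn.ReLU(),
            nn.Conv2d(384, 256, 3, 1, 1), nn.ReLU(), nn.MaxPool2d(3, 2),
        )
        self.flatten = nn.Flatten()
        self.fc = nn.Sequential(
            nn.Linear(256 * 5 * 5, 4096), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(4096, 10),
        )

    def forward(self, img):
        return self.fc(self.flatten(self.conv(img)))

net = AlexNetFlat()
X = torch.randn(1, 1, 224, 224)
for name, layer in net.named_children():  # conv -> flatten -> fc, in order
    X = layer(X)
    print(name, X.shape)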

How to set the validation data for a concatenated deep CNN models?

I have built a CNN model (Keras 2.1.6) with two different branches, each with a different set of input data.
I am trying to use a validation set in the model fitting, but I couldn't work out the valid dimensions for "validation_data" since I have two different sets of data to be tested.
validation_data = ([tvar_test_data, mfcc_test_data], mfcc_test_labels)
With "tvar_test_data" and "mfcc_test_data" have equal dimensions of (40754, 12, 96) (samples, height, width)
The model:
branch_tvar = Sequential()
branch_tvar.add(Conv2D(kernel_size=8, strides=1, filters=6, padding='same',
                       input_shape=(n, m, 1), activation='relu'))
branch_tvar.add(MaxPooling2D(pool_size=2, strides=2))
branch_tvar.add(Flatten())
branch_tvar.add(Dense(512, activation='relu'))
branch_tvar.add(Dropout(0.2))

branch_mfcc = Sequential()
branch_mfcc.add(Conv2D(kernel_size=16, strides=1, filters=5, padding='same',
                       input_shape=(n, m, 1), activation='relu'))
branch_mfcc.add(MaxPooling2D(pool_size=2, strides=2))
branch_mfcc.add(Dense(512, activation='relu'))
branch_mfcc.add(Dropout(0.2))
branch_mfcc.add(Dense(512, activation='relu'))
branch_mfcc.add(Dropout(0.2))

model = Sequential()
model.add(Concatenate([branch_tvar, branch_mfcc]))
model.add(Dense(number_of_classes, activation='softmax'))
optimizer = Adam(lr=0.000384305959)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
seed(2017)
model.fit([tvar_train_data, mfcc_train_data], tvar_train_labels,
          batch_size=128, nb_epoch=10, verbose=1,
          validation_data=validation_data)
Problem solved. The labels dimension was not correct.
from keras.layers import concatenate
from keras.layers import Dropout, Dense, Flatten, MaxPooling2D, Conv2D, Input
from keras.models import Model
from keras.optimizers import Adam

input_tvar = Input(shape=(n, m, 1))
tvar_branch = Conv2D(kernel_size=8, strides=1, filters=6, padding='same',
                     activation='relu')(input_tvar)
tvar_branch = MaxPooling2D(pool_size=2, strides=2)(tvar_branch)
tvar_branch = Flatten()(tvar_branch)
tvar_branch = Dense(512, activation='relu')(tvar_branch)
tvar_branch = Dropout(0.2)(tvar_branch)

input_mfcc = Input(shape=(n, m, 1))
mfcc_branch = Conv2D(kernel_size=16, strides=1, filters=5, padding='same',
                     activation='relu')(input_mfcc)
mfcc_branch = MaxPooling2D(pool_size=2, strides=2)(mfcc_branch)
mfcc_branch = Flatten()(mfcc_branch)
mfcc_branch = Dense(512, activation='relu')(mfcc_branch)
mfcc_branch = Dropout(0.2)(mfcc_branch)
mfcc_branch = Dense(512, activation='relu')(mfcc_branch)
mfcc_branch = Dropout(0.2)(mfcc_branch)

con = concatenate(inputs=[tvar_branch, mfcc_branch])  # merge the two branches
tvar_mfcc = Dense(50)(con)
tvar_mfcc = Dropout(0.3)(tvar_mfcc)
output = Dense(number_of_classes, activation='softmax')(tvar_mfcc)

tvar_mfcc_net = Model(inputs=[input_tvar, input_mfcc], outputs=output)
optimizer = Adam(lr=0.000384305959)
tvar_mfcc_net.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
#%%
seed(2017)
tvar_mfcc_net.fit([tvar_train_data, mfcc_train_data], tvar_train_labels,
                  batch_size=128, epochs=10, verbose=1,
                  validation_data=validation_data)
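For reference, a minimal sketch (assuming the test arrays get the same reshape to (samples, n, m, 1) as the training data) of how validation_data is shaped for a two-input model: a list of input arrays, one per Input layer, paired with a single label array:

# One array per Input layer, plus one label array whose first dimension matches.
validation_data = (
    [tvar_test_data.reshape(-1, n, m, 1),   # input for the tvar branch
     mfcc_test_data.reshape(-1, n, m, 1)],  # input for the mfcc branch
    mfcc_test_labels,                       # shape (samples, number_of_classes)
)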

VPython Object Revolution

I have to use VPython currently, and I want to make a model of the Solar System.
Currently I have all the planets and the orbital rings; however, the actual orbiting is what I'm finding very difficult.
GlowScript 2.7 VPython
from visual import *
# Declaring Celestial Body Objects
Sun = sphere(pos = vec(0, 0, 0), radius = 10, color = color.yellow)
Mercury = sphere(pos = vec(25, 0, 0), radius = 2, color = color.green)
Venus = sphere(pos = vec(40, 0, 0), radius = 2.5, color = color.red)
Earth = sphere(pos = vec(50, 0, 0), radius = 2.65, color = color.blue)
Mars = sphere(pos = vec(70, 0, 0), radius = 2.3, color = color.red)
Jupiter = sphere(pos = vec(90, 0, 0), radius = 3, color = color.orange)
Saturn = sphere(pos = vec(105, 0, 0), radius = 2.9, color = color.orange)
Uranus = sphere(pos = vec(117.5, 0, 0), radius = 2.9, color = color.orange)
Neptune = sphere(pos = vec(135, 0, 0), radius = 2.8, color = color.blue)
Pluto = sphere(pos = vec(165, 0, 0), radius = 1.5, color = color.white)
# Declaring Orbital Rings of Celestial Body Objects
Mercury.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Mercury.pos.x * 2, Mercury.pos.x * 2))
Venus.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Venus.pos.x * 2, Venus.pos.x * 2))
Earth.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Earth.pos.x * 2, Earth.pos.x * 2))
Mars.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Mars.pos.x * 2, Mars.pos.x * 2))
Jupiter.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Jupiter.pos.x * 2, Jupiter.pos.x * 2))
Saturn.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Saturn.pos.x * 2, Saturn.pos.x * 2))
Uranus.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Uranus.pos.x * 2, Uranus.pos.x * 2))
Neptune.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Neptune.pos.x * 2, Neptune.pos.x * 2))
Pluto.ring = ring(pos = vec(0, 0, 0), axis = vec(0, 1, 0), size = vec(0.1, Pluto.pos.x * 2, Pluto.pos.x * 2))
# Infinite Loop
while True:
    Mercury.rotate(angle=radians(360), axis=vec(Mercury.pos.y, Mercury.pos.x, 0), origin=vec(0, 0, 0))
    rate(50)
print("Error! Escaped While Loop!")
When I switch the rotate call to Mercury.rotate(angle = 0.0174533, axis = vec(0, Mercury.pos.x, 0), origin = vec(0, 0, 0)), it rotates properly... yet only for a quarter of the revolution. I've read about everything to do with this, but found nothing.
After the quarter revolution, the planet sometimes "seizures" violently when the angle is a larger number. It just seems like a barrier of sorts.
You should write axis=vec(0, 1, 0). The axis of rotation must always point upward.
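A minimal sketch of the corrected loop (assuming GlowScript VPython, rotating one degree per frame about the fixed +y axis, with the Sun at the origin as the pivot):

while True:
    rate(50)
    # small fixed angle per frame; the axis stays vertical regardless of position
    Mercury.rotate(angle=radians(1), axis=vec(0, 1, 0), origin=vec(0, 0, 0))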

WxPython Resize an Image using GridBagSizer

I am sorry if this is too simple... I tried to add a logo to my first GUI, but I am not sure what the best way to resize it is. At the moment, I am using image.Scale to adjust the logo size and placing it in a GridBagSizer.
self.image = wx.Image("logo11w.png", wx.BITMAP_TYPE_ANY)
w = self.image.GetWidth()
h = self.image.GetHeight()
self.image = self.image.Scale(w/8, h/8)
self.sb1 = wx.StaticBitmap(self.panel, -1, wx.BitmapFromImage(self.image))
self.sizer.Add(self.sb1, pos=(0, 0), flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=15)
I am wondering if there is an automatic way to do this. Since I am using a GridBagSizer, is it possible to leave one "grid" (e.g., a 1 by 1 "box") for my logo? Thanks in advance!
Code:
import wx
class landing_frame(wx.Frame):
    def __init__(self, parent, title):
        wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition, size=wx.Size(800, 600), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
        self.font1 = wx.Font(18, wx.DECORATIVE, wx.ITALIC, wx.BOLD)
        self.InitUI()
        self.Centre()
        self.Show()

    def InitUI(self):
        self.panel = wx.Panel(self)
        self.sizer = wx.GridBagSizer(5, 15)
        self.image = wx.Image("logo11w.png", wx.BITMAP_TYPE_ANY)
        w = self.image.GetWidth()
        h = self.image.GetHeight()
        self.image = self.image.Scale(w / 8, h / 8)
        self.sb1 = wx.StaticBitmap(self.panel, -1, wx.BitmapFromImage(self.image))
        self.sizer.Add(self.sb1, pos=(0, 0), flag=wx.TOP | wx.LEFT | wx.BOTTOM, border=15)
        self.text1 = wx.StaticText(self.panel, label="Welcome!")
        self.sizer.Add(self.text1, pos=(0, 2), flag=wx.TOP | wx.LEFT | wx.BOTTOM, border=15)
        line = wx.StaticLine(self.panel)
        self.sizer.Add(line, pos=(1, 0), span=(1, 5), flag=wx.EXPAND | wx.BOTTOM, border=10)
        self.text2 = wx.StaticText(self.panel, label="Question 1?")
        self.sizer.Add(self.text2, pos=(2, 0), flag=wx.ALL, border=10)
        self.sampleList = ['Op1', 'Op2', 'Op3']
        self.combo = wx.ComboBox(self.panel, 10, choices=self.sampleList)
        self.sizer.Add(self.combo, pos=(2, 1), span=(1, 5), flag=wx.EXPAND | wx.ALL, border=10)
        self.input1 = wx.StaticText(self.panel, 11, label="Please Enter Filepath")
        self.sizer.Add(self.input1, pos=(3, 0), span=(1, 1), flag=wx.ALL, border=10)
        self.input2 = wx.FilePickerCtrl(self.panel, 12, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE)
        self.sizer.Add(self.input2, pos=(3, 1), span=(1, 20), flag=wx.EXPAND | wx.ALL, border=10)
        self.input3 = wx.StaticText(self.panel, 13, label="Additional inputs")
        self.sizer.Add(self.input3, pos=(4, 0), flag=wx.ALL, border=10)
        self.input4 = wx.TextCtrl(self.panel, 14, 'E.g. ...', wx.DefaultPosition, wx.DefaultSize, 0, wx.DefaultValidator)
        self.sizer.Add(self.input4, pos=(4, 1), span=(1, 10), flag=wx.EXPAND | wx.ALL, border=10)
        self.panel.SetSizer(self.sizer)

if __name__ == '__main__':
    app = wx.App(redirect=False, filename="mylogfile.txt")
    landing_frame(None, title="Test")
    app.MainLoop()
Here is the logo (image omitted).
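On the "automatic" resizing question, a minimal sketch (the 100x100 box size is an assumption for illustration) that scales the logo to fit a fixed box while preserving its aspect ratio, instead of hard-coding a /8 factor:

import wx

def scale_to_box(image, box_w, box_h):
    """Scale a wx.Image to fit inside box_w x box_h, keeping its aspect ratio."""
    w, h = image.GetWidth(), image.GetHeight()
    factor = min(float(box_w) / w, float(box_h) / h)  # fit inside the box
    return image.Scale(int(w * factor), int(h * factor), wx.IMAGE_QUALITY_HIGH)

# usage inside InitUI, e.g. reserving roughly a 100x100 cell for the logo:
# self.image = scale_to_box(wx.Image("logo11w.png", wx.BITMAP_TYPE_ANY), 100, 100)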
