Assignment - 6 (Machine Learning)

CNN + Transfer Learning

Author: Jimut Bahan Pal

Note: Use a GPU runtime for the fastest results

A) CNN model:

Build a CNN model to perform classification on the CIFAR-10 dataset. For example, you can have 10 convolutional layers in your model. You should incorporate pooling, dropout, and batch normalization in at least one of the layers. You can choose the other parameters/hyperparameters as you see fit to achieve better results. Compute the accuracy of the model.

Import libraries
In [1]:
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os

import numpy as np

import matplotlib
import matplotlib.pyplot as plt

from sklearn.metrics import confusion_matrix
import itertools

%matplotlib inline
Using TensorFlow backend.
Set the epochs to 150
In [0]:
batch_size = 32  # The default batch size in Keras
num_classes = 10  # Number of classes in the dataset
epochs = 150
data_augmentation = False
In [3]:
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Normalize the data. We first convert the data type to float for the computation.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# Convert class vectors to binary class matrices. This is called one-hot encoding.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train shape: (50000, 32, 32, 3)
50000 train samples
10000 test samples
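A quick illustration of the one-hot encoding (a minimal aside, not part of the original run): an integer label such as 3 becomes a length-10 indicator vector.
In [ ]:
# Hypothetical check: to_categorical turns an integer label into a one-hot row.
print(keras.utils.to_categorical([3], num_classes))
# expected output: [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]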
Create a CNN+DNN
In [0]:
#define the convnet
model = Sequential()
# CONV => BN => RELU => CONV => BN => RELU => POOL => DROPOUT
model.add(Conv2D(32, (3, 3), padding='same',input_shape=x_train.shape[1:],kernel_initializer = 'he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))

model.add(Conv2D(32, (3, 3), padding='same',kernel_initializer = 'he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# CONV => BN => RELU => CONV => BN => RELU => POOL => DROPOUT
model.add(Conv2D(64, (3, 3), padding='same',kernel_initializer = 'he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same',kernel_initializer = 'he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# FLATTEN => DENSE => RELU => DROPOUT
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# a softmax classifier
model.add(Dense(num_classes))
model.add(Activation('softmax'))
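Each 2x2 max-pooling halves the spatial dimensions, so the 32x32 input becomes 16x16 and then 8x8; Flatten therefore emits 8*8*64 = 4096 features into the Dense(512) layer. This can be verified with a summary (an optional check, not shown in the original run):
In [ ]:
model.summary()  # expect ... -> (None, 8, 8, 64) -> flatten (None, 4096) -> dense (None, 512)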
In [0]:
# initiate RMSprop optimizer
opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
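A note on the decay argument (an illustrative aside, assuming the Keras 2.x legacy optimizer behavior): the learning rate shrinks on every batch as lr_t = lr / (1 + decay * iterations).
In [ ]:
# Sketch: effective learning rate after 150 epochs with batch_size = 32.
lr0, decay = 1e-4, 1e-6
updates = 150 * (50000 // 32)       # ~234,300 parameter updates in total
print(lr0 / (1 + decay * updates))  # ~8.1e-05, i.e. a very gentle decay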

Fit the model

In [6]:
data_augmentation = False  # no data augmentation for this run
history = None
if not data_augmentation:
    print('Not using data augmentation.')
    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test),
                        shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        zca_epsilon=1e-06,  # epsilon for ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        # randomly shift images horizontally (fraction of total width)
        width_shift_range=0.1,
        # randomly shift images vertically (fraction of total height)
        height_shift_range=0.1,
        shear_range=0.,  # set range for random shear
        zoom_range=0.,  # set range for random zoom
        channel_shift_range=0.,  # set range for random channel shifts
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        cval=0.,  # value used for fill_mode = "constant"
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False,  # randomly flip images vertically
        # set rescaling factor (applied before any other transformation)
        rescale=None,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    history = model.fit_generator(datagen.flow(x_train, y_train,
                                               batch_size=batch_size),
                                  epochs=epochs,
                                  validation_data=(x_test, y_test),
                                  workers=4)
Not using data augmentation.
Train on 50000 samples, validate on 10000 samples
Epoch 1/150
50000/50000 [==============================] - 18s 352us/step - loss: 1.8769 - accuracy: 0.3318 - val_loss: 1.4258 - val_accuracy: 0.4827
Epoch 2/150
50000/50000 [==============================] - 15s 310us/step - loss: 1.5188 - accuracy: 0.4539 - val_loss: 1.3683 - val_accuracy: 0.5124
Epoch 3/150
50000/50000 [==============================] - 15s 309us/step - loss: 1.3886 - accuracy: 0.5051 - val_loss: 1.1837 - val_accuracy: 0.5799
... (epochs 4-148 omitted for brevity: loss falls steadily while val_accuracy climbs and plateaus around 0.76-0.78 from roughly epoch 90 onward) ...
Epoch 149/150
50000/50000 [==============================] - 16s 324us/step - loss: 0.6848 - accuracy: 0.7749 - val_loss: 0.6738 - val_accuracy: 0.7726
Epoch 150/150
50000/50000 [==============================] - 16s 319us/step - loss: 0.6861 - accuracy: 0.7736 - val_loss: 0.6683 - val_accuracy: 0.7773
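Since data_augmentation was False, the ImageDataGenerator branch above never ran. For reference, a minimal sketch of how its augmented batches could be previewed (illustrative code, not executed in the original notebook):
In [ ]:
# Draw one augmented batch and plot it; x_train is already scaled to [0, 1].
aug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
                         horizontal_flip=True)
x_batch, y_batch = next(aug.flow(x_train, y_train, batch_size=9))
fig, axs = plt.subplots(3, 3, figsize=(6, 6))
for img, ax in zip(x_batch, axs.ravel()):
    ax.imshow(img)
    ax.axis('off')
plt.show()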
In [0]:
# save the model so it can be copied to Drive for later use
model.save('CNN_150_CIFAR_10.h5')
In [9]:
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
In [0]:
! cp CNN_150_CIFAR_10.h5 /content/drive/'My Drive'/
Plot the metrics
In [13]:
def plotmodelhistory(history): 
    fig, axs = plt.subplots(1,2,figsize=(15,5)) 
    # summarize history for accuracy
    axs[0].plot(history.history['accuracy']) 
    axs[0].plot(history.history['val_accuracy']) 
    axs[0].set_title('Model Accuracy')
    axs[0].set_ylabel('Accuracy') 
    axs[0].set_xlabel('Epoch')
    axs[0].legend(['train', 'validate'], loc='upper left')
    # summarize history for loss
    axs[1].plot(history.history['loss']) 
    axs[1].plot(history.history['val_loss']) 
    axs[1].set_title('Model Loss')
    axs[1].set_ylabel('Loss') 
    axs[1].set_xlabel('Epoch')
    axs[1].legend(['train', 'validate'], loc='upper left')
    plt.show()

# list all data in history
print(history.history.keys())

plotmodelhistory(history)
dict_keys(['val_loss', 'val_accuracy', 'loss', 'accuracy'])

Predictions

In [14]:
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])

# make prediction.
pred = model.predict(x_test)
10000/10000 [==============================] - 1s 126us/step
Test loss: 0.6682982161045075
Test accuracy: 0.7773000001907349
In [0]:
def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.
    """
    if not ax:
        ax = plt.gca()

    # Plot the heatmap
    im = ax.imshow(data, **kwargs)

    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")

    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    
    ax.set_xlabel('Predicted Label') 
    ax.set_ylabel('True Label')
    
    return im, cbar

def annotate_heatmap(im, data=None, fmt="d", threshold=None):
    """
    A function to annotate a heatmap.
    """
    if data is None:
        data = im.get_array()
    if threshold is None:
        threshold = data.max() / 2.
    # Change the text's color depending on the underlying value.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            text = im.axes.text(j, i, format(data[i, j], fmt), horizontalalignment="center",
                                color="white" if data[i, j] > threshold else "black")
            texts.append(text)

    return texts
Confusion Matrix
In [16]:
labels = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']

# Convert the prediction probabilities to class indices
Y_pred_classes = np.argmax(pred, axis=1)
# Convert the one-hot test labels back to class indices
Y_true = np.argmax(y_test, axis=1)
# Errors are the samples whose predicted label differs from the true label
errors = (Y_pred_classes - Y_true != 0)

Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = pred[errors]
Y_true_errors = Y_true[errors]
X_test_errors = x_test[errors]

cm = confusion_matrix(Y_true, Y_pred_classes) 
thresh = cm.max() / 2.

fig, ax = plt.subplots(figsize=(12,12))
im, cbar = heatmap(cm, labels, labels, ax=ax,
                   cmap=plt.cm.Blues, cbarlabel="count of predictions")
texts = annotate_heatmap(im, data=cm, threshold=thresh)

fig.tight_layout()
plt.show()
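The diagonal of the confusion matrix counts the correctly classified samples of each class, so dividing it by the row sums gives the per-class accuracy (a small optional addition, not in the original run):
In [ ]:
# Per-class accuracy from the confusion matrix computed above.
per_class_acc = cm.diagonal() / cm.sum(axis=1)
for name, acc in zip(labels, per_class_acc):
    print('%-10s %.3f' % (name, acc))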
True Predictions
In [17]:
R = 5
C = 5
fig, axes = plt.subplots(R, C, figsize=(12,12))
axes = axes.ravel()

for i in np.arange(0, R*C):
    axes[i].imshow(x_test[i])
    axes[i].set_title("True: %s \nPredict: %s" % (labels[Y_true[i]], labels[Y_pred_classes[i]]))
    axes[i].axis('off')
    plt.subplots_adjust(wspace=1)
False Predictions
In [18]:
R = 3
C = 5
fig, axes = plt.subplots(R, C, figsize=(12,8))
axes = axes.ravel()

misclassified_idx = np.where(Y_pred_classes != Y_true)[0]
for i in np.arange(0, R*C):
    axes[i].imshow(x_test[misclassified_idx[i]])
    axes[i].set_title("True: %s \nPredicted: %s" % (labels[Y_true[misclassified_idx[i]]], 
                                                  labels[Y_pred_classes[misclassified_idx[i]]]))
    axes[i].axis('off')
    plt.subplots_adjust(wspace=1)

It might be better to restart the kernel here so that the GPU memory held by the first model is released.
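One programmatic way to do this (assuming a Colab runtime; in plain Jupyter use Kernel > Restart instead):
In [ ]:
# Kill the kernel process; Colab starts a fresh runtime automatically,
# releasing the GPU memory held by the previous model.
import os
os.kill(os.getpid(), 9)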

Modified Version of the Model: About 90% Accuracy

Fine-tune the learning rate by recompiling the model between training stages (0.001, then 0.0005, then 0.0003); all three stages are fitted in the same cell.
In [1]:
import keras
from keras.models import Sequential
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.datasets import cifar10
from keras import regularizers, optimizers
import numpy as np


(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

# z-score normalization using the training-set mean and std
mean = np.mean(x_train,axis=(0,1,2,3))
std = np.std(x_train,axis=(0,1,2,3))
x_train = (x_train-mean)/(std+1e-7)
x_test = (x_test-mean)/(std+1e-7)
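# Optional sanity check (an aside, not in the original run): after z-scoring
# with the training-set statistics, x_train should have mean ~0 and std ~1.
# print(x_train.mean(), x_train.std())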

num_classes = 10
y_train = np_utils.to_categorical(y_train,num_classes)
y_test = np_utils.to_categorical(y_test,num_classes)

baseMapNum = 32
weight_decay = 1e-4
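# Note: regularizers.l2(weight_decay) adds weight_decay * sum(W**2) for each
# kernel below to the training loss, i.e. standard L2 weight decay.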
model = Sequential()
model.add(Conv2D(baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))

model.add(Conv2D(2*baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(2*baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.3))

model.add(Conv2D(4*baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Conv2D(4*baseMapNum, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))

model.add(Flatten())
model.add(Dense(num_classes, activation='softmax'))

model.summary()

#data augmentation
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=False
    )
datagen.fit(x_train)

#training
batch_size = 64
epochs = 25

# Stage 1: 75 epochs at lr = 0.001
opt_rms = keras.optimizers.RMSprop(lr=0.001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt_rms,
              metrics=['accuracy'])
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    epochs=3*epochs, verbose=1,
                    validation_data=(x_test, y_test))
model.save_weights('cifar10_normal_rms_ep75.h5')

# Stage 2: 25 more epochs at lr = 0.0005
opt_rms = keras.optimizers.RMSprop(lr=0.0005, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt_rms,
              metrics=['accuracy'])
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    epochs=epochs, verbose=1,
                    validation_data=(x_test, y_test))
model.save_weights('cifar10_normal_rms_ep100.h5')

# Stage 3: 25 more epochs at lr = 0.0003
opt_rms = keras.optimizers.RMSprop(lr=0.0003, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt_rms,
              metrics=['accuracy'])
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    epochs=epochs, verbose=1,
                    validation_data=(x_test, y_test))
model.save_weights('cifar10_normal_rms_ep125.h5')

#testing - no kaggle eval
scores = model.evaluate(x_test, y_test, batch_size=128, verbose=1)
print('\nTest accuracy: %.3f  loss: %.3f' % (scores[1]*100, scores[0]))
Using TensorFlow backend.
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 32, 32, 32)        896       
_________________________________________________________________
activation_1 (Activation)    (None, 32, 32, 32)        0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 32, 32, 32)        128       
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 32, 32, 32)        9248      
_________________________________________________________________
activation_2 (Activation)    (None, 32, 32, 32)        0         
_________________________________________________________________
batch_normalization_2 (Batch (None, 32, 32, 32)        128       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 16, 16, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 16, 16, 32)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 16, 16, 64)        18496     
_________________________________________________________________
activation_3 (Activation)    (None, 16, 16, 64)        0         
_________________________________________________________________
batch_normalization_3 (Batch (None, 16, 16, 64)        256       
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 16, 16, 64)        36928     
_________________________________________________________________
activation_4 (Activation)    (None, 16, 16, 64)        0         
_________________________________________________________________
batch_normalization_4 (Batch (None, 16, 16, 64)        256       
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 8, 8, 64)          0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 8, 8, 64)          0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 8, 8, 128)         73856     
_________________________________________________________________
activation_5 (Activation)    (None, 8, 8, 128)         0         
_________________________________________________________________
batch_normalization_5 (Batch (None, 8, 8, 128)         512       
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 8, 8, 128)         147584    
_________________________________________________________________
activation_6 (Activation)    (None, 8, 8, 128)         0         
_________________________________________________________________
batch_normalization_6 (Batch (None, 8, 8, 128)         512       
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 4, 4, 128)         0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 4, 4, 128)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 2048)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 10)                20490     
=================================================================
Total params: 309,290
Trainable params: 308,394
Non-trainable params: 896
_________________________________________________________________
Epoch 1/75
781/781 [==============================] - 29s 37ms/step - loss: 1.9634 - accuracy: 0.4197 - val_loss: 1.4122 - val_accuracy: 0.5518
Epoch 2/75
781/781 [==============================] - 27s 34ms/step - loss: 1.3671 - accuracy: 0.5800 - val_loss: 1.0427 - val_accuracy: 0.6760
Epoch 3/75
781/781 [==============================] - 26s 34ms/step - loss: 1.1346 - accuracy: 0.6466 - val_loss: 0.9954 - val_accuracy: 0.6887
... (epochs 4-74 omitted for brevity: val_accuracy climbs from about 0.73 into the 0.83-0.87 range) ...
Epoch 75/75
781/781 [==============================] - 27s 35ms/step - loss: 0.5729 - accuracy: 0.8561 - val_loss: 0.6000 - val_accuracy: 0.8565
Epoch 1/25
781/781 [==============================] - 28s 36ms/step - loss: 0.5332 - accuracy: 0.8700 - val_loss: 0.5655 - val_accuracy: 0.8680
Epoch 2/25
781/781 [==============================] - 27s 35ms/step - loss: 0.5183 - accuracy: 0.8758 - val_loss: 0.5260 - val_accuracy: 0.8765
... (epochs 3-24 omitted for brevity: val_accuracy fluctuates between about 0.86 and 0.89) ...
Epoch 25/25
781/781 [==============================] - 28s 36ms/step - loss: 0.4611 - accuracy: 0.8833 - val_loss: 0.5226 - val_accuracy: 0.8727
Epoch 1/25
781/781 [==============================] - 29s 37ms/step - loss: 0.4417 - accuracy: 0.8887 - val_loss: 0.5208 - val_accuracy: 0.8729
Epoch 2/25
781/781 [==============================] - 28s 36ms/step - loss: 0.4301 - accuracy: 0.8925 - val_loss: 0.4989 - val_accuracy: 0.8761
... (epochs 3-24 omitted for brevity: val_accuracy hovers around 0.88-0.89) ...
Epoch 25/25
781/781 [==============================] - 27s 34ms/step - loss: 0.3959 - accuracy: 0.8976 - val_loss: 0.4513 - val_accuracy: 0.8848
10000/10000 [==============================] - 1s 70us/step

Test result: 88.480 loss: 0.451
In [0]:
# save the model to Drive for later use
model.save('CNN_25_CIFAR_10_better_model.h5')
In [0]:
from google.colab import drive
drive.mount('/content/drive')
In [0]:
! cp CNN_25_CIFAR_10_better_model.h5 /content/drive/'My Drive'/
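To reload the saved model later, a minimal sketch (the path assumes the copy placed in Drive above):

from keras.models import load_model
model = load_model('/content/drive/My Drive/CNN_25_CIFAR_10_better_model.h5')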

It may be better to restart the kernel at this point.

B) Transfer Learning:

Using well-known models for feature extraction, perform classification on the CIFAR-10 dataset via transfer learning:

i) VGG16 architecture (https://keras.io/api/applications/vgg/#vgg16-function)

In [0]:
# Pandas and NumPy for data structures and utility functions
import scipy as sp
import numpy as np
import pandas as pd
from numpy.random import rand
pd.options.display.max_colwidth = 600

# Scikit Imports
from sklearn import preprocessing
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.model_selection import train_test_split

#! wget "https://raw.githubusercontent.com/JudasDie/deeplearning.ai/master/Convolutional%20Neural%20Networks/week1/cnn_utils.py"
#import cnn_utils as utils
#from model_evaluation_utils import get_metrics

# Matplot Imports
import matplotlib.pyplot as plt
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 5),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}

plt.rcParams.update(params)
%matplotlib inline

# Display pandas data frames as tables
from IPython.display import display, HTML

import warnings
warnings.filterwarnings('ignore')

CNN Utils Code

In [0]:
import numpy as np
import matplotlib.pyplot as plt
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 5),
          'axes.labelsize': 'x-large',
          'axes.titlesize': 'x-large',
          'xtick.labelsize': 'x-large',
          'ytick.labelsize': 'x-large'}

plt.rcParams.update(params)


def make_prediction(model=None,img_vector=[],
                    label_dict={},top_N=3, 
                    model_input_shape=None):
    if model:
        # get model input shape
        if not model_input_shape:
            model_input_shape = (1,)+model.get_input_shape_at(0)[1:]
            
        # get prediction
        prediction = model.predict(img_vector.reshape(model_input_shape))[0]
        
        
        # get top N with confidence
        labels_predicted = [label_dict[idx] for idx in np.argsort(prediction)[::-1][:top_N]]
        confidence_predicted = np.sort(prediction)[::-1][:top_N]
        
        return labels_predicted, confidence_predicted
    
    
def plot_predictions(model,dataset,
                    dataset_labels,label_dict,
                    batch_size,grid_height,grid_width):
    if model:
        f, ax = plt.subplots(grid_width, grid_height)
        f.set_size_inches(12, 12)
        
        random_batch_indx = np.random.permutation(np.arange(0,len(dataset)))[:batch_size]

        img_idx = 0
        for i in range(0, grid_width):
            for j in range(0, grid_height):
                actual_label = label_dict.get(dataset_labels[random_batch_indx[img_idx]].argmax())
                preds,confs_ = make_prediction(model,
                                              img_vector=dataset[random_batch_indx[img_idx]],
                                              label_dict=label_dict,
                                              top_N=1)
                ax[i][j].axis('off')
                ax[i][j].set_title('Actual:'+actual_label[:10]+\
                                    '\nPredicted:'+preds[0] + \
                                    '(' +str(round(confs_[0],2)) + ')')
                ax[i][j].imshow(dataset[random_batch_indx[img_idx]])
                img_idx += 1

        plt.subplots_adjust(left=0, bottom=0, right=1, 
                            top=1, wspace=0.4, hspace=0.55)    
 

# source: https://github.com/keras-team/keras/issues/431#issuecomment-317397154
def get_activations(model, model_inputs, 
    print_shape_only=True, layer_name=None):
    import keras.backend as K
    print('----- activations -----')
    activations = []
    inp = model.input

    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False
    # all layer outputs
    outputs = [layer.output for layer in model.layers if
               layer.name == layer_name or layer_name is None]  

    # evaluation functions           
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  

    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(1.)
    else:
        list_inputs = [model_inputs, 1.]

    # Learning phase flag: 0 = test mode, 1 = train mode; 1. (train) is appended to the inputs above
    # layer_outputs = [func([model_inputs, 1.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations

# source: https://github.com/philipperemy/keras-visualize-activations/blob/master/read_activations.py
def display_activations(activation_maps):
    batch_size = activation_maps[0].shape[0]
    assert batch_size == 1, 'One image at a time to visualize.'
    for i, activation_map in enumerate(activation_maps):
        print('Displaying activation map {}'.format(i))
        shape = activation_map.shape
        if len(shape) == 4:
            activations = np.hstack(np.transpose(activation_map[0], (2, 0, 1)))
        elif len(shape) == 2:
            # Try to make the display as square as possible; some activations may be skipped.
            activations = activation_map[0]
            num_activations = len(activations)
            # too hard to display it on the screen.
            if num_activations > 1024:  
                square_param = int(np.floor(np.sqrt(num_activations)))
                activations = activations[0: square_param * square_param]
                activations = np.reshape(activations, (square_param, square_param))
            else:
                activations = np.expand_dims(activations, axis=0)
        else:
            raise Exception('len(shape) = 3 has not been implemented.')
        #plt.imshow(activations, interpolation='None', cmap='binary')
        fig, ax = plt.subplots(figsize=(18, 12))
        ax.imshow(activations, interpolation='None', cmap='binary')
        plt.show() 
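A hypothetical usage sketch of these helpers, with model, X_test and label_dict as they are defined later in this notebook:

# Top-3 prediction for one rescaled test image
labels, confs = make_prediction(model=model, img_vector=X_test[0]/255.,
                                label_dict=label_dict, top_N=3)

# Activation maps for a single image (note: learning phase 1. is hard-wired above)
acts = get_activations(model, X_test[:1]/255., print_shape_only=True)
display_activations(acts)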

model_evaluation_utils.py Code

In [0]:
from sklearn import metrics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.metrics import roc_curve, auc 


def get_metrics(true_labels, predicted_labels):
    
    print('Accuracy:', np.round(
                        metrics.accuracy_score(true_labels, 
                                               predicted_labels),
                        4))
    print('Precision:', np.round(
                        metrics.precision_score(true_labels, 
                                               predicted_labels,
                                               average='weighted'),
                        4))
    print('Recall:', np.round(
                        metrics.recall_score(true_labels, 
                                               predicted_labels,
                                               average='weighted'),
                        4))
    print('F1 Score:', np.round(
                        metrics.f1_score(true_labels, 
                                               predicted_labels,
                                               average='weighted'),
                        4))
                        

def train_predict_model(classifier, 
                        train_features, train_labels, 
                        test_features, test_labels):
    # build model    
    classifier.fit(train_features, train_labels)
    # predict using model
    predictions = classifier.predict(test_features) 
    return predictions    


def display_confusion_matrix(true_labels, predicted_labels, classes=[1,0]):
    
    total_classes = len(classes)
    level_labels = [total_classes*[0], list(range(total_classes))]

    cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels, 
                                  labels=classes)
    cm_frame = pd.DataFrame(data=cm, 
                            columns=pd.MultiIndex(levels=[['Predicted:'], classes], 
                                                  labels=level_labels), 
                            index=pd.MultiIndex(levels=[['Actual:'], classes], 
                                                labels=level_labels)) 
    print(cm_frame) 
    
def display_classification_report(true_labels, predicted_labels, classes=[1,0]):

    report = metrics.classification_report(y_true=true_labels, 
                                           y_pred=predicted_labels, 
                                           labels=classes) 
    print(report)
    
    
    
def display_model_performance_metrics(true_labels, predicted_labels, classes=[1,0]):
    print('Model Performance metrics:')
    print('-'*30)
    get_metrics(true_labels=true_labels, predicted_labels=predicted_labels)
    print('\nModel Classification report:')
    print('-'*30)
    display_classification_report(true_labels=true_labels, predicted_labels=predicted_labels, 
                                  classes=classes)
    print('\nPrediction Confusion Matrix:')
    print('-'*30)
    display_confusion_matrix(true_labels=true_labels, predicted_labels=predicted_labels, 
                             classes=classes)


def plot_model_decision_surface(clf, train_features, train_labels,
                                plot_step=0.02, cmap=plt.cm.RdYlBu,
                                markers=None, alphas=None, colors=None):
    
    if train_features.shape[1] != 2:
        raise ValueError("X_train should have exactly 2 columns!")
    
    x_min, x_max = train_features[:, 0].min() - plot_step, train_features[:, 0].max() + plot_step
    y_min, y_max = train_features[:, 1].min() - plot_step, train_features[:, 1].max() + plot_step
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))

    clf_est = clone(clf)
    clf_est.fit(train_features,train_labels)
    if hasattr(clf_est, 'predict_proba'):
        Z = clf_est.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]
    else:
        Z = clf_est.predict(np.c_[xx.ravel(), yy.ravel()])    
    Z = Z.reshape(xx.shape)
    cs = plt.contourf(xx, yy, Z, cmap=cmap)
    
    le = LabelEncoder()
    y_enc = le.fit_transform(train_labels)
    n_classes = len(le.classes_)
    plot_colors = ''.join(colors) if colors else [None] * n_classes
    label_names = le.classes_
    markers = markers if markers else [None] * n_classes
    alphas = alphas if alphas else [None] * n_classes
    for i, color in zip(range(n_classes), plot_colors):
        idx = np.where(y_enc == i)
        plt.scatter(train_features[idx, 0], train_features[idx, 1], c=color,
                    label=label_names[i], cmap=cmap, edgecolors='black', 
                    marker=markers[i], alpha=alphas[i])
    plt.legend()
    plt.show()


def plot_model_roc_curve(clf, features, true_labels, label_encoder=None, class_names=None):
    
    ## Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    if hasattr(clf, 'classes_'):
        class_labels = clf.classes_
    elif label_encoder:
        class_labels = label_encoder.classes_
    elif class_names:
        class_labels = class_names
    else:
        raise ValueError('Unable to derive prediction classes, please specify class_names!')
    n_classes = len(class_labels)
    y_test = label_binarize(true_labels, classes=class_labels)
    if n_classes == 2:
        if hasattr(clf, 'predict_proba'):
            prob = clf.predict_proba(features)
            y_score = prob[:, prob.shape[1]-1] 
        elif hasattr(clf, 'decision_function'):
            prob = clf.decision_function(features)
            y_score = prob[:, prob.shape[1]-1]
        else:
            raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
        
        fpr, tpr, _ = roc_curve(y_test, y_score)      
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, label='ROC curve (area = {0:0.2f})'
                                 ''.format(roc_auc),
                 linewidth=2.5)
        
    elif n_classes > 2:
        if hasattr(clf, 'predict_proba'):
            y_score = clf.predict_proba(features)
        elif hasattr(clf, 'decision_function'):
            y_score = clf.decision_function(features)
        else:
            raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")

        for i in range(n_classes):
            fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])

        ## Compute micro-average ROC curve and ROC area
        fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
        roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

        ## Compute macro-average ROC curve and ROC area
        # First aggregate all false positive rates
        all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
        # Then interpolate all ROC curves at this points
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(n_classes):
            mean_tpr += interp(all_fpr, fpr[i], tpr[i])
        # Finally average it and compute AUC
        mean_tpr /= n_classes
        fpr["macro"] = all_fpr
        tpr["macro"] = mean_tpr
        roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

        ## Plot ROC curves
        plt.figure(figsize=(6, 4))
        plt.plot(fpr["micro"], tpr["micro"],
                 label='micro-average ROC curve (area = {0:0.2f})'
                       ''.format(roc_auc["micro"]), linewidth=3)

        plt.plot(fpr["macro"], tpr["macro"],
                 label='macro-average ROC curve (area = {0:0.2f})'
                       ''.format(roc_auc["macro"]), linewidth=3)

        for i, label in enumerate(class_labels):
            plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                           ''.format(label, roc_auc[i]), 
                     linewidth=2, linestyle=':')
    else:
        raise ValueError('Number of classes should be at least 2')
        
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend(loc="lower right")
    plt.show()
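Only get_metrics is called later in this notebook; the fuller report could be produced with, for example (a sketch using y_test and predictions as defined in the evaluation cells below):

display_model_performance_metrics(true_labels=list(y_test.squeeze()),
                                  predicted_labels=predictions,
                                  classes=list(range(10)))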
In [4]:
import tensorflow as tf
from keras import callbacks
from keras import optimizers
from keras.datasets import cifar10
from keras.engine import Model
from keras.applications import vgg16 as vgg
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D,BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
Using TensorFlow backend.
Train for only 10 epochs
In [0]:
BATCH_SIZE = 32
EPOCHS = 10
NUM_CLASSES = 10
LEARNING_RATE = 1e-4
MOMENTUM = 0.9
In [0]:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
In [0]:
X_train, X_val, y_train, y_val = train_test_split(X_train, 
                                                  y_train, 
                                                  test_size=0.15, 
                                                  stratify=np.array(y_train), 
                                                  random_state=42)
In [0]:
Y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
Y_val = np_utils.to_categorical(y_val, NUM_CLASSES)
Y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
In [0]:
# ! pip uninstall scipy 
# ! pip install scipy==1.2.2
In [0]:
from PIL import Image

X_train = np.array([sp.misc.imresize(x,(48, 48)) for x in X_train])
X_val = np.array([sp.misc.imresize(x, 
                                   (48, 48)) for x in X_val])
X_test = np.array([sp.misc.imresize(x, 
                                    (48, 48)) for x in X_test])
Resize the images to 48x48 for faster computation
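Note that scipy.misc.imresize was removed in SciPy 1.3, which is why scipy==1.2.2 is pinned in the commented cell above. A drop-in sketch using PIL (already imported), assuming uint8 HxWx3 images as returned by cifar10.load_data():

def resize_batch(images, size=(48, 48)):
    # Bilinear resize of each uint8 HxWx3 image via PIL, stacked back into one array
    return np.array([np.array(Image.fromarray(img).resize(size, Image.BILINEAR))
                     for img in images])

# e.g. X_train = resize_batch(X_train); X_val = resize_batch(X_val); X_test = resize_batch(X_test)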
In [0]:
base_model = vgg.VGG16(weights='imagenet', 
                       include_top=False, 
                       input_shape=(48, 48, 3))
See the VGG16 model summary
In [12]:
base_model.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 48, 48, 3)         0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 48, 48, 64)        1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 48, 48, 64)        36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 24, 24, 64)        0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 24, 24, 128)       73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 24, 24, 128)       147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 12, 12, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 12, 12, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 12, 12, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 12, 12, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 6, 6, 256)         0         
_________________________________________________________________
block4_conv1 (Conv2D)        (None, 6, 6, 512)         1180160   
_________________________________________________________________
block4_conv2 (Conv2D)        (None, 6, 6, 512)         2359808   
_________________________________________________________________
block4_conv3 (Conv2D)        (None, 6, 6, 512)         2359808   
_________________________________________________________________
block4_pool (MaxPooling2D)   (None, 3, 3, 512)         0         
_________________________________________________________________
block5_conv1 (Conv2D)        (None, 3, 3, 512)         2359808   
_________________________________________________________________
block5_conv2 (Conv2D)        (None, 3, 3, 512)         2359808   
_________________________________________________________________
block5_conv3 (Conv2D)        (None, 3, 3, 512)         2359808   
_________________________________________________________________
block5_pool (MaxPooling2D)   (None, 1, 1, 512)         0         
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
In [0]:
# Extract the output of the third convolutional block of VGG16; at 48x48 input the
# deeper blocks shrink to 1x1 (see the summary above), so block3_pool (6x6x256) retains spatial detail
last = base_model.get_layer('block3_pool').output
In [0]:
# Add classification layers on top of it
x = GlobalAveragePooling2D()(last)
x = BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.6)(x)
pred = Dense(NUM_CLASSES, activation='softmax')(x)
model = Model(base_model.input, pred)
In [0]:
# Freeze the pre-trained VGG16 layers; only the newly added head is trained
for layer in base_model.layers:
    layer.trainable = False
In [0]:
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=LEARNING_RATE),
              metrics=['accuracy'])
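Note: with one-hot 10-class labels and a softmax head, categorical cross-entropy is the appropriate loss. With binary_crossentropy, Keras reports element-wise binary accuracy over the 10 output positions, which overstates classification accuracy (compare the ~0.95 val_accuracy in the training log below with the 0.732 test accuracy from get_metrics). A corrected compile call would be a one-line change:

# Sketch: categorical cross-entropy matches the 10-way softmax classifier
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=LEARNING_RATE),
              metrics=['accuracy'])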
Current Summary of the model
In [17]:
model.summary()
Model: "model_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 48, 48, 3)         0         
_________________________________________________________________
block1_conv1 (Conv2D)        (None, 48, 48, 64)        1792      
_________________________________________________________________
block1_conv2 (Conv2D)        (None, 48, 48, 64)        36928     
_________________________________________________________________
block1_pool (MaxPooling2D)   (None, 24, 24, 64)        0         
_________________________________________________________________
block2_conv1 (Conv2D)        (None, 24, 24, 128)       73856     
_________________________________________________________________
block2_conv2 (Conv2D)        (None, 24, 24, 128)       147584    
_________________________________________________________________
block2_pool (MaxPooling2D)   (None, 12, 12, 128)       0         
_________________________________________________________________
block3_conv1 (Conv2D)        (None, 12, 12, 256)       295168    
_________________________________________________________________
block3_conv2 (Conv2D)        (None, 12, 12, 256)       590080    
_________________________________________________________________
block3_conv3 (Conv2D)        (None, 12, 12, 256)       590080    
_________________________________________________________________
block3_pool (MaxPooling2D)   (None, 6, 6, 256)         0         
_________________________________________________________________
global_average_pooling2d_1 ( (None, 256)               0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 256)               1024      
_________________________________________________________________
dense_1 (Dense)              (None, 256)               65792     
_________________________________________________________________
dense_2 (Dense)              (None, 256)               65792     
_________________________________________________________________
dropout_1 (Dropout)          (None, 256)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 10)                2570      
=================================================================
Total params: 1,870,666
Trainable params: 134,666
Non-trainable params: 1,736,000
_________________________________________________________________
In [0]:
# prepare the input pipeline (rescaling only; with horizontal_flip=False no augmentation is applied)
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=False)
In [0]:
train_datagen.fit(X_train)
train_generator = train_datagen.flow(X_train,
                                     Y_train, 
                                     batch_size=BATCH_SIZE)
In [0]:
val_datagen = ImageDataGenerator(rescale=1. / 255,
    horizontal_flip=False)

val_datagen.fit(X_val)
val_generator = val_datagen.flow(X_val,
                                 Y_val,
                                 batch_size=BATCH_SIZE)
Fit the model
In [21]:
train_steps_per_epoch = X_train.shape[0] // BATCH_SIZE
val_steps_per_epoch = X_val.shape[0] // BATCH_SIZE

history = model.fit_generator(train_generator,
                              steps_per_epoch=train_steps_per_epoch,
                              validation_data=val_generator,
                              validation_steps=val_steps_per_epoch,
                              epochs=EPOCHS,
                              verbose=1)
Epoch 1/10
1328/1328 [==============================] - 12s 9ms/step - loss: 0.2489 - accuracy: 0.9099 - val_loss: 0.1692 - val_accuracy: 0.9273
Epoch 2/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1966 - accuracy: 0.9243 - val_loss: 0.1673 - val_accuracy: 0.9354
Epoch 3/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1783 - accuracy: 0.9311 - val_loss: 0.1270 - val_accuracy: 0.9399
Epoch 4/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1667 - accuracy: 0.9353 - val_loss: 0.1690 - val_accuracy: 0.9421
Epoch 5/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1597 - accuracy: 0.9383 - val_loss: 0.1143 - val_accuracy: 0.9457
Epoch 6/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1526 - accuracy: 0.9405 - val_loss: 0.1137 - val_accuracy: 0.9469
Epoch 7/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1491 - accuracy: 0.9421 - val_loss: 0.1425 - val_accuracy: 0.9480
Epoch 8/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1441 - accuracy: 0.9443 - val_loss: 0.1141 - val_accuracy: 0.9497
Epoch 9/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1410 - accuracy: 0.9454 - val_loss: 0.1402 - val_accuracy: 0.9504
Epoch 10/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.1380 - accuracy: 0.9463 - val_loss: 0.1191 - val_accuracy: 0.9503
In [22]:
plt.figure(figsize=(12,7))
legend_acc = []
for item in history.history.keys():
  if item.split('_')[-1:] == ['accuracy']:
    #print("Accuracy = ",item)
    legend_acc.append(item)
    plt.plot(history.history[item])
plt.ylabel('Accuracy ->')
plt.xlabel('Epoch ->')
plt.legend(legend_acc, loc='upper left')
plt.show()

plt.figure(figsize=(12,7))
legend_loss = []
for item in history.history.keys():
  if item.split('_')[-1:] == ['loss']:
    legend_loss.append(item)
    plt.plot(history.history[item])
plt.ylabel('Loss ->')
plt.xlabel('Epoch ->')
plt.legend(legend_loss, loc='upper right')
plt.show()
In [0]:
predictions = model.predict(X_test/255.)
In [0]:
test_labels = list(y_test.squeeze())
predictions = list(predictions.argmax(axis=1))
In [25]:
get_metrics(true_labels=test_labels,
            predicted_labels=predictions)
Accuracy: 0.732
Precision: 0.7313
Recall: 0.732
F1 Score: 0.7313
In [0]:
label_dict = {0:'airplane',
             1:'automobile',
             2:'bird',
             3:'cat',
             4:'deer',
             5:'dog',
             6:'frog',
             7:'horse',
             8:'ship',
             9:'truck'}
In [27]:
plot_predictions(model=model,dataset=X_test/255.,
                       dataset_labels=Y_test,
                       label_dict=label_dict,
                       batch_size=16,
                       grid_height=4,
                       grid_width=4)
In [0]:
# save the model to Drive for later use
model.save('CNN_TL_CIFAR_10_VGG16.h5')
In [0]:
! cp CNN_TL_CIFAR_10_VGG16.h5 /content/drive/'My Drive'/

It may be better to restart the kernel at this point.

ii) MobileNetV2 architecture ( https://keras.io/api/applications/mobilenet/#mobilenetv2-function )

You may train the model for 10 epochs and exclude the top layer of the architectures. Based on your own judgement, make suitable choices for the other parameters/hyperparameters of the model. Compare the accuracy of the models (a comparison sketch follows below).
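For the final comparison, here is a minimal sketch that collects the test accuracies already reported in this notebook; the MobileNetV2 entry is hypothetical until its evaluation cell below is run:

results = {'CNN from scratch (Part A)': 0.8848,   # test accuracy reported in Part A
           'VGG16 transfer (Part B-i)': 0.7320}   # get_metrics accuracy reported above
# results['MobileNetV2 transfer (Part B-ii)'] = ...  # fill in after evaluating Part B-ii
for name, acc in sorted(results.items(), key=lambda kv: -kv[1]):
    print('{:<28s} {:.2%}'.format(name, acc))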

In [0]:
# Pandas and NumPy for data structures and utility functions
import scipy as sp
import numpy as np
import pandas as pd
from numpy.random import rand
pd.options.display.max_colwidth = 600

# Scikit Imports
from sklearn import preprocessing
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.model_selection import train_test_split


# Matplot Imports
import matplotlib.pyplot as plt
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (15, 5),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}

plt.rcParams.update(params)
%matplotlib inline

# Display pandas data frames as tables
from IPython.display import display, HTML

import warnings
warnings.filterwarnings('ignore')
In [2]:
import tensorflow as tf
from keras import callbacks
from keras import optimizers
from keras.datasets import cifar10
from keras.engine import Model
from keras.applications import MobileNetV2 
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D,BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
Using TensorFlow backend.
In [0]:
BATCH_SIZE = 32
EPOCHS = 10
NUM_CLASSES = 10
LEARNING_RATE = 1e-4
MOMENTUM = 0.9
In [0]:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
In [0]:
X_train, X_val, y_train, y_val = train_test_split(X_train, 
                                                  y_train, 
                                                  test_size=0.15, 
                                                  stratify=np.array(y_train), 
                                                  random_state=42)
In [0]:
Y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
Y_val = np_utils.to_categorical(y_val, NUM_CLASSES)
Y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
In [0]:
X_train = np.array([sp.misc.imresize(x, 
                                     (48, 48)) for x in X_train])
X_val = np.array([sp.misc.imresize(x, 
                                   (48, 48)) for x in X_val])
X_test = np.array([sp.misc.imresize(x, 
                                    (48, 48)) for x in X_test])
In [0]:
base_model = MobileNetV2(weights='imagenet',
                         include_top=False,
                         input_shape=(48, 48, 3))
In [9]:
base_model.summary()
Model: "mobilenetv2_1.00_224"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 48, 48, 3)    0                                            
__________________________________________________________________________________________________
Conv1_pad (ZeroPadding2D)       (None, 49, 49, 3)    0           input_1[0][0]                    
__________________________________________________________________________________________________
Conv1 (Conv2D)                  (None, 24, 24, 32)   864         Conv1_pad[0][0]                  
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization)   (None, 24, 24, 32)   128         Conv1[0][0]                      
__________________________________________________________________________________________________
Conv1_relu (ReLU)               (None, 24, 24, 32)   0           bn_Conv1[0][0]                   
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 24, 24, 32)   288         Conv1_relu[0][0]                 
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 24, 24, 32)   128         expanded_conv_depthwise[0][0]    
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 24, 24, 32)   0           expanded_conv_depthwise_BN[0][0] 
__________________________________________________________________________________________________
expanded_conv_project (Conv2D)  (None, 24, 24, 16)   512         expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 24, 24, 16)   64          expanded_conv_project[0][0]      
__________________________________________________________________________________________________
block_1_expand (Conv2D)         (None, 24, 24, 96)   1536        expanded_conv_project_BN[0][0]   
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 24, 24, 96)   384         block_1_expand[0][0]             
__________________________________________________________________________________________________
block_1_expand_relu (ReLU)      (None, 24, 24, 96)   0           block_1_expand_BN[0][0]          
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D)     (None, 25, 25, 96)   0           block_1_expand_relu[0][0]        
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 12, 12, 96)   864         block_1_pad[0][0]                
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 12, 12, 96)   384         block_1_depthwise[0][0]          
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU)   (None, 12, 12, 96)   0           block_1_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_1_project (Conv2D)        (None, 12, 12, 24)   2304        block_1_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 12, 12, 24)   96          block_1_project[0][0]            
__________________________________________________________________________________________________
block_2_expand (Conv2D)         (None, 12, 12, 144)  3456        block_1_project_BN[0][0]         
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 12, 12, 144)  576         block_2_expand[0][0]             
__________________________________________________________________________________________________
block_2_expand_relu (ReLU)      (None, 12, 12, 144)  0           block_2_expand_BN[0][0]          
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 12, 12, 144)  1296        block_2_expand_relu[0][0]        
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 12, 12, 144)  576         block_2_depthwise[0][0]          
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU)   (None, 12, 12, 144)  0           block_2_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_2_project (Conv2D)        (None, 12, 12, 24)   3456        block_2_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 12, 12, 24)   96          block_2_project[0][0]            
__________________________________________________________________________________________________
block_2_add (Add)               (None, 12, 12, 24)   0           block_1_project_BN[0][0]         
                                                                 block_2_project_BN[0][0]         
__________________________________________________________________________________________________
block_3_expand (Conv2D)         (None, 12, 12, 144)  3456        block_2_add[0][0]                
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 12, 12, 144)  576         block_3_expand[0][0]             
__________________________________________________________________________________________________
block_3_expand_relu (ReLU)      (None, 12, 12, 144)  0           block_3_expand_BN[0][0]          
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D)     (None, 13, 13, 144)  0           block_3_expand_relu[0][0]        
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 6, 6, 144)    1296        block_3_pad[0][0]                
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 6, 6, 144)    576         block_3_depthwise[0][0]          
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU)   (None, 6, 6, 144)    0           block_3_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_3_project (Conv2D)        (None, 6, 6, 32)     4608        block_3_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 6, 6, 32)     128         block_3_project[0][0]            
__________________________________________________________________________________________________
block_4_expand (Conv2D)         (None, 6, 6, 192)    6144        block_3_project_BN[0][0]         
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 6, 6, 192)    768         block_4_expand[0][0]             
__________________________________________________________________________________________________
block_4_expand_relu (ReLU)      (None, 6, 6, 192)    0           block_4_expand_BN[0][0]          
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 6, 6, 192)    1728        block_4_expand_relu[0][0]        
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 6, 6, 192)    768         block_4_depthwise[0][0]          
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU)   (None, 6, 6, 192)    0           block_4_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_4_project (Conv2D)        (None, 6, 6, 32)     6144        block_4_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 6, 6, 32)     128         block_4_project[0][0]            
__________________________________________________________________________________________________
block_4_add (Add)               (None, 6, 6, 32)     0           block_3_project_BN[0][0]         
                                                                 block_4_project_BN[0][0]         
__________________________________________________________________________________________________
block_5_expand (Conv2D)         (None, 6, 6, 192)    6144        block_4_add[0][0]                
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 6, 6, 192)    768         block_5_expand[0][0]             
__________________________________________________________________________________________________
block_5_expand_relu (ReLU)      (None, 6, 6, 192)    0           block_5_expand_BN[0][0]          
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 6, 6, 192)    1728        block_5_expand_relu[0][0]        
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 6, 6, 192)    768         block_5_depthwise[0][0]          
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU)   (None, 6, 6, 192)    0           block_5_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_5_project (Conv2D)        (None, 6, 6, 32)     6144        block_5_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 6, 6, 32)     128         block_5_project[0][0]            
__________________________________________________________________________________________________
block_5_add (Add)               (None, 6, 6, 32)     0           block_4_add[0][0]                
                                                                 block_5_project_BN[0][0]         
__________________________________________________________________________________________________
block_6_expand (Conv2D)         (None, 6, 6, 192)    6144        block_5_add[0][0]                
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 6, 6, 192)    768         block_6_expand[0][0]             
__________________________________________________________________________________________________
block_6_expand_relu (ReLU)      (None, 6, 6, 192)    0           block_6_expand_BN[0][0]          
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D)     (None, 7, 7, 192)    0           block_6_expand_relu[0][0]        
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 3, 3, 192)    1728        block_6_pad[0][0]                
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 3, 3, 192)    768         block_6_depthwise[0][0]          
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU)   (None, 3, 3, 192)    0           block_6_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_6_project (Conv2D)        (None, 3, 3, 64)     12288       block_6_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 3, 3, 64)     256         block_6_project[0][0]            
__________________________________________________________________________________________________
block_7_expand (Conv2D)         (None, 3, 3, 384)    24576       block_6_project_BN[0][0]         
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 3, 3, 384)    1536        block_7_expand[0][0]             
__________________________________________________________________________________________________
block_7_expand_relu (ReLU)      (None, 3, 3, 384)    0           block_7_expand_BN[0][0]          
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 3, 3, 384)    3456        block_7_expand_relu[0][0]        
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 3, 3, 384)    1536        block_7_depthwise[0][0]          
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU)   (None, 3, 3, 384)    0           block_7_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_7_project (Conv2D)        (None, 3, 3, 64)     24576       block_7_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 3, 3, 64)     256         block_7_project[0][0]            
__________________________________________________________________________________________________
block_7_add (Add)               (None, 3, 3, 64)     0           block_6_project_BN[0][0]         
                                                                 block_7_project_BN[0][0]         
__________________________________________________________________________________________________
block_8_expand (Conv2D)         (None, 3, 3, 384)    24576       block_7_add[0][0]                
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 3, 3, 384)    1536        block_8_expand[0][0]             
__________________________________________________________________________________________________
block_8_expand_relu (ReLU)      (None, 3, 3, 384)    0           block_8_expand_BN[0][0]          
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 3, 3, 384)    3456        block_8_expand_relu[0][0]        
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 3, 3, 384)    1536        block_8_depthwise[0][0]          
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU)   (None, 3, 3, 384)    0           block_8_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_8_project (Conv2D)        (None, 3, 3, 64)     24576       block_8_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 3, 3, 64)     256         block_8_project[0][0]            
__________________________________________________________________________________________________
block_8_add (Add)               (None, 3, 3, 64)     0           block_7_add[0][0]                
                                                                 block_8_project_BN[0][0]         
__________________________________________________________________________________________________
block_9_expand (Conv2D)         (None, 3, 3, 384)    24576       block_8_add[0][0]                
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 3, 3, 384)    1536        block_9_expand[0][0]             
__________________________________________________________________________________________________
block_9_expand_relu (ReLU)      (None, 3, 3, 384)    0           block_9_expand_BN[0][0]          
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 3, 3, 384)    3456        block_9_expand_relu[0][0]        
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 3, 3, 384)    1536        block_9_depthwise[0][0]          
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU)   (None, 3, 3, 384)    0           block_9_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_9_project (Conv2D)        (None, 3, 3, 64)     24576       block_9_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 3, 3, 64)     256         block_9_project[0][0]            
__________________________________________________________________________________________________
block_9_add (Add)               (None, 3, 3, 64)     0           block_8_add[0][0]                
                                                                 block_9_project_BN[0][0]         
__________________________________________________________________________________________________
block_10_expand (Conv2D)        (None, 3, 3, 384)    24576       block_9_add[0][0]                
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 3, 3, 384)    1536        block_10_expand[0][0]            
__________________________________________________________________________________________________
block_10_expand_relu (ReLU)     (None, 3, 3, 384)    0           block_10_expand_BN[0][0]         
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 3, 3, 384)    3456        block_10_expand_relu[0][0]       
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 3, 3, 384)    1536        block_10_depthwise[0][0]         
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU)  (None, 3, 3, 384)    0           block_10_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_10_project (Conv2D)       (None, 3, 3, 96)     36864       block_10_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 3, 3, 96)     384         block_10_project[0][0]           
__________________________________________________________________________________________________
block_11_expand (Conv2D)        (None, 3, 3, 576)    55296       block_10_project_BN[0][0]        
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 3, 3, 576)    2304        block_11_expand[0][0]            
__________________________________________________________________________________________________
block_11_expand_relu (ReLU)     (None, 3, 3, 576)    0           block_11_expand_BN[0][0]         
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 3, 3, 576)    5184        block_11_expand_relu[0][0]       
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 3, 3, 576)    2304        block_11_depthwise[0][0]         
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU)  (None, 3, 3, 576)    0           block_11_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_11_project (Conv2D)       (None, 3, 3, 96)     55296       block_11_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 3, 3, 96)     384         block_11_project[0][0]           
__________________________________________________________________________________________________
block_11_add (Add)              (None, 3, 3, 96)     0           block_10_project_BN[0][0]        
                                                                 block_11_project_BN[0][0]        
__________________________________________________________________________________________________
block_12_expand (Conv2D)        (None, 3, 3, 576)    55296       block_11_add[0][0]               
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 3, 3, 576)    2304        block_12_expand[0][0]            
__________________________________________________________________________________________________
block_12_expand_relu (ReLU)     (None, 3, 3, 576)    0           block_12_expand_BN[0][0]         
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 3, 3, 576)    5184        block_12_expand_relu[0][0]       
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 3, 3, 576)    2304        block_12_depthwise[0][0]         
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU)  (None, 3, 3, 576)    0           block_12_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_12_project (Conv2D)       (None, 3, 3, 96)     55296       block_12_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 3, 3, 96)     384         block_12_project[0][0]           
__________________________________________________________________________________________________
block_12_add (Add)              (None, 3, 3, 96)     0           block_11_add[0][0]               
                                                                 block_12_project_BN[0][0]        
__________________________________________________________________________________________________
block_13_expand (Conv2D)        (None, 3, 3, 576)    55296       block_12_add[0][0]               
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 3, 3, 576)    2304        block_13_expand[0][0]            
__________________________________________________________________________________________________
block_13_expand_relu (ReLU)     (None, 3, 3, 576)    0           block_13_expand_BN[0][0]         
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D)    (None, 5, 5, 576)    0           block_13_expand_relu[0][0]       
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 2, 2, 576)    5184        block_13_pad[0][0]               
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 2, 2, 576)    2304        block_13_depthwise[0][0]         
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU)  (None, 2, 2, 576)    0           block_13_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_13_project (Conv2D)       (None, 2, 2, 160)    92160       block_13_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 2, 2, 160)    640         block_13_project[0][0]           
__________________________________________________________________________________________________
block_14_expand (Conv2D)        (None, 2, 2, 960)    153600      block_13_project_BN[0][0]        
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 2, 2, 960)    3840        block_14_expand[0][0]            
__________________________________________________________________________________________________
block_14_expand_relu (ReLU)     (None, 2, 2, 960)    0           block_14_expand_BN[0][0]         
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 2, 2, 960)    8640        block_14_expand_relu[0][0]       
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 2, 2, 960)    3840        block_14_depthwise[0][0]         
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU)  (None, 2, 2, 960)    0           block_14_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_14_project (Conv2D)       (None, 2, 2, 160)    153600      block_14_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 2, 2, 160)    640         block_14_project[0][0]           
__________________________________________________________________________________________________
block_14_add (Add)              (None, 2, 2, 160)    0           block_13_project_BN[0][0]        
                                                                 block_14_project_BN[0][0]        
__________________________________________________________________________________________________
block_15_expand (Conv2D)        (None, 2, 2, 960)    153600      block_14_add[0][0]               
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 2, 2, 960)    3840        block_15_expand[0][0]            
__________________________________________________________________________________________________
block_15_expand_relu (ReLU)     (None, 2, 2, 960)    0           block_15_expand_BN[0][0]         
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 2, 2, 960)    8640        block_15_expand_relu[0][0]       
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 2, 2, 960)    3840        block_15_depthwise[0][0]         
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU)  (None, 2, 2, 960)    0           block_15_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_15_project (Conv2D)       (None, 2, 2, 160)    153600      block_15_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 2, 2, 160)    640         block_15_project[0][0]           
__________________________________________________________________________________________________
block_15_add (Add)              (None, 2, 2, 160)    0           block_14_add[0][0]               
                                                                 block_15_project_BN[0][0]        
__________________________________________________________________________________________________
block_16_expand (Conv2D)        (None, 2, 2, 960)    153600      block_15_add[0][0]               
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 2, 2, 960)    3840        block_16_expand[0][0]            
__________________________________________________________________________________________________
block_16_expand_relu (ReLU)     (None, 2, 2, 960)    0           block_16_expand_BN[0][0]         
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 2, 2, 960)    8640        block_16_expand_relu[0][0]       
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 2, 2, 960)    3840        block_16_depthwise[0][0]         
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU)  (None, 2, 2, 960)    0           block_16_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_16_project (Conv2D)       (None, 2, 2, 320)    307200      block_16_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 2, 2, 320)    1280        block_16_project[0][0]           
__________________________________________________________________________________________________
Conv_1 (Conv2D)                 (None, 2, 2, 1280)   409600      block_16_project_BN[0][0]        
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization)  (None, 2, 2, 1280)   5120        Conv_1[0][0]                     
__________________________________________________________________________________________________
out_relu (ReLU)                 (None, 2, 2, 1280)   0           Conv_1_bn[0][0]                  
==================================================================================================
Total params: 2,257,984
Trainable params: 2,223,872
Non-trainable params: 34,112
__________________________________________________________________________________________________
In [0]:
# Extract the output of an early intermediate layer of the base model to build the new head on
last = base_model.get_layer('block_3_project_BN').output
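The cut point block_3_project_BN was picked by inspecting the summary above. To explore other cut points, the backbone's layer names can be listed directly; a minimal sketch, not part of the original run:

In [ ]:
# list candidate layers of the backbone to choose a different cut point (sketch)
for layer in base_model.layers:
    print(layer.name, layer.output_shape)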
In [0]:
# Add classification layers on top of it
x = GlobalAveragePooling2D()(last)
x = BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.6)(x)
pred = Dense(NUM_CLASSES, activation='softmax')(x)
model = Model(base_model.input, pred)
In [0]:
# freeze the backbone so that only the newly added head is trained
for layer in base_model.layers:
    layer.trainable = False
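A quick sanity check, as a sketch, that the freeze took effect (freezing must be done before compiling for it to be picked up):

In [ ]:
# count frozen vs. trainable layers in the assembled model (sketch)
print('trainable layers:', sum(1 for l in model.layers if l.trainable))
print('frozen layers   :', sum(1 for l in model.layers if not l.trainable))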
In [0]:
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=LEARNING_RATE),
              metrics=['accuracy'])
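Note: binary_crossentropy is a mismatch for a 10-class softmax head. With that loss, Keras reports element-wise binary accuracy over the 10 one-hot positions, which inflates the accuracy logged during training (compare the test metrics further below). The matching loss is categorical cross-entropy; a corrected compile call would look like this sketch:

In [ ]:
# corrected loss for a 10-class softmax output (sketch)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=LEARNING_RATE),
              metrics=['accuracy'])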
In [14]:
model.summary()
Model: "model_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 48, 48, 3)    0                                            
__________________________________________________________________________________________________
Conv1_pad (ZeroPadding2D)       (None, 49, 49, 3)    0           input_1[0][0]                    
__________________________________________________________________________________________________
Conv1 (Conv2D)                  (None, 24, 24, 32)   864         Conv1_pad[0][0]                  
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization)   (None, 24, 24, 32)   128         Conv1[0][0]                      
__________________________________________________________________________________________________
Conv1_relu (ReLU)               (None, 24, 24, 32)   0           bn_Conv1[0][0]                   
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 24, 24, 32)   288         Conv1_relu[0][0]                 
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 24, 24, 32)   128         expanded_conv_depthwise[0][0]    
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 24, 24, 32)   0           expanded_conv_depthwise_BN[0][0] 
__________________________________________________________________________________________________
expanded_conv_project (Conv2D)  (None, 24, 24, 16)   512         expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 24, 24, 16)   64          expanded_conv_project[0][0]      
__________________________________________________________________________________________________
block_1_expand (Conv2D)         (None, 24, 24, 96)   1536        expanded_conv_project_BN[0][0]   
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 24, 24, 96)   384         block_1_expand[0][0]             
__________________________________________________________________________________________________
block_1_expand_relu (ReLU)      (None, 24, 24, 96)   0           block_1_expand_BN[0][0]          
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D)     (None, 25, 25, 96)   0           block_1_expand_relu[0][0]        
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 12, 12, 96)   864         block_1_pad[0][0]                
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 12, 12, 96)   384         block_1_depthwise[0][0]          
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU)   (None, 12, 12, 96)   0           block_1_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_1_project (Conv2D)        (None, 12, 12, 24)   2304        block_1_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 12, 12, 24)   96          block_1_project[0][0]            
__________________________________________________________________________________________________
block_2_expand (Conv2D)         (None, 12, 12, 144)  3456        block_1_project_BN[0][0]         
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 12, 12, 144)  576         block_2_expand[0][0]             
__________________________________________________________________________________________________
block_2_expand_relu (ReLU)      (None, 12, 12, 144)  0           block_2_expand_BN[0][0]          
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 12, 12, 144)  1296        block_2_expand_relu[0][0]        
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 12, 12, 144)  576         block_2_depthwise[0][0]          
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU)   (None, 12, 12, 144)  0           block_2_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_2_project (Conv2D)        (None, 12, 12, 24)   3456        block_2_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 12, 12, 24)   96          block_2_project[0][0]            
__________________________________________________________________________________________________
block_2_add (Add)               (None, 12, 12, 24)   0           block_1_project_BN[0][0]         
                                                                 block_2_project_BN[0][0]         
__________________________________________________________________________________________________
block_3_expand (Conv2D)         (None, 12, 12, 144)  3456        block_2_add[0][0]                
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 12, 12, 144)  576         block_3_expand[0][0]             
__________________________________________________________________________________________________
block_3_expand_relu (ReLU)      (None, 12, 12, 144)  0           block_3_expand_BN[0][0]          
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D)     (None, 13, 13, 144)  0           block_3_expand_relu[0][0]        
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 6, 6, 144)    1296        block_3_pad[0][0]                
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 6, 6, 144)    576         block_3_depthwise[0][0]          
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU)   (None, 6, 6, 144)    0           block_3_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_3_project (Conv2D)        (None, 6, 6, 32)     4608        block_3_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 6, 6, 32)     128         block_3_project[0][0]            
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 32)           0           block_3_project_BN[0][0]         
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 32)           128         global_average_pooling2d_1[0][0] 
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 256)          8448        batch_normalization_1[0][0]      
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 256)          65792       dense_1[0][0]                    
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 256)          0           dense_2[0][0]                    
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 10)           2570        dropout_1[0][0]                  
==================================================================================================
Total params: 104,586
Trainable params: 76,874
Non-trainable params: 27,712
__________________________________________________________________________________________________
In [0]:
# prepare the training data generator (rescaling only; no augmentation is applied here)
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=False)
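If augmentation were actually wanted here, ImageDataGenerator supports it directly; a sketch with flips and small shifts enabled (the parameter values are illustrative):

In [ ]:
# a generator with augmentation actually enabled (sketch; values are illustrative)
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True)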
In [0]:
train_datagen.fit(X_train)
train_generator = train_datagen.flow(X_train,
                                     Y_train, 
                                     batch_size=BATCH_SIZE)
In [0]:
val_datagen = ImageDataGenerator(rescale=1. / 255,
    horizontal_flip=False)

val_datagen.fit(X_val)
val_generator = val_datagen.flow(X_val,
                                 Y_val,
                                 batch_size=BATCH_SIZE)
In [18]:
train_steps_per_epoch = X_train.shape[0] // BATCH_SIZE
val_steps_per_epoch = X_val.shape[0] // BATCH_SIZE

history = model.fit_generator(train_generator,
                              steps_per_epoch=train_steps_per_epoch,
                              validation_data=val_generator,
                              validation_steps=val_steps_per_epoch,
                              epochs=EPOCHS,
                              verbose=1)
Epoch 1/10
1328/1328 [==============================] - 11s 8ms/step - loss: 0.2589 - accuracy: 0.9051 - val_loss: 0.2695 - val_accuracy: 0.9013
Epoch 2/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.2209 - accuracy: 0.9140 - val_loss: 0.2983 - val_accuracy: 0.8971
Epoch 3/10
1328/1328 [==============================] - 10s 8ms/step - loss: 0.2113 - accuracy: 0.9169 - val_loss: 0.2615 - val_accuracy: 0.8952
Epoch 4/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.2061 - accuracy: 0.9186 - val_loss: 0.2936 - val_accuracy: 0.8961
Epoch 5/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.2026 - accuracy: 0.9204 - val_loss: 0.2573 - val_accuracy: 0.8943
Epoch 6/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.2001 - accuracy: 0.9213 - val_loss: 0.3308 - val_accuracy: 0.8926
Epoch 7/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.1983 - accuracy: 0.9219 - val_loss: 0.3185 - val_accuracy: 0.8928
Epoch 8/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.1957 - accuracy: 0.9229 - val_loss: 0.3639 - val_accuracy: 0.8934
Epoch 9/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.1940 - accuracy: 0.9234 - val_loss: 0.2921 - val_accuracy: 0.8943
Epoch 10/10
1328/1328 [==============================] - 10s 7ms/step - loss: 0.1928 - accuracy: 0.9239 - val_loss: 0.3422 - val_accuracy: 0.8914
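Aside: fit_generator is deprecated in TF2-era Keras, where model.fit accepts generators directly. The equivalent call would be a sketch like:

In [ ]:
# equivalent training call in TF2-era Keras, where fit() accepts generators (sketch)
history = model.fit(train_generator,
                    steps_per_epoch=train_steps_per_epoch,
                    validation_data=val_generator,
                    validation_steps=val_steps_per_epoch,
                    epochs=EPOCHS,
                    verbose=1)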
In [32]:
plt.figure(figsize=(12,7))
legend_acc = []
for item in history.history.keys():
  if item.endswith('accuracy'):
    legend_acc.append(item)
    plt.plot(history.history[item])
plt.ylabel('Accuracy ->')
plt.xlabel('Epoch ->')
plt.legend(legend_acc, loc='upper left')
plt.show()

plt.figure(figsize=(12,7))
legend_loss = []
for item in history.history.keys():
  if item.endswith('loss'):
    legend_loss.append(item)
    plt.plot(history.history[item])
plt.ylabel('Loss ->')
plt.xlabel('Epoch ->')
plt.legend(legend_loss, loc='upper right')
plt.show()
In [0]:
predictions = model.predict(X_test/255.)
In [0]:
# flatten the integer ground-truth labels and take the argmax of the softmax outputs
test_labels = list(y_test.squeeze())
predictions = list(predictions.argmax(axis=1))
In [35]:
get_metrics(true_labels=y_test, 
                predicted_labels=predictions)
Accuracy: 0.2882
Precision: 0.5323
Recall: 0.2882
F1 Score: 0.2322
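The gap between the ~0.92 accuracy logged during training and the 0.2882 accuracy here is most likely the binary_crossentropy mismatch noted earlier: with that loss, Keras scores each of the 10 one-hot positions separately, so even a weak classifier reports around 0.9. Since confusion_matrix is already imported, the per-class errors can be inspected directly; a sketch, using the test_labels and predictions lists prepared above:

In [ ]:
# inspect which CIFAR-10 classes are being confused (sketch)
cm = confusion_matrix(test_labels, predictions)
print(cm)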
In [0]:
label_dict = {0:'airplane',
             1:'automobile',
             2:'bird',
             3:'cat',
             4:'deer',
             5:'dog',
             6:'frog',
             7:'horse',
             8:'ship',
             9:'truck'}
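A quick way to eyeball some predictions as class names, as a sketch:

In [ ]:
# map a few integer predictions back to CIFAR-10 class names (sketch)
print([label_dict[p] for p in predictions[:10]])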
In [0]:
# save the model and copy it to Drive for later use
model.save('CNN_TL_CIFAR_10_MobileNetv2.h5')

! cp CNN_TL_CIFAR_10_MobileNetv2.h5 /content/drive/'My Drive'/
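The saved model can be restored later with load_model; a sketch (the Drive path is an assumption):

In [ ]:
from keras.models import load_model

# reload the saved model from Drive (path is an assumption)
model = load_model('/content/drive/My Drive/CNN_TL_CIFAR_10_MobileNetv2.h5')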
In [37]:
plot_predictions(model=model,dataset=X_test/255.,
                       dataset_labels=Y_test,
                       label_dict=label_dict,
                       batch_size=16,
                       grid_height=4,
                       grid_width=4)

Acknowledgements

  • Dripta Maharaj