Setup

First, let's import a few common modules, ensure Matplotlib plots figures inline, and prepare a function to save the figures. We also check that Python 3.5 or later is installed (Python 2.x may still work, but it is deprecated, so we strongly recommend using Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.

#collapse-show
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)

# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"

try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass

# TensorFlow ≥2.0 is required
import tensorflow as tf
assert tf.__version__ >= "2.0"

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
np.random.seed(42)

# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)

def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)

# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")

Perceptrons

Note: we set max_iter and tol explicitly to avoid warnings about their default values changing in future versions of Scikit-Learn.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
y = (iris.target == 0).astype(int)  # 1 if Iris-Setosa, else 0 (np.int is deprecated, so we use plain int)

per_clf = Perceptron(max_iter=1000, tol=1e-3, random_state=42)
per_clf.fit(X, y)

y_pred = per_clf.predict([[2, 0.5]])
y_pred
array([1])
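
As an aside, Scikit-Learn's Perceptron class is equivalent to an SGDClassifier with a perceptron loss, a constant learning rate and no regularization, so the following sketch (not part of the original code) learns the same kind of decision boundary:

# Equivalent model (illustrative): the Perceptron is basically an SGDClassifier
# with loss="perceptron", learning_rate="constant", eta0=1 and no penalty.
from sklearn.linear_model import SGDClassifier

sgd_clf = SGDClassifier(loss="perceptron", learning_rate="constant",
                        eta0=1, penalty=None, random_state=42)
sgd_clf.fit(X, y)
sgd_clf.predict([[2, 0.5]])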

#collapse-show
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]

axes = [0, 5, 0, 2]

x0, x1 = np.meshgrid(
        np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
        np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
    )
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)

plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")

plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])

plt.contourf(x0, x1, zz, cmap=custom_cmap)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)

save_fig("perceptron_iris_plot")
plt.show()

Saving figure perceptron_iris_plot

Activation functions

def sigmoid(z):
    # logistic sigmoid: squashes z into the (0, 1) range
    return 1 / (1 + np.exp(-z))

def relu(z):
    # rectified linear unit: max(0, z)
    return np.maximum(0, z)

def derivative(f, z, eps=0.000001):
    # numerical derivative of f at z (centered finite difference)
    return (f(z + eps) - f(z - eps)) / (2 * eps)

#collapse-show
z = np.linspace(-5, 5, 200)

plt.figure(figsize=(11,4))

plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=1, label="Step")
plt.plot(z, sigmoid(z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])

plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=1, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(sigmoid, z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])

save_fig("activation_functions_plot")
plt.show()

Saving figure activation_functions_plot
def heaviside(z):
    # step function: 1 where z >= 0, else 0
    return (z >= 0).astype(z.dtype)

def mlp_xor(x1, x2, activation=heaviside):
    # a tiny hand-crafted MLP (2 hidden units) that computes XOR(x1, x2)
    return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)
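
As a quick sanity check (this cell is not in the original notebook), evaluating the network on the four corner points reproduces the XOR truth table:

# XOR truth table: inputs (0, 0), (0, 1), (1, 0), (1, 1)
x1_check = np.array([0., 0., 1., 1.])
x2_check = np.array([0., 1., 0., 1.])
mlp_xor(x1_check, x2_check)  # expected: array([0., 1., 1., 0.])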

#collapse-show
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)

z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)

plt.figure(figsize=(10,4))

plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)

plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)

Building an Image Classifier

First, let's import TensorFlow and Keras.

import tensorflow as tf
from tensorflow import keras
tf.__version__
'2.1.0'
keras.__version__
'2.2.4-tf'

Let's start by loading the Fashion MNIST dataset. Keras has a number of functions to load popular datasets in keras.datasets. The dataset is already split between a training set and a test set, but it can be useful to split the training set further to create a validation set:

fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()

The training set contains 60,000 grayscale images, each 28x28 pixels:

X_train_full.shape
(60000, 28, 28)

Each pixel intensity is represented as a byte (0 to 255):

X_train_full.dtype
dtype('uint8')

Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats by dividing by 255.

X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.

You can plot an image using Matplotlib's imshow() function, with a 'binary' color map:

plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()

The labels are the class IDs (represented as uint8), from 0 to 9:

y_train
array([4, 0, 7, ..., 3, 0, 5], dtype=uint8)

Here are the corresponding class names:

class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
               "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

So the first image in the training set is a coat:

class_names[y_train[0]]
'Coat'

The validation set contains 5,000 images, and the test set contains 10,000 images:

X_valid.shape
(5000, 28, 28)
X_test.shape
(10000, 28, 28)

Let's take a look at a sample of the images in the dataset:

n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
    for col in range(n_cols):
        index = n_cols * row + col
        plt.subplot(n_rows, n_cols, index + 1)
        plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
        plt.axis('off')
        plt.title(class_names[y_train[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_plot', tight_layout=False)
plt.show()
Saving figure fashion_mnist_plot
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu"))
model.add(keras.layers.Dense(100, activation="relu"))
model.add(keras.layers.Dense(10, activation="softmax"))
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
model.layers
[<tensorflow.python.keras.layers.core.Flatten at 0x7ff370af5780>,
 <tensorflow.python.keras.layers.core.Dense at 0x7ff370af5c88>,
 <tensorflow.python.keras.layers.core.Dense at 0x7ff330ab36d8>,
 <tensorflow.python.keras.layers.core.Dense at 0x7ff330ab3828>]
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (None, 784)               0         
_________________________________________________________________
dense (Dense)                (None, 300)               235500    
_________________________________________________________________
dense_1 (Dense)              (None, 100)               30100     
_________________________________________________________________
dense_2 (Dense)              (None, 10)                1010      
=================================================================
Total params: 266,610
Trainable params: 266,610
Non-trainable params: 0
_________________________________________________________________
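
As a sanity check, the parameter counts in this summary can be reproduced by hand: each Dense layer has inputs × units connection weights plus units bias terms.

# Recomputing the parameter counts reported by model.summary()
print(784 * 300 + 300)  # first hidden layer: 235500
print(300 * 100 + 100)  # second hidden layer: 30100
print(100 * 10 + 10)    # output layer: 1010
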
keras.utils.plot_model(model, "my_fashion_mnist_model.png", show_shapes=True)
hidden1 = model.layers[1]
hidden1.name
'dense'
model.get_layer(hidden1.name) is hidden1
True
weights, biases = hidden1.get_weights()
weights
array([[ 0.02448617, -0.00877795, -0.02189048, ..., -0.02766046,
         0.03859074, -0.06889391],
       [ 0.00476504, -0.03105379, -0.0586676 , ...,  0.00602964,
        -0.02763776, -0.04165364],
       [-0.06189284, -0.06901957,  0.07102345, ..., -0.04238207,
         0.07121518, -0.07331658],
       ...,
       [-0.03048757,  0.02155137, -0.05400612, ..., -0.00113463,
         0.00228987,  0.05581069],
       [ 0.07061854, -0.06960931,  0.07038955, ..., -0.00384101,
         0.00034875,  0.02878492],
       [-0.06022581,  0.01577859, -0.02585464, ..., -0.00527829,
         0.00272203, -0.06793761]], dtype=float32)
weights.shape
(784, 300)
biases
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
biases.shape
(300,)
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="sgd",
              metrics=["accuracy"])

This is equivalent to:

model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=[keras.metrics.sparse_categorical_accuracy])
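
We use the sparse version of the loss because the labels are integer class IDs; if they were one-hot vectors we would use "categorical_crossentropy" instead. A minimal sketch of the conversion (purely illustrative, not needed here):

# Hypothetical one-hot encoding of the labels (not used in this notebook);
# with such labels you would compile with loss="categorical_crossentropy".
y_train_one_hot = keras.utils.to_categorical(y_train, num_classes=10)
y_train_one_hot[0]  # label 4 (Coat) becomes [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]
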
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_valid, y_valid))
Train on 55000 samples, validate on 5000 samples
Epoch 1/30
55000/55000 [==============================] - 2s 44us/sample - loss: 0.7226 - accuracy: 0.7641 - val_loss: 0.5073 - val_accuracy: 0.8320
Epoch 2/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.4844 - accuracy: 0.8321 - val_loss: 0.4541 - val_accuracy: 0.8478
Epoch 3/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.4414 - accuracy: 0.8464 - val_loss: 0.4373 - val_accuracy: 0.8508
Epoch 4/30
55000/55000 [==============================] - 2s 40us/sample - loss: 0.4129 - accuracy: 0.8549 - val_loss: 0.4170 - val_accuracy: 0.8562
Epoch 5/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.3927 - accuracy: 0.8616 - val_loss: 0.3825 - val_accuracy: 0.8646
Epoch 6/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.3772 - accuracy: 0.8665 - val_loss: 0.3736 - val_accuracy: 0.8680
Epoch 7/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.3630 - accuracy: 0.8726 - val_loss: 0.3713 - val_accuracy: 0.8698
Epoch 8/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.3523 - accuracy: 0.8746 - val_loss: 0.3657 - val_accuracy: 0.8710
Epoch 9/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.3424 - accuracy: 0.8776 - val_loss: 0.3442 - val_accuracy: 0.8784
Epoch 10/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.3329 - accuracy: 0.8809 - val_loss: 0.3523 - val_accuracy: 0.8774
Epoch 11/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.3243 - accuracy: 0.8835 - val_loss: 0.3363 - val_accuracy: 0.8820
Epoch 12/30
55000/55000 [==============================] - 2s 40us/sample - loss: 0.3163 - accuracy: 0.8868 - val_loss: 0.3313 - val_accuracy: 0.8842
Epoch 13/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.3075 - accuracy: 0.8898 - val_loss: 0.3320 - val_accuracy: 0.8814
Epoch 14/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.3019 - accuracy: 0.8921 - val_loss: 0.3237 - val_accuracy: 0.8874
Epoch 15/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.2956 - accuracy: 0.8938 - val_loss: 0.3173 - val_accuracy: 0.8898
Epoch 16/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.2899 - accuracy: 0.8963 - val_loss: 0.3247 - val_accuracy: 0.8872
Epoch 17/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.2836 - accuracy: 0.8985 - val_loss: 0.3175 - val_accuracy: 0.8920
Epoch 18/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2783 - accuracy: 0.8996 - val_loss: 0.3095 - val_accuracy: 0.8910
Epoch 19/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.2730 - accuracy: 0.9021 - val_loss: 0.3185 - val_accuracy: 0.8856
Epoch 20/30
55000/55000 [==============================] - 2s 37us/sample - loss: 0.2681 - accuracy: 0.9042 - val_loss: 0.3208 - val_accuracy: 0.8850
Epoch 21/30
55000/55000 [==============================] - 2s 43us/sample - loss: 0.2635 - accuracy: 0.9047 - val_loss: 0.3005 - val_accuracy: 0.8946
Epoch 22/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2576 - accuracy: 0.9077 - val_loss: 0.3107 - val_accuracy: 0.8878
Epoch 23/30
55000/55000 [==============================] - 2s 43us/sample - loss: 0.2540 - accuracy: 0.9082 - val_loss: 0.3020 - val_accuracy: 0.8896
Epoch 24/30
55000/55000 [==============================] - 2s 38us/sample - loss: 0.2492 - accuracy: 0.9102 - val_loss: 0.3105 - val_accuracy: 0.8850
Epoch 25/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2453 - accuracy: 0.9126 - val_loss: 0.3100 - val_accuracy: 0.8906
Epoch 26/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2408 - accuracy: 0.9145 - val_loss: 0.3278 - val_accuracy: 0.8846
Epoch 27/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2367 - accuracy: 0.9155 - val_loss: 0.3130 - val_accuracy: 0.8856
Epoch 28/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2323 - accuracy: 0.9178 - val_loss: 0.2954 - val_accuracy: 0.8926
Epoch 29/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2287 - accuracy: 0.9184 - val_loss: 0.2998 - val_accuracy: 0.8924
Epoch 30/30
55000/55000 [==============================] - 2s 39us/sample - loss: 0.2256 - accuracy: 0.9195 - val_loss: 0.3049 - val_accuracy: 0.8882
history.params
{'batch_size': 32,
 'epochs': 30,
 'steps': 1719,
 'samples': 55000,
 'verbose': 0,
 'do_validation': True,
 'metrics': ['loss', 'accuracy', 'val_loss', 'val_accuracy']}
print(history.epoch)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
history.history.keys()
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
import pandas as pd

pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.gca().set_ylim(0, 1)
save_fig("keras_learning_curves_plot")
plt.show()
Saving figure keras_learning_curves_plot
model.evaluate(X_test, y_test)
10000/10000 [==============================] - 0s 21us/sample - loss: 0.3378 - accuracy: 0.8781
[0.33780701770782473, 0.8781]
X_new = X_test[:3]
y_proba = model.predict(X_new)
y_proba.round(2)
array([[0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.01, 0.  , 0.99],
       [0.  , 0.  , 0.99, 0.  , 0.01, 0.  , 0.  , 0.  , 0.  , 0.  ],
       [0.  , 1.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  , 0.  ]],
      dtype=float32)
y_pred = model.predict_classes(X_new)
y_pred
array([9, 2, 1])
np.array(class_names)[y_pred]
array(['Ankle boot', 'Pullover', 'Trouser'], dtype='<U11')
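
Note that predict_classes() is specific to Sequential models and has since been deprecated in tf.keras; an equivalent way to get the class IDs is to take the argmax of the predicted probabilities:

# Equivalent to predict_classes(): pick the most likely class for each instance
np.argmax(y_proba, axis=1)  # array([9, 2, 1])
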
y_new = y_test[:3]
y_new
array([9, 2, 1], dtype=uint8)
plt.figure(figsize=(7.2, 2.4))
for index, image in enumerate(X_new):
    plt.subplot(1, 3, index + 1)
    plt.imshow(image, cmap="binary", interpolation="nearest")
    plt.axis('off')
    plt.title(class_names[y_test[index]], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
save_fig('fashion_mnist_images_plot', tight_layout=False)
plt.show()
Saving figure fashion_mnist_images_plot

Regression MLP

Let's load, split and scale the California housing dataset (the original one, not the modified one used in chapter 2). Note that we fit the scaler on the training set only, then use it to transform the validation and test sets:

from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()

X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target, random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full, random_state=42)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
    keras.layers.Dense(1)
])
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
X_new = X_test[:3]
y_pred = model.predict(X_new)
Train on 11610 samples, validate on 3870 samples
Epoch 1/20
11610/11610 [==============================] - 1s 44us/sample - loss: 1.6205 - val_loss: 2.0374
Epoch 2/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.7162 - val_loss: 0.6571
Epoch 3/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.6356 - val_loss: 0.5996
Epoch 4/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5989 - val_loss: 0.5662
Epoch 5/20
11610/11610 [==============================] - 0s 28us/sample - loss: 0.5713 - val_loss: 0.5489
Epoch 6/20
11610/11610 [==============================] - 0s 28us/sample - loss: 0.5491 - val_loss: 0.5204
Epoch 7/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5301 - val_loss: 0.5018
Epoch 8/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.5142 - val_loss: 0.4815
Epoch 9/20
11610/11610 [==============================] - 0s 27us/sample - loss: 0.5004 - val_loss: 0.4695
Epoch 10/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4883 - val_loss: 0.4605
Epoch 11/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4786 - val_loss: 0.4495
Epoch 12/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4697 - val_loss: 0.4382
Epoch 13/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4621 - val_loss: 0.4309
Epoch 14/20
11610/11610 [==============================] - 0s 27us/sample - loss: 0.4556 - val_loss: 0.4247
Epoch 15/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4497 - val_loss: 0.4200
Epoch 16/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4443 - val_loss: 0.4149
Epoch 17/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4397 - val_loss: 0.4108
Epoch 18/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4354 - val_loss: 0.4059
Epoch 19/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4315 - val_loss: 0.4003
Epoch 20/20
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4281 - val_loss: 0.3981
5160/5160 [==============================] - 0s 15us/sample - loss: 0.4218
plt.plot(pd.DataFrame(history.history))
plt.grid(True)
plt.gca().set_ylim(0, 1)
plt.show()
y_pred
array([[0.37310064],
       [1.6790789 ],
       [3.0817137 ]], dtype=float32)
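
The targets are median house values expressed in units of $100,000, so a test MSE of about 0.42 corresponds to a typical error (RMSE) of roughly 0.65, i.e. about $65,000:

np.sqrt(mse_test)  # roughly 0.65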

Functional API

Not all neural network models are simply sequential: some have complex topologies, and some have multiple inputs and/or multiple outputs. For example, a Wide & Deep neural network (see the paper by Heng-Tze Cheng et al.) connects all or part of the inputs directly to the output layer.

np.random.seed(42)
tf.random.set_seed(42)
input_ = keras.layers.Input(shape=X_train.shape[1:])
hidden1 = keras.layers.Dense(30, activation="relu")(input_)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_, hidden2])
output = keras.layers.Dense(1)(concat)
model = keras.models.Model(inputs=[input_], outputs=[output])
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 8)]          0                                            
__________________________________________________________________________________________________
dense_5 (Dense)                 (None, 30)           270         input_1[0][0]                    
__________________________________________________________________________________________________
dense_6 (Dense)                 (None, 30)           930         dense_5[0][0]                    
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 38)           0           input_1[0][0]                    
                                                                 dense_6[0][0]                    
__________________________________________________________________________________________________
dense_7 (Dense)                 (None, 1)            39          concatenate[0][0]                
==================================================================================================
Total params: 1,239
Trainable params: 1,239
Non-trainable params: 0
__________________________________________________________________________________________________
model.compile(loss="mean_squared_error", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=20,
                    validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
y_pred = model.predict(X_new)
Train on 11610 samples, validate on 3870 samples
Epoch 1/20
11610/11610 [==============================] - 1s 47us/sample - loss: 1.2390 - val_loss: 0.6566
Epoch 2/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.6312 - val_loss: 0.6734
Epoch 3/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5886 - val_loss: 0.5574
Epoch 4/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5595 - val_loss: 0.5235
Epoch 5/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5361 - val_loss: 0.5011
Epoch 6/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5178 - val_loss: 0.5065
Epoch 7/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5016 - val_loss: 0.4699
Epoch 8/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4888 - val_loss: 0.4745
Epoch 9/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4772 - val_loss: 0.4425
Epoch 10/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4673 - val_loss: 0.4384
Epoch 11/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4586 - val_loss: 0.4533
Epoch 12/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4504 - val_loss: 0.4179
Epoch 13/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4435 - val_loss: 0.4137
Epoch 14/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4376 - val_loss: 0.4062
Epoch 15/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4318 - val_loss: 0.4541
Epoch 16/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4266 - val_loss: 0.3952
Epoch 17/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4221 - val_loss: 0.3910
Epoch 18/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4173 - val_loss: 0.4205
Epoch 19/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4132 - val_loss: 0.3830
Epoch 20/20
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4096 - val_loss: 0.3923
5160/5160 [==============================] - 0s 15us/sample - loss: 0.4042

What if you want to send different subsets of input features through the wide and deep paths? We will send 5 features through the wide path (features 0 to 4) and 6 through the deep path (features 2 to 7). Note that 3 features will go through both paths (features 2, 3 and 4).

np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="output")(concat)
model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))

X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:]
X_new_A, X_new_B = X_test_A[:3], X_test_B[:3]

history = model.fit((X_train_A, X_train_B), y_train, epochs=20,
                    validation_data=((X_valid_A, X_valid_B), y_valid))
mse_test = model.evaluate((X_test_A, X_test_B), y_test)
y_pred = model.predict((X_new_A, X_new_B))
Train on 11610 samples, validate on 3870 samples
Epoch 1/20
11610/11610 [==============================] - 1s 50us/sample - loss: 1.8127 - val_loss: 2.1165
Epoch 2/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.6852 - val_loss: 0.6178
Epoch 3/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.5965 - val_loss: 0.5600
Epoch 4/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.5587 - val_loss: 0.5269
Epoch 5/20
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5321 - val_loss: 0.5185
Epoch 6/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.5129 - val_loss: 0.4803
Epoch 7/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4959 - val_loss: 0.4689
Epoch 8/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4837 - val_loss: 0.4498
Epoch 9/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4734 - val_loss: 0.4387
Epoch 10/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4646 - val_loss: 0.4306
Epoch 11/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4571 - val_loss: 0.4262
Epoch 12/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4507 - val_loss: 0.4173
Epoch 13/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4456 - val_loss: 0.4124
Epoch 14/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4422 - val_loss: 0.4084
Epoch 15/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4386 - val_loss: 0.4351
Epoch 16/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4361 - val_loss: 0.4017
Epoch 17/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4326 - val_loss: 0.3990
Epoch 18/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4296 - val_loss: 0.4148
Epoch 19/20
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4278 - val_loss: 0.3957
Epoch 20/20
11610/11610 [==============================] - 0s 33us/sample - loss: 0.4259 - val_loss: 0.3976
5160/5160 [==============================] - 0s 16us/sample - loss: 0.4202

Adding an auxiliary output for regularization:

np.random.seed(42)
tf.random.set_seed(42)
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="main_output")(concat)
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)
model = keras.models.Model(inputs=[input_A, input_B],
                           outputs=[output, aux_output])
model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit([X_train_A, X_train_B], [y_train, y_train], epochs=20,
                    validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]))
Train on 11610 samples, validate on 3870 samples
Epoch 1/20
11610/11610 [==============================] - 1s 68us/sample - loss: 2.1346 - main_output_loss: 1.9194 - aux_output_loss: 4.0632 - val_loss: 2.9120 - val_main_output_loss: 2.2555 - val_aux_output_loss: 8.8088
Epoch 2/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.8954 - main_output_loss: 0.7048 - aux_output_loss: 2.6119 - val_loss: 1.4135 - val_main_output_loss: 0.6348 - val_aux_output_loss: 8.4172
Epoch 3/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.7400 - main_output_loss: 0.6077 - aux_output_loss: 1.9305 - val_loss: 1.3594 - val_main_output_loss: 0.5885 - val_aux_output_loss: 8.2925
Epoch 4/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6749 - main_output_loss: 0.5690 - aux_output_loss: 1.6264 - val_loss: 1.2789 - val_main_output_loss: 0.5611 - val_aux_output_loss: 7.7340
Epoch 5/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6351 - main_output_loss: 0.5420 - aux_output_loss: 1.4729 - val_loss: 1.1841 - val_main_output_loss: 0.5656 - val_aux_output_loss: 6.7464
Epoch 6/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6068 - main_output_loss: 0.5213 - aux_output_loss: 1.3763 - val_loss: 1.0614 - val_main_output_loss: 0.5202 - val_aux_output_loss: 5.9282
Epoch 7/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.5832 - main_output_loss: 0.5028 - aux_output_loss: 1.3072 - val_loss: 0.9555 - val_main_output_loss: 0.5111 - val_aux_output_loss: 4.9515
Epoch 8/20
11610/11610 [==============================] - 0s 38us/sample - loss: 0.5651 - main_output_loss: 0.4892 - aux_output_loss: 1.2463 - val_loss: 0.8426 - val_main_output_loss: 0.4713 - val_aux_output_loss: 4.1805
Epoch 9/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5497 - main_output_loss: 0.4772 - aux_output_loss: 1.2017 - val_loss: 0.7632 - val_main_output_loss: 0.4633 - val_aux_output_loss: 3.4589
Epoch 10/20
11610/11610 [==============================] - 0s 38us/sample - loss: 0.5368 - main_output_loss: 0.4671 - aux_output_loss: 1.1631 - val_loss: 0.6954 - val_main_output_loss: 0.4467 - val_aux_output_loss: 2.9307
Epoch 11/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5257 - main_output_loss: 0.4586 - aux_output_loss: 1.1297 - val_loss: 0.6413 - val_main_output_loss: 0.4292 - val_aux_output_loss: 2.5478
Epoch 12/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5162 - main_output_loss: 0.4516 - aux_output_loss: 1.0989 - val_loss: 0.5950 - val_main_output_loss: 0.4232 - val_aux_output_loss: 2.1396
Epoch 13/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5082 - main_output_loss: 0.4454 - aux_output_loss: 1.0729 - val_loss: 0.5608 - val_main_output_loss: 0.4203 - val_aux_output_loss: 1.8236
Epoch 14/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.5022 - main_output_loss: 0.4416 - aux_output_loss: 1.0487 - val_loss: 0.5324 - val_main_output_loss: 0.4142 - val_aux_output_loss: 1.5944
Epoch 15/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4964 - main_output_loss: 0.4376 - aux_output_loss: 1.0240 - val_loss: 0.5206 - val_main_output_loss: 0.4118 - val_aux_output_loss: 1.4985
Epoch 16/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4914 - main_output_loss: 0.4344 - aux_output_loss: 1.0054 - val_loss: 0.4939 - val_main_output_loss: 0.4017 - val_aux_output_loss: 1.3221
Epoch 17/20
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4865 - main_output_loss: 0.4312 - aux_output_loss: 0.9841 - val_loss: 0.4803 - val_main_output_loss: 0.3993 - val_aux_output_loss: 1.2082
Epoch 18/20
11610/11610 [==============================] - 0s 38us/sample - loss: 0.4820 - main_output_loss: 0.4281 - aux_output_loss: 0.9663 - val_loss: 0.4820 - val_main_output_loss: 0.4053 - val_aux_output_loss: 1.1710
Epoch 19/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4786 - main_output_loss: 0.4264 - aux_output_loss: 0.9490 - val_loss: 0.4661 - val_main_output_loss: 0.3979 - val_aux_output_loss: 1.0790
Epoch 20/20
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4753 - main_output_loss: 0.4245 - aux_output_loss: 0.9320 - val_loss: 0.4598 - val_main_output_loss: 0.3968 - val_aux_output_loss: 1.0257
total_loss, main_loss, aux_loss = model.evaluate(
    [X_test_A, X_test_B], [y_test, y_test])
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
5160/5160 [==============================] - 0s 19us/sample - loss: 0.4656 - main_output_loss: 0.4165 - aux_output_loss: 0.9111

The subclassing API

class WideAndDeepModel(keras.models.Model):
    def __init__(self, units=30, activation="relu", **kwargs):
        super().__init__(**kwargs)
        self.hidden1 = keras.layers.Dense(units, activation=activation)
        self.hidden2 = keras.layers.Dense(units, activation=activation)
        self.main_output = keras.layers.Dense(1)
        self.aux_output = keras.layers.Dense(1)
        
    def call(self, inputs):
        input_A, input_B = inputs
        hidden1 = self.hidden1(input_B)
        hidden2 = self.hidden2(hidden1)
        concat = keras.layers.concatenate([input_A, hidden2])
        main_output = self.main_output(concat)
        aux_output = self.aux_output(hidden2)
        return main_output, aux_output

model = WideAndDeepModel(30, activation="relu")
model.compile(loss="mse", loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,
                    validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))
total_loss, main_loss, aux_loss = model.evaluate((X_test_A, X_test_B), (y_test, y_test))
y_pred_main, y_pred_aux = model.predict((X_new_A, X_new_B))
Train on 11610 samples, validate on 3870 samples
Epoch 1/10
11610/11610 [==============================] - 1s 78us/sample - loss: 2.2719 - output_1_loss: 2.1554 - output_2_loss: 3.3117 - val_loss: 4.3377 - val_output_1_loss: 2.7732 - val_output_2_loss: 18.3999
Epoch 2/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.9891 - output_1_loss: 0.8653 - output_2_loss: 2.1062 - val_loss: 2.0073 - val_output_1_loss: 0.7581 - val_output_2_loss: 13.2427
Epoch 3/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.8320 - output_1_loss: 0.7303 - output_2_loss: 1.7468 - val_loss: 1.7215 - val_output_1_loss: 0.7114 - val_output_2_loss: 10.8051
Epoch 4/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.7658 - output_1_loss: 0.6760 - output_2_loss: 1.5726 - val_loss: 1.4708 - val_output_1_loss: 0.6454 - val_output_2_loss: 8.8938
Epoch 5/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.7223 - output_1_loss: 0.6394 - output_2_loss: 1.4683 - val_loss: 1.3057 - val_output_1_loss: 0.6852 - val_output_2_loss: 6.8846
Epoch 6/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.6909 - output_1_loss: 0.6132 - output_2_loss: 1.3901 - val_loss: 1.1005 - val_output_1_loss: 0.5915 - val_output_2_loss: 5.6773
Epoch 7/10
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6636 - output_1_loss: 0.5894 - output_2_loss: 1.3330 - val_loss: 0.9605 - val_output_1_loss: 0.5611 - val_output_2_loss: 4.5516
Epoch 8/10
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6406 - output_1_loss: 0.5691 - output_2_loss: 1.2833 - val_loss: 0.8480 - val_output_1_loss: 0.5263 - val_output_2_loss: 3.7399
Epoch 9/10
11610/11610 [==============================] - 0s 36us/sample - loss: 0.6199 - output_1_loss: 0.5507 - output_2_loss: 1.2416 - val_loss: 0.7650 - val_output_1_loss: 0.5085 - val_output_2_loss: 3.0718
Epoch 10/10
11610/11610 [==============================] - 0s 37us/sample - loss: 0.6019 - output_1_loss: 0.5351 - output_2_loss: 1.2026 - val_loss: 0.7004 - val_output_1_loss: 0.4916 - val_output_2_loss: 2.5772
5160/5160 [==============================] - 0s 18us/sample - loss: 0.5819 - output_1_loss: 0.5174 - output_2_loss: 1.1749
model = WideAndDeepModel(30, activation="relu")

Saving and Restoring

np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])    
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))
mse_test = model.evaluate(X_test, y_test)
Train on 11610 samples, validate on 3870 samples
Epoch 1/10
11610/11610 [==============================] - 1s 46us/sample - loss: 1.8423 - val_loss: 5.2165
Epoch 2/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.6876 - val_loss: 0.7732
Epoch 3/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5954 - val_loss: 0.5446
Epoch 4/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5553 - val_loss: 0.5425
Epoch 5/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5268 - val_loss: 0.5539
Epoch 6/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.5049 - val_loss: 0.4701
Epoch 7/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4852 - val_loss: 0.4562
Epoch 8/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4706 - val_loss: 0.4452
Epoch 9/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4576 - val_loss: 0.4406
Epoch 10/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.4476 - val_loss: 0.4185
5160/5160 [==============================] - 0s 15us/sample - loss: 0.4376
model.save("my_keras_model.h5")
model = keras.models.load_model("my_keras_model.h5")
model.predict(X_new)
array([[0.551559 ],
       [1.6555369],
       [3.0014234]], dtype=float32)
model.save_weights("my_keras_weights.ckpt")
model.load_weights("my_keras_weights.ckpt")
<tensorflow.python.training.tracking.util.CheckpointLoadStatus at 0x7ff383d586d8>

Using Callbacks during Training

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])    
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_keras_model.h5", save_best_only=True)
history = model.fit(X_train, y_train, epochs=10,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb])
model = keras.models.load_model("my_keras_model.h5") # rollback to best model
mse_test = model.evaluate(X_test, y_test)
Train on 11610 samples, validate on 3870 samples
Epoch 1/10
11610/11610 [==============================] - 1s 47us/sample - loss: 1.8423 - val_loss: 5.2165
Epoch 2/10
11610/11610 [==============================] - 0s 30us/sample - loss: 0.6876 - val_loss: 0.7732
Epoch 3/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5954 - val_loss: 0.5446
Epoch 4/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5553 - val_loss: 0.5425
Epoch 5/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5268 - val_loss: 0.5539
Epoch 6/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.5049 - val_loss: 0.4701
Epoch 7/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4852 - val_loss: 0.4562
Epoch 8/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4706 - val_loss: 0.4452
Epoch 9/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4576 - val_loss: 0.4406
Epoch 10/10
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4476 - val_loss: 0.4185
5160/5160 [==============================] - 0s 22us/sample - loss: 0.4376
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
early_stopping_cb = keras.callbacks.EarlyStopping(patience=10,  # stop after 10 epochs with no val_loss improvement
                                                  restore_best_weights=True)  # and roll back to the best weights seen
history = model.fit(X_train, y_train, epochs=100,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb, early_stopping_cb])
mse_test = model.evaluate(X_test, y_test)
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
11610/11610 [==============================] - 1s 47us/sample - loss: 0.4385 - val_loss: 0.4287
Epoch 2/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4319 - val_loss: 0.4117
Epoch 3/100
11610/11610 [==============================] - 0s 32us/sample - loss: 0.4252 - val_loss: 0.3975
Epoch 4/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4199 - val_loss: 0.3943
Epoch 5/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4150 - val_loss: 0.3964
Epoch 6/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4110 - val_loss: 0.3907
Epoch 7/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4070 - val_loss: 0.3823
Epoch 8/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4036 - val_loss: 0.3786
Epoch 9/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.4003 - val_loss: 0.3739
Epoch 10/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3973 - val_loss: 0.3724
Epoch 11/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3944 - val_loss: 0.3697
Epoch 12/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3916 - val_loss: 0.3670
Epoch 13/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3892 - val_loss: 0.3638
Epoch 14/100
11610/11610 [==============================] - 0s 32us/sample - loss: 0.3869 - val_loss: 0.3633
Epoch 15/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3845 - val_loss: 0.4051
Epoch 16/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3826 - val_loss: 0.3662
Epoch 17/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3805 - val_loss: 0.3554
<<132 more lines>>
Epoch 84/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3312 - val_loss: 0.3282
Epoch 85/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3307 - val_loss: 0.3379
Epoch 86/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3306 - val_loss: 0.3163
Epoch 87/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3302 - val_loss: 0.3377
Epoch 88/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3300 - val_loss: 0.3340
Epoch 89/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3295 - val_loss: 0.3158
Epoch 90/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3294 - val_loss: 0.3519
Epoch 91/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3289 - val_loss: 0.3142
Epoch 92/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3286 - val_loss: 0.3619
Epoch 93/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3282 - val_loss: 0.3270
Epoch 94/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3282 - val_loss: 0.4632
Epoch 95/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3284 - val_loss: 0.3371
Epoch 96/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3275 - val_loss: 0.4659
Epoch 97/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3278 - val_loss: 0.3156
Epoch 98/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3265 - val_loss: 0.3259
Epoch 99/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.3262 - val_loss: 0.3407
Epoch 100/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3264 - val_loss: 0.3176
5160/5160 [==============================] - 0s 15us/sample - loss: 0.3271
class PrintValTrainRatioCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        print("\nval/train: {:.2f}".format(logs["val_loss"] / logs["loss"]))
val_train_ratio_cb = PrintValTrainRatioCallback()
history = model.fit(X_train, y_train, epochs=1,
                    validation_data=(X_valid, y_valid),
                    callbacks=[val_train_ratio_cb])
Train on 11610 samples, validate on 3870 samples
10912/11610 [===========================>..] - ETA: 0s - loss: 0.3231
val/train: 1.16
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3256 - val_loss: 0.3785
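
The Callback base class also exposes hooks at other points of the training loop, such as on_train_begin(), on_epoch_begin(), on_batch_begin() and on_batch_end(). Here is a minimal sketch (not part of the original notebook) that announces the start of training and reports every 100th batch:

class VerboseTrainingCallback(keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        print("Starting training...")
    def on_batch_end(self, batch, logs=None):
        if batch % 100 == 0:
            print("Finished batch", batch)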

TensorBoard

root_logdir = os.path.join(os.curdir, "my_logs")
def get_run_logdir():
    import time
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)

run_logdir = get_run_logdir()
run_logdir
'./my_logs/run_2020_01_27-10_15_44'
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])    
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=1e-3))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb, tensorboard_cb])
Train on 11610 samples, validate on 3870 samples
Epoch 1/30
11610/11610 [==============================] - 1s 52us/sample - loss: 1.8423 - val_loss: 5.2165
Epoch 2/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.6876 - val_loss: 0.7732
Epoch 3/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5954 - val_loss: 0.5446
Epoch 4/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5553 - val_loss: 0.5425
Epoch 5/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.5268 - val_loss: 0.5539
Epoch 6/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5049 - val_loss: 0.4701
Epoch 7/30
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4852 - val_loss: 0.4562
Epoch 8/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.4706 - val_loss: 0.4452
Epoch 9/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4576 - val_loss: 0.4406
Epoch 10/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4476 - val_loss: 0.4185
Epoch 11/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4388 - val_loss: 0.4285
Epoch 12/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4313 - val_loss: 0.4071
Epoch 13/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4252 - val_loss: 0.3998
Epoch 14/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4198 - val_loss: 0.3970
Epoch 15/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4152 - val_loss: 0.4115
Epoch 16/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4109 - val_loss: 0.3849
Epoch 17/30
11610/11610 [==============================] - 0s 39us/sample - loss: 0.4073 - val_loss: 0.3862
Epoch 18/30
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4037 - val_loss: 0.3907
Epoch 19/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4003 - val_loss: 0.3751
Epoch 20/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3974 - val_loss: 0.3711
Epoch 21/30
11610/11610 [==============================] - 0s 37us/sample - loss: 0.3945 - val_loss: 0.3733
Epoch 22/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3919 - val_loss: 0.3676
Epoch 23/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3893 - val_loss: 0.3669
Epoch 24/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3869 - val_loss: 0.3614
Epoch 25/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.3846 - val_loss: 0.3600
Epoch 26/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3825 - val_loss: 0.3578
Epoch 27/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3802 - val_loss: 0.3676
Epoch 28/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.3781 - val_loss: 0.3545
Epoch 29/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.3761 - val_loss: 0.3612
Epoch 30/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.3742 - val_loss: 0.3555

To start the TensorBoard server, one option is to open a terminal, activate the virtualenv where you installed TensorBoard (if needed), go to this notebook's directory, and then type:

$ tensorboard --logdir=./my_logs --port=6006

You can then open your web browser to localhost:6006 and use TensorBoard. Once you are done, press Ctrl-C in the terminal window to shut down the TensorBoard server.

Alternatively, you can load TensorBoard's Jupyter extension and run it like this:

%load_ext tensorboard
%tensorboard --logdir=./my_logs --port=6006
run_logdir2 = get_run_logdir()
run_logdir2
'./my_logs/run_2020_01_27-10_18_25'
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=[8]),
    keras.layers.Dense(30, activation="relu"),
    keras.layers.Dense(1)
])    
model.compile(loss="mse", optimizer=keras.optimizers.SGD(lr=0.05))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir2)
history = model.fit(X_train, y_train, epochs=30,
                    validation_data=(X_valid, y_valid),
                    callbacks=[checkpoint_cb, tensorboard_cb])
Train on 11610 samples, validate on 3870 samples
Epoch 1/30
11610/11610 [==============================] - 1s 52us/sample - loss: 5.6341 - val_loss: 1.3205
Epoch 2/30
11610/11610 [==============================] - 0s 35us/sample - loss: 1.2704 - val_loss: 1.0757
Epoch 3/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.9370 - val_loss: 0.7769
Epoch 4/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.8854 - val_loss: 0.8254
Epoch 5/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.9501 - val_loss: 0.9415
Epoch 6/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.8283 - val_loss: 0.6111
Epoch 7/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.8095 - val_loss: 0.7394
Epoch 8/30
11610/11610 [==============================] - 0s 35us/sample - loss: 1.1857 - val_loss: 1.0356
Epoch 9/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.8920 - val_loss: 0.6564
Epoch 10/30
11610/11610 [==============================] - 0s 35us/sample - loss: 1.2295 - val_loss: 1.1277
Epoch 11/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.8992 - val_loss: 0.6219
Epoch 12/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.7697 - val_loss: 0.6959
Epoch 13/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.7375 - val_loss: 0.6471
Epoch 14/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.9825 - val_loss: 1.0278
Epoch 15/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.9653 - val_loss: 0.9092
Epoch 16/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.9008 - val_loss: 0.8451
Epoch 17/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.7838 - val_loss: 0.5818
Epoch 18/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.9960 - val_loss: 1.0598
Epoch 19/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.7887 - val_loss: 0.6405
Epoch 20/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.5706 - val_loss: 0.5093
Epoch 21/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.5192 - val_loss: 0.5457
Epoch 22/30
11610/11610 [==============================] - 0s 38us/sample - loss: 0.4971 - val_loss: 0.4644
Epoch 23/30
11610/11610 [==============================] - 0s 38us/sample - loss: 0.4815 - val_loss: 0.4120
Epoch 24/30
11610/11610 [==============================] - 0s 37us/sample - loss: 0.4678 - val_loss: 0.4853
Epoch 25/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4490 - val_loss: 0.3972
Epoch 26/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.4348 - val_loss: 0.4258
Epoch 27/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4377 - val_loss: 0.3829
Epoch 28/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.4353 - val_loss: 0.3860
Epoch 29/30
11610/11610 [==============================] - 0s 35us/sample - loss: 0.4209 - val_loss: 0.3763
Epoch 30/30
11610/11610 [==============================] - 0s 36us/sample - loss: 0.4441 - val_loss: 0.4186

Notice how TensorBoard now sees two runs, and you can compare the learning curves.
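
TensorBoard is not limited to the metrics logged by the callback: you can also log custom scalars, histograms, images and more through the lower-level tf.summary API. A minimal sketch (the run directory and the logged values are just illustrative):

# Illustrative use of tf.summary to log a custom scalar to a new run directory
test_logdir = get_run_logdir()
writer = tf.summary.create_file_writer(test_logdir)
with writer.as_default():
    for step in range(1, 101):
        tf.summary.scalar("my_scalar", np.sin(step / 10), step=step)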

Check out the other available logging options:

help(keras.callbacks.TensorBoard.__init__)
Help on function __init__ in module tensorflow.python.keras.callbacks:

__init__(self, log_dir='logs', histogram_freq=0, write_graph=True, write_images=False, update_freq='epoch', profile_batch=2, embeddings_freq=0, embeddings_metadata=None, **kwargs)
    Initialize self.  See help(type(self)) for accurate signature.
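
For example, judging from the signature above, you could also log weight histograms every epoch and disable the profiler; a sketch of such a configuration (not used in this notebook):

tensorboard_detailed_cb = keras.callbacks.TensorBoard(get_run_logdir(),
                                                      histogram_freq=1,  # log weight histograms every epoch
                                                      profile_batch=0)   # disable profiling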

Hyperparameter Tuning

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=[8]):
    # Build a Sequential regression MLP with the requested number of hidden layers and neurons
    model = keras.models.Sequential()
    model.add(keras.layers.InputLayer(input_shape=input_shape))
    for layer in range(n_hidden):
        model.add(keras.layers.Dense(n_neurons, activation="relu"))
    model.add(keras.layers.Dense(1))  # single output neuron for regression
    optimizer = keras.optimizers.SGD(lr=learning_rate)
    model.compile(loss="mse", optimizer=optimizer)
    return model
keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)
keras_reg.fit(X_train, y_train, epochs=100,
              validation_data=(X_valid, y_valid),
              callbacks=[keras.callbacks.EarlyStopping(patience=10)])
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
11610/11610 [==============================] - 0s 43us/sample - loss: 1.1399 - val_loss: 24.4309
Epoch 2/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.7393 - val_loss: 3.2896
Epoch 3/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.5374 - val_loss: 0.6080
Epoch 4/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.4796 - val_loss: 0.4532
Epoch 5/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4536 - val_loss: 0.4221
Epoch 6/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4373 - val_loss: 0.4144
Epoch 7/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4256 - val_loss: 0.4025
Epoch 8/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.4178 - val_loss: 0.3938
Epoch 9/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4114 - val_loss: 0.4085
Epoch 10/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.4060 - val_loss: 0.3974
Epoch 11/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.4018 - val_loss: 0.3847
Epoch 12/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3978 - val_loss: 0.3818
Epoch 13/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.3943 - val_loss: 0.3820
Epoch 14/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.3915 - val_loss: 0.3766
Epoch 15/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3888 - val_loss: 0.4268
Epoch 16/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.3862 - val_loss: 0.3638
Epoch 17/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3842 - val_loss: 0.3666
<<53 more lines>>
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3533 - val_loss: 0.3355
Epoch 45/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3529 - val_loss: 0.3329
Epoch 46/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3522 - val_loss: 0.3393
Epoch 47/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.3510 - val_loss: 0.4172
Epoch 48/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3515 - val_loss: 0.3758
Epoch 49/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3506 - val_loss: 0.4285
Epoch 50/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.3506 - val_loss: 0.4004
Epoch 51/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3501 - val_loss: 0.3300
Epoch 52/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3495 - val_loss: 0.3300
Epoch 53/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3485 - val_loss: 0.3599
Epoch 54/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3479 - val_loss: 0.3424
Epoch 55/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3475 - val_loss: 0.3752
Epoch 56/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3472 - val_loss: 0.4231
Epoch 57/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3474 - val_loss: 0.3728
Epoch 58/100
11610/11610 [==============================] - 0s 28us/sample - loss: 0.3464 - val_loss: 0.3455
Epoch 59/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3458 - val_loss: 0.3647
Epoch 60/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3451 - val_loss: 0.4226
Epoch 61/100
11610/11610 [==============================] - 0s 29us/sample - loss: 0.3456 - val_loss: 0.3635
<tensorflow.python.keras.callbacks.History at 0x7ff32141f630>
mse_test = keras_reg.score(X_test, y_test)
5160/5160 [==============================] - 0s 14us/sample - loss: 0.3464
y_pred = keras_reg.predict(X_new)
np.random.seed(42)
tf.random.set_seed(42)

#collapse-show
from scipy.stats import reciprocal
from sklearn.model_selection import RandomizedSearchCV

param_distribs = {
    "n_hidden": [0, 1, 2, 3],
    "n_neurons": np.arange(1, 100),
    "learning_rate": reciprocal(3e-4, 3e-2),
}

rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)
rnd_search_cv.fit(X_train, y_train, epochs=100,
                  validation_data=(X_valid, y_valid),
                  callbacks=[keras.callbacks.EarlyStopping(patience=10)])

Fitting 3 folds for each of 10 candidates, totalling 30 fits
[CV] learning_rate=0.001683454924600351, n_hidden=0, n_neurons=15 ....
Train on 7740 samples, validate on 3870 samples
Epoch 1/100
  32/7740 [..............................] - ETA: 30s - loss: 6.7379
[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers.
7740/7740 [==============================] - 0s 53us/sample - loss: 3.5574 - val_loss: 1.8536
Epoch 2/100
7740/7740 [==============================] - 0s 30us/sample - loss: 1.3316 - val_loss: 0.9380
Epoch 3/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.8573 - val_loss: 0.8545
Epoch 4/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.7344 - val_loss: 0.9545
Epoch 5/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.6943 - val_loss: 0.7248
Epoch 6/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.6682 - val_loss: 0.7356
Epoch 7/100
7740/7740 [==============================] - 0s 31us/sample - loss: 0.6494 - val_loss: 0.9732
Epoch 8/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.6408 - val_loss: 0.6175
Epoch 9/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.6256 - val_loss: 0.5877
Epoch 10/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.6149 - val_loss: 0.6164
Epoch 11/100
7740/7740 [==============================] - 0s 29us/sample - loss: 0.6058 - val_loss: 0.5851
Epoch 12/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.5961 - val_loss: 0.7040
Epoch 13/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.5905 - val_loss: 0.5594
Epoch 14/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.5797 - val_loss: 0.8668
Epoch 15/100
7740/7740 [==============================] - 0s 30us/sample - loss: 0.5764 - val_loss: 0.9031
<<2324 more lines>>
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2976 - val_loss: 0.3259
Epoch 72/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2979 - val_loss: 0.3275
Epoch 73/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2982 - val_loss: 0.3235
Epoch 74/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2971 - val_loss: 0.2947
Epoch 75/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2960 - val_loss: 0.3523
Epoch 76/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2967 - val_loss: 0.3259
Epoch 77/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2962 - val_loss: 0.3409
Epoch 78/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2952 - val_loss: 0.2925
Epoch 79/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2949 - val_loss: 0.3667
Epoch 80/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2948 - val_loss: 0.2954
Epoch 81/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2931 - val_loss: 0.3468
Epoch 82/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2951 - val_loss: 0.3064
Epoch 83/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2932 - val_loss: 0.3054
Epoch 84/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2924 - val_loss: 0.3041
Epoch 85/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2920 - val_loss: 0.3227
Epoch 86/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2922 - val_loss: 0.2982
Epoch 87/100
11610/11610 [==============================] - 0s 31us/sample - loss: 0.2917 - val_loss: 0.3389
Epoch 88/100
11610/11610 [==============================] - 0s 30us/sample - loss: 0.2915 - val_loss: 0.3658
RandomizedSearchCV(cv=3, error_score='raise-deprecating',
                   estimator=<tensorflow.python.keras.wrappers.scikit_learn.KerasRegressor object at 0x7ff3841c6be0>,
                   iid='warn', n_iter=10, n_jobs=None,
                   param_distributions={'learning_rate': <scipy.stats._distn_infrastructure.rv_frozen object at 0x7ff384301f60>,
                                        'n_hidden': [0, 1, 2, 3],
                                        'n_neurons': array([ 1,  2,  3,  4,  5,  6,  7,  8,  9, 10,...
       18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
       35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
       52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
       69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
       86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99])},
                   pre_dispatch='2*n_jobs', random_state=None, refit=True,
                   return_train_score=False, scoring=None, verbose=2)
rnd_search_cv.best_params_
{'learning_rate': 0.0033625641252688094, 'n_hidden': 2, 'n_neurons': 42}
rnd_search_cv.best_score_
-0.35952892616378346

Note that the best score is negative: the KerasRegressor wrapper returns the negative of the loss as its score (Scikit-Learn always maximizes scores), so this corresponds to a cross-validated MSE of about 0.36.

rnd_search_cv.best_estimator_
<tensorflow.python.keras.wrappers.scikit_learn.KerasRegressor at 0x7ff384301518>
rnd_search_cv.score(X_test, y_test)
5160/5160 [==============================] - 0s 15us/sample - loss: 0.3065
-0.30652404945026074
model = rnd_search_cv.best_estimator_.model
model
<tensorflow.python.keras.engine.sequential.Sequential at 0x7ff350924668>
model.evaluate(X_test, y_test)
5160/5160 [==============================] - 0s 15us/sample - loss: 0.3065
0.30652404945026074
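
If you want to keep this model around (for example to deploy it or fine-tune it later), you can save it like any other Keras model; the filename below is just an example. You can also inspect every trial the search ran through rnd_search_cv.cv_results_.

# Save the best model found by the search (the filename is arbitrary)
model.save("my_keras_housing_model.h5")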

Exercise solutions

1. to 9.

See appendix A.

10.

Exercise: Train a deep MLP on the MNIST dataset (you can load it using keras.datasets.mnist.load_data()). See if you can get over 98% accuracy. Try searching for the optimal learning rate by using the approach presented in this chapter (i.e., by growing the learning rate exponentially, plotting the loss, and finding the point where the loss shoots up). Try adding all the bells and whistles: save checkpoints, use early stopping, and plot learning curves using TensorBoard.

Let's load the dataset:

(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()

Just like for the Fashion MNIST dataset, the MNIST training set contains 60,000 grayscale images, each 28x28 pixels:

X_train_full.shape
(60000, 28, 28)

Each pixel intensity is also represented as a byte (0 to 255):

X_train_full.dtype
dtype('uint8')

Let's split the full training set into a validation set and a (smaller) training set. We also scale the pixel intensities down to the 0-1 range and convert them to floats by dividing by 255, just like we did for Fashion MNIST:

X_valid, X_train = X_train_full[:5000] / 255., X_train_full[5000:] / 255.
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test / 255.

Let's plot an image using Matplotlib's imshow() function, with a 'binary' color map:

plt.imshow(X_train[0], cmap="binary")
plt.axis('off')
plt.show()

The labels are the class IDs (represented as uint8), from 0 to 9. Conveniently, each class ID is simply the digit shown in the image, so we don't need a class_names array:

y_train
array([7, 3, 4, ..., 5, 6, 8], dtype=uint8)

The validation set contains 5,000 images, and the test set contains 10,000 images:

X_valid.shape
(5000, 28, 28)
X_test.shape
(10000, 28, 28)

Let's take a look at a sample of the images in the dataset:

n_rows = 4
n_cols = 10
plt.figure(figsize=(n_cols * 1.2, n_rows * 1.2))
for row in range(n_rows):
    for col in range(n_cols):
        index = n_cols * row + col
        plt.subplot(n_rows, n_cols, index + 1)
        plt.imshow(X_train[index], cmap="binary", interpolation="nearest")
        plt.axis('off')
        plt.title(y_train[index], fontsize=12)
plt.subplots_adjust(wspace=0.2, hspace=0.5)
plt.show()

Let's build a simple dense network and find the optimal learning rate. We will need a callback that grows the learning rate at each iteration and records the learning rate and the loss along the way:

K = keras.backend

class ExponentialLearningRate(keras.callbacks.Callback):
    def __init__(self, factor):
        self.factor = factor  # multiplicative factor applied to the learning rate after each batch
        self.rates = []       # learning rate recorded at the end of each batch
        self.losses = []      # training loss recorded at the end of each batch
    def on_batch_end(self, batch, logs):
        self.rates.append(K.get_value(self.model.optimizer.lr))
        self.losses.append(logs["loss"])
        # grow the learning rate exponentially for the next batch
        K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])

We will start with a small learning rate of 1e-3, and grow it by 0.5% at each iteration:

model.compile(loss="sparse_categorical_crossentropy",
              optimizer=keras.optimizers.SGD(lr=1e-3),
              metrics=["accuracy"])
expon_lr = ExponentialLearningRate(factor=1.005)

Now let's train the model for just 1 epoch:

history = model.fit(X_train, y_train, epochs=1,
                    validation_data=(X_valid, y_valid),
                    callbacks=[expon_lr])
Train on 55000 samples, validate on 5000 samples
55000/55000 [==============================] - 3s 52us/sample - loss: 55143288500710.3281 - accuracy: 0.5734 - val_loss: 2.3660 - val_accuracy: 0.1100

We can now plot the loss as a function of the learning rate:

plt.plot(expon_lr.rates, expon_lr.losses)
plt.gca().set_xscale('log')
plt.hlines(min(expon_lr.losses), min(expon_lr.rates), max(expon_lr.rates))
plt.axis([min(expon_lr.rates), max(expon_lr.rates), 0, expon_lr.losses[0]])
plt.xlabel("Learning rate")
plt.ylabel("Loss")
Text(0, 0.5, 'Loss')
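
If you prefer to pick a value programmatically rather than eyeballing the curve, one rough heuristic (just a sketch, not part of the original approach) is to take the learning rate that produced the lowest recorded loss and divide it by 2 or so, which keeps you safely below the point where the loss blows up:

# Rough heuristic: half the learning rate that gave the lowest recorded loss
best_rate = expon_lr.rates[np.argmin(expon_lr.losses)]
best_rate / 2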

The loss starts shooting back up violently around 3e-1, so let's try using 2e-1 as our learning rate:

keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=keras.optimizers.SGD(lr=2e-1),
              metrics=["accuracy"])
run_index = 1 # increment this at every run
run_logdir = os.path.join(os.curdir, "my_mnist_logs", "run_{:03d}".format(run_index))
run_logdir
'./my_mnist_logs/run_001'
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
checkpoint_cb = keras.callbacks.ModelCheckpoint("my_mnist_model.h5", save_best_only=True)
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)

history = model.fit(X_train, y_train, epochs=100,
                    validation_data=(X_valid, y_valid),
                    callbacks=[early_stopping_cb, checkpoint_cb, tensorboard_cb])
Train on 55000 samples, validate on 5000 samples
Epoch 1/100
55000/55000 [==============================] - 3s 46us/sample - loss: 0.2361 - accuracy: 0.9280 - val_loss: 0.1183 - val_accuracy: 0.9664
Epoch 2/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0954 - accuracy: 0.9705 - val_loss: 0.0855 - val_accuracy: 0.9768
Epoch 3/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0642 - accuracy: 0.9796 - val_loss: 0.0822 - val_accuracy: 0.9786
Epoch 4/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0462 - accuracy: 0.9855 - val_loss: 0.0804 - val_accuracy: 0.9770
Epoch 5/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0333 - accuracy: 0.9894 - val_loss: 0.1907 - val_accuracy: 0.9500
Epoch 6/100
55000/55000 [==============================] - 2s 41us/sample - loss: 0.0244 - accuracy: 0.9919 - val_loss: 0.0698 - val_accuracy: 0.9828
Epoch 7/100
55000/55000 [==============================] - 2s 41us/sample - loss: 0.0205 - accuracy: 0.9929 - val_loss: 0.0809 - val_accuracy: 0.9800
Epoch 8/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0153 - accuracy: 0.9949 - val_loss: 0.0841 - val_accuracy: 0.9824
Epoch 9/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0084 - accuracy: 0.9975 - val_loss: 0.0899 - val_accuracy: 0.9788
Epoch 10/100
55000/55000 [==============================] - 2s 41us/sample - loss: 0.0084 - accuracy: 0.9972 - val_loss: 0.0741 - val_accuracy: 0.9844
Epoch 11/100
55000/55000 [==============================] - 2s 42us/sample - loss: 0.0042 - accuracy: 0.9989 - val_loss: 0.0729 - val_accuracy: 0.9842
Epoch 12/100
55000/55000 [==============================] - 2s 43us/sample - loss: 0.0028 - accuracy: 0.9993 - val_loss: 0.0839 - val_accuracy: 0.9838
Epoch 13/100
55000/55000 [==============================] - 2s 43us/sample - loss: 0.0018 - accuracy: 0.9997 - val_loss: 0.0747 - val_accuracy: 0.9858
Epoch 14/100
55000/55000 [==============================] - 2s 43us/sample - loss: 8.9780e-04 - accuracy: 0.9998 - val_loss: 0.0732 - val_accuracy: 0.9858
Epoch 15/100
55000/55000 [==============================] - 2s 42us/sample - loss: 3.7406e-04 - accuracy: 1.0000 - val_loss: 0.0771 - val_accuracy: 0.9862
Epoch 16/100
55000/55000 [==============================] - 2s 41us/sample - loss: 2.2128e-04 - accuracy: 1.0000 - val_loss: 0.0783 - val_accuracy: 0.9860
Epoch 17/100
55000/55000 [==============================] - 2s 43us/sample - loss: 1.8240e-04 - accuracy: 1.0000 - val_loss: 0.0789 - val_accuracy: 0.9864
Epoch 18/100
55000/55000 [==============================] - 2s 45us/sample - loss: 1.5978e-04 - accuracy: 1.0000 - val_loss: 0.0800 - val_accuracy: 0.9862
Epoch 19/100
55000/55000 [==============================] - 2s 43us/sample - loss: 1.4287e-04 - accuracy: 1.0000 - val_loss: 0.0808 - val_accuracy: 0.9862
Epoch 20/100
55000/55000 [==============================] - 2s 42us/sample - loss: 1.2992e-04 - accuracy: 1.0000 - val_loss: 0.0812 - val_accuracy: 0.9860
Epoch 21/100
55000/55000 [==============================] - 2s 43us/sample - loss: 1.2116e-04 - accuracy: 1.0000 - val_loss: 0.0816 - val_accuracy: 0.9860
Epoch 22/100
55000/55000 [==============================] - 2s 43us/sample - loss: 1.1251e-04 - accuracy: 1.0000 - val_loss: 0.0820 - val_accuracy: 0.9862
Epoch 23/100
55000/55000 [==============================] - 2s 44us/sample - loss: 1.0464e-04 - accuracy: 1.0000 - val_loss: 0.0826 - val_accuracy: 0.9866
Epoch 24/100
55000/55000 [==============================] - 2s 43us/sample - loss: 9.8997e-05 - accuracy: 1.0000 - val_loss: 0.0833 - val_accuracy: 0.9864
Epoch 25/100
55000/55000 [==============================] - 2s 43us/sample - loss: 9.3607e-05 - accuracy: 1.0000 - val_loss: 0.0837 - val_accuracy: 0.9864
Epoch 26/100
55000/55000 [==============================] - 2s 43us/sample - loss: 8.8776e-05 - accuracy: 1.0000 - val_loss: 0.0839 - val_accuracy: 0.9864
model = keras.models.load_model("my_mnist_model.h5") # rollback to best model
model.evaluate(X_test, y_test)
10000/10000 [==============================] - 0s 26us/sample - loss: 0.0692 - accuracy: 0.9806
[0.06917384602149541, 0.9806]
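
As a quick sanity check (a sketch, not part of the original run), you could compare the model's predictions on a few test digits with their true labels:

# Predict classes for the first few test digits and compare with the labels
y_proba_sample = model.predict(X_test[:5])
y_pred_sample = np.argmax(y_proba_sample, axis=-1)
y_pred_sample, y_test[:5]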

We got over 98% accuracy. Finally, let's look at the learning curves using TensorBoard:

%tensorboard --logdir=./my_mnist_logs --port=6006