# import libraries
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.datasets import imdb
from tensorflow.keras import models, layers, optimizers, backend

# IMDB
# load dataset
num_words = 10000
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words = num_words)
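# each review is a list of integer word indices; num_words keeps only the
# 10,000 most frequent words. optional sketch: indices 0-2 are reserved for
# padding/start/unknown, hence the i - 3 offset when decoding back to text
word_index = imdb.get_word_index()
reverse_index = {index: word for word, index in word_index.items()}
print(" ".join(reverse_index.get(i - 3, "?") for i in train_data[0]))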
print(train_data.shape, train_labels.shape, test_data.shape, test_labels.shape)

# preprocess
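# multi-hot encode each review into a fixed-length num_words vector of 0s and
# 1s so a Dense network can consume it: e.g. the sequence [3, 5] becomes a
# 10,000-dim vector with ones at indices 3 and 5 and zeros elsewhere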
X_train = np.zeros(shape = (len(train_data), num_words), dtype = float)
X_test = np.zeros(shape = (len(test_data), num_words), dtype = float)
for i, seq in enumerate(train_data):
    X_train[i, seq] = 1.
for i, seq in enumerate(test_data):
    X_test[i, seq] = 1.
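# labels are already binary (0 = negative review, 1 = positive review);
# cast to float for the sigmoid/binary-crossentropy head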
y_train = train_labels.astype(float)
y_test = test_labels.astype(float)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

# hold out the last 12,500 training reviews as a validation set
partial_X_train = X_train[:12500]
partial_y_train = y_train[:12500]
X_val = X_train[12500:]
y_val = y_train[12500:]

def explore(X_train,
            y_train,
            X_val,
            y_val,
            n_units,
            n_layers,
            activation,
            learning_rate,
            momentum):
    # define ann architecture
    model = models.Sequential()
    for i in range(n_layers):
        model.add(layers.Dense(n_units, activation = activation))
    model.add(layers.Dense(1, activation = "sigmoid"))
    # define optimizer, loss function, and metrics
    # RMSprop with momentum; both values come from the search grid
    optimizer = optimizers.RMSprop(learning_rate = learning_rate, momentum = momentum)
    # train ann model; build expects the batch dimension first, so (None, num_words)
    model.build(input_shape = (None, num_words))
    model.compile(optimizer = optimizer, loss = "binary_crossentropy", metrics = ["accuracy"])
    model.fit(X_train, y_train, epochs = 20, batch_size = 64, verbose = 0)
    # evaluate ann model
    val_loss, val_acc = model.evaluate(X_val, y_val, verbose = 0)
    return val_loss, val_acc

# set hyperparameters
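# learning rates are log-spaced from 1e-2 down to 1e-4, momentum is swept
# linearly from 0.1 to 0.9, and width, depth, and activation each get two candidates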
learning_rate_list = np.logspace(-2, -4, 5)
momentum_list = np.linspace(0.1, 0.9, 5)
n_unit_list = [32, 64]
n_hidden_layer_list = [1, 3]
activation_list = ["relu", "tanh"]
param_list = []
for learning_rate in learning_rate_list:
    for momentum in momentum_list:
        for n_units in n_unit_list:
            for n_layers in n_hidden_layer_list:
                for activation in activation_list:
                    param_list.append({
                        "learning_rate": learning_rate,
                        "momentum": momentum,
                        "n_units": n_units,
                        "n_layers": n_layers,
                        "activation": activation
                    })

results = []
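# note: the nested loops above enumerate the full Cartesian product of the
# grid; an equivalent, more compact sketch (same dicts, same order) would be:
#     from itertools import product
#     keys = ["learning_rate", "momentum", "n_units", "n_layers", "activation"]
#     param_list = [dict(zip(keys, combo))
#                   for combo in product(learning_rate_list, momentum_list,
#                                        n_unit_list, n_hidden_layer_list, activation_list)]

# run the grid search on the train/validation split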
for params in param_list:
    val_loss, val_acc = explore(
        partial_X_train,
        partial_y_train,
        X_val,
        y_val,
        n_units = params["n_units"],
        n_layers = params["n_layers"],
        activation = params["activation"],
        learning_rate = params["learning_rate"],
        momentum = params["momentum"],
    )
    results.append({"val_loss": val_loss,
                    "val_acc": val_acc,
                    "params": params})
    # clear graph state between runs so 200 models do not accumulate in memory
    backend.clear_session()

# get optimal parameters
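# rank all runs by validation accuracy and keep the best-performing hyperparameters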
val_accuracies = [result["val_acc"] for result in results]
opt_params = results[np.argmax(val_accuracies)]["params"]
print(opt_params)

# define ann architecture
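# rebuild the winning architecture and retrain it on the full 25,000-review training set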
model = models.Sequential()
for i in range(opt_params["n_layers"]):
model.add(layers.Dense(opt_params["n_units"], activation = opt_params["activation"]))
model.add(layers.Dense(1, activation = "sigmoid"))
# define optimizer, loss function, and metrics
optimizer = optimizers.RMSprop(learning_rate = opt_params["learning_rate"],
                               momentum = opt_params["momentum"])
# train ann model
model.build(input_shape = (None, num_words))
model.compile(optimizer = optimizer, loss = "binary_crossentropy", metrics = ["accuracy"])
history = model.fit(X_train, y_train, epochs = 20, batch_size = 64, verbose = 0)

# plot training loss; per-epoch metrics live in history.history, not history itself
loss = history.history['loss']
epochs = range(1, len(loss) + 1)
solid_blue_line = 'b'
plt.plot(epochs, loss, solid_blue_line, label = 'Training loss')
plt.title('Training loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# plot training accuracy
accuracy = history.history['accuracy']
epochs = range(1, len(accuracy) + 1)
solid_blue_line = 'b'
plt.plot(epochs, accuracy, solid_blue_line, label = 'Training accuracy')
plt.title('Training accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# evaluate ann model on the held-out test set
model.evaluate(X_test, y_test)