# import libraries
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.base import clone
from tensorflow.keras.datasets import boston_housing
from tensorflow.keras import models, layers, backend
# 7 Boston Housing
# load dataset: 404 train / 102 test samples, 13 features each
(X_train, y_train), (X_test, y_test) = boston_housing.load_data()
# notebook-style shape check (no effect when run as a script)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# rescale and shift data based on training set statistics only,
# so no information from the test set leaks into preprocessing
# NOTE(review): np.mean/np.std without axis=0 standardize with a single
# scalar over ALL features; per-feature scaling would use axis=0 — confirm intent
transform_mean = np.mean(X_train)
transform_std = np.std(X_train, ddof=1)
X_train -= transform_mean
X_train /= transform_std
X_test -= transform_mean
X_test /= transform_std
# plain fully-connected regression network: 3 hidden layers of 128 units
model_nn = models.Sequential()
model_nn.add(layers.Dense(128, activation="relu", input_shape=(13,)))
model_nn.add(layers.Dense(128, activation="relu"))
model_nn.add(layers.Dense(128, activation="relu"))
model_nn.add(layers.Dense(1))  # single linear output for regression
model_nn.compile(optimizer="adam", loss="mse", metrics=["mae", "mse"])
# snapshot the freshly-initialized weights so every CV fold can restart
# from the exact same initialization
initial_weight_nn = model_nn.get_weights()
# regularized variant: narrower (64-unit) layers with dropout and
# combined L1+L2 kernel penalties to curb overfitting
model_nn_reg = models.Sequential()
model_nn_reg.add(layers.Dense(64, activation="relu", input_shape=(13,)))
model_nn_reg.add(layers.Dropout(0.3))
model_nn_reg.add(layers.Dense(64, activation="relu", kernel_regularizer='l1_l2'))
model_nn_reg.add(layers.Dropout(0.3))
model_nn_reg.add(layers.Dense(64, activation="relu", kernel_regularizer='l1_l2'))
model_nn_reg.add(layers.Dropout(0.3))
model_nn_reg.add(layers.Dense(1))  # single linear output for regression
model_nn_reg.compile(optimizer="adam", loss="mse", metrics=["mae", "mse"])
# snapshot initial weights so each CV fold restarts from the same state
initial_weight_nn_reg = model_nn_reg.get_weights()
# baseline model; cloned fresh for every fold so fits are independent
lm_base = LinearRegression()

# shuffle row indices once (seeded for reproducibility), then carve the
# shuffled order into k contiguous validation folds
indices = np.arange(len(X_train))
np.random.seed(123)
np.random.shuffle(indices)

k_fold = 5
# ceil so the folds cover every sample; the last fold may be smaller
sample_size = np.ceil(len(X_train) / k_fold).astype(int)

# per-fold metric accumulators for the three models
mse_nn, mse_nn_reg, mse_lm = [], [], []
mae_nn, mae_nn_reg, mae_lm = [], [], []
# K-fold cross-validation: each iteration trains all three models from a
# clean state on k-1 folds and evaluates on the held-out fold
for i in range(k_fold):
    # configure model with exact parameters: clone the untouched sklearn
    # estimator and reset both networks to their initial weights
    model_lm = clone(lm_base)
    model_nn.set_weights(initial_weight_nn)
    model_nn_reg.set_weights(initial_weight_nn_reg)

    # split into partial_train and validation via the shuffled indices
    id_start, id_end = i * sample_size, (i + 1) * sample_size
    mask_train = np.concatenate((indices[:id_start], indices[id_end:]))
    mask_val = indices[id_start:id_end]

    X_val = X_train[mask_val]
    y_val = y_train[mask_val]
    partial_X_train = X_train[mask_train]
    partial_y_train = y_train[mask_train]

    # fit and predict
    model_lm.fit(partial_X_train, partial_y_train)
    model_nn.fit(partial_X_train, partial_y_train, epochs=500, verbose=0)
    model_nn_reg.fit(partial_X_train, partial_y_train, epochs=500, verbose=0)

    y_pred_lm = model_lm.predict(X_val)
    y_pred_nn = model_nn.predict(X_val, verbose=0)
    y_pred_nn_reg = model_nn_reg.predict(X_val, verbose=0)

    # save results for this fold
    mse_nn.append(mean_squared_error(y_val, y_pred_nn))
    mse_nn_reg.append(mean_squared_error(y_val, y_pred_nn_reg))
    mse_lm.append(mean_squared_error(y_val, y_pred_lm))
    mae_nn.append(mean_absolute_error(y_val, y_pred_nn))
    mae_nn_reg.append(mean_absolute_error(y_val, y_pred_nn_reg))
    mae_lm.append(mean_absolute_error(y_val, y_pred_lm))
# report cross-validated averages for all three models
# (fixed typos in labels: "Neral" -> "Neural", "Regulaized" -> "Regularized")
print(f"Avg MSE of Neural Network : {np.mean(mse_nn):.2f}")
print(f"Avg MSE of NN Regularized : {np.mean(mse_nn_reg):.2f}")
print(f"Avg MSE of Linear Model   : {np.mean(mse_lm):.2f}")
print(f"Avg MAE of Neural Network : {np.mean(mae_nn):.2f}")
print(f"Avg MAE of NN Regularized : {np.mean(mae_nn_reg):.2f}")
print(f"Avg MAE of Linear Model   : {np.mean(mae_lm):.2f}")