Exported and formatted the code into a simple Python file.
This commit is contained in:
parent
f3b37fe93d
commit
9e461bed89
256
LaTeX/exported_code.py
Normal file
@@ -0,0 +1,256 @@
# Import libraries
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers, models, applications, optimizers
import tensorflow_datasets as tfds
import tkinter.messagebox  # for notifications

# Load Dataset
# https://www.tensorflow.org/datasets/keras_example
# split into training and test data
ds_train, ds_test = tfds.load(
    'svhn_cropped',
    split=['train', 'test'],
    shuffle_files=True,
    as_supervised=True
)
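
# Optional sketch (not part of the original script): TFDS can report dataset
# metadata without reloading the splits, which is handy for confirming the
# class and example counts before training. 'svhn_builder' is a new name
# introduced here for illustration.
svhn_builder = tfds.builder('svhn_cropped')
print(svhn_builder.info.features['label'].num_classes)  # 10 digit classes
print(svhn_builder.info.splits['train'].num_examples)   # 73257 training images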

# Confirm Data
for image, label in ds_train.take(1):
    plt.imshow(image)
    plt.show()

# Define normalization function
def normalize_img(image, label):
    # scale pixel values from [0, 255] to [0, 1]
    return tf.cast(image, tf.float32) / 255.0, label

# Build a training pipeline
ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.batch(128)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)

# Build an evaluation pipeline
ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.cache()
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
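
# Note (not part of the original script): the TFDS Keras example also inserts
# an element-level shuffle between cache() and batch() on the training side;
# shuffle_files=True above only shuffles at the file level. Sketch:
# ds_train = ds_train.shuffle(10000)   # would go before .batch(128)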

# Configurations
# each entry bundles a model with its compile and fit settings
configs = {}

configs['regular Neural Network (3 hidden layers)'] = {
    'model': models.Sequential([
        layers.Flatten(input_shape=(32, 32, 3)),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 10,
    'batch_size': 64
}

configs['regular Neural Network (5 hidden layers)'] = {
    'model': models.Sequential([
        layers.Flatten(input_shape=(32, 32, 3)),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 10,
    'batch_size': 64
}

configs['regular Neural Network (10 hidden layers)'] = {
    'model': models.Sequential([
        layers.Flatten(input_shape=(32, 32, 3)),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 10,
    'batch_size': 64
}
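
# Optional refactor sketch (not part of the original script): the three dense
# configs above differ only in depth, so a small helper could generate them.
# 'make_dense_config' is a hypothetical name introduced here.
def make_dense_config(num_hidden_layers):
    return {
        'model': models.Sequential(
            [layers.Flatten(input_shape=(32, 32, 3))]
            + [layers.Dense(64, activation='relu') for _ in range(num_hidden_layers)]
            + [layers.Dense(10, activation='softmax')]
        ),
        'optimizer': 'adam',
        'loss': 'sparse_categorical_crossentropy',
        'metrics': ['accuracy'],
        'epochs': 10,
        'batch_size': 64
    }
# e.g.: configs['regular Neural Network (3 hidden layers)'] = make_dense_config(3)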

configs['convolutional neural network (1 convolution)'] = {
    'model': models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 10,
    'batch_size': 64
}

configs['convolutional neural network (3 convolutions)'] = {
    'model': models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(128, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 10,
    'batch_size': 128
}

vgg16_base_model = applications.VGG16(input_shape=(32, 32, 3), include_top=False)
# freeze the VGG16 model
vgg16_base_model.trainable = False

configs['frozen VGG16 base model'] = {
    'model': models.Sequential([
        vgg16_base_model,
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ]),
    'optimizer': 'adam',
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 5,
    'batch_size': 64
}

# a fresh, fully trainable copy of VGG16 (the instance above stays frozen)
vgg16_base_model = applications.VGG16(input_shape=(32, 32, 3), include_top=False)
configs['fully trainable VGG16 base model'] = {
    'model': models.Sequential([
        vgg16_base_model,
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ]),
    'optimizer': optimizers.Adam(learning_rate=0.001),
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 5,
    'batch_size': 64
}
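
# Note (not part of the original script): learning_rate=0.001 is Adam's
# default, so this config optimizes the same way as the string 'adam' above.
# When the whole pretrained backbone is trainable, a smaller rate is commonly
# used so the first updates do not wreck the pretrained weights, e.g.:
# 'optimizer': optimizers.Adam(learning_rate=1e-5),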

vgg16_base_model = applications.VGG16(input_shape=(32, 32, 3), include_top=False)
# freeze the VGG16 model
vgg16_base_model.trainable = False
# then unfreeze the last six layers for fine-tuning
for layer in vgg16_base_model.layers[-6:]:
    layer.trainable = True

configs['partly trainable VGG16 base model'] = {
    'model': models.Sequential([
        vgg16_base_model,
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(32, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ]),
    'optimizer': optimizers.Adam(learning_rate=0.001),
    'loss': 'sparse_categorical_crossentropy',
    'metrics': ['accuracy'],
    'epochs': 5,
    'batch_size': 64
}
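
# Optional sketch (not part of the original script): comparing model sizes
# before training makes the accuracy plots easier to interpret.
for config_name, config in configs.items():
    print(f"{config_name}: {config['model'].count_params():,} parameters")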

# Compile the Models
for config in configs.values():
    if 'history' not in config.keys():  # skip models that are already trained
        config['model'].compile(
            optimizer=config['optimizer'],
            loss=config['loss'],
            metrics=config['metrics']
        )

# Train and Evaluate the Models
try:
    for config_name, config in configs.items():
        if 'history' in config.keys():
            print(f'Already trained model "{config_name}"')
        else:
            print(f'Now training model "{config_name}"')
            # Note: batch_size must not be passed to fit() together with a
            # tf.data.Dataset; the pipelines above are already batched to 128,
            # so the per-config 'batch_size' values are not used here.
            config['history'] = config['model'].fit(
                ds_train,
                epochs=config['epochs'],
                validation_data=ds_test
            )  # verbose=0 would silence the per-epoch output
except Exception as e:
    print(e)
    tkinter.messagebox.showerror("ERROR", f"ERROR: {e}")
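
# Optional sketch (not part of the original script): an EarlyStopping callback
# could cut short runs whose validation accuracy has stopped improving and
# restore the best weights seen, e.g.:
# early_stop = tf.keras.callbacks.EarlyStopping(
#     monitor='val_accuracy', patience=2, restore_best_weights=True)
# config['model'].fit(..., callbacks=[early_stop])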

# Plot the accuracy
plt.figure(figsize=(10, 5))
for config_name, config in configs.items():
    plt.plot(config['history'].history['accuracy'], label=config_name)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
# plt.ylim([0.75, 1])
plt.legend(loc='lower right')
plt.title("Accuracy")
plt.show()

# Plot the validation accuracy
plt.figure(figsize=(10, 5))
for config_name, config in configs.items():
    plt.plot(config['history'].history['val_accuracy'], label=config_name)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
# plt.ylim([0.8, 1])
plt.legend(loc='lower right')
plt.title("Validation Accuracy")
plt.show()
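
# Optional sketch (not part of the original script): a plain-text summary of
# the final validation accuracy per model, alongside the plots.
for config_name, config in configs.items():
    final_val_acc = config['history'].history['val_accuracy'][-1]
    print(f'{config_name}: final validation accuracy {final_val_acc:.4f}')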

# Notify when done
tkinter.messagebox.showinfo("DONE", "DONE")

# Save data for future use
# store histories with pickle
import pickle
histories = {}
for config_name, config in configs.items():
    # History objects hold a reference to their model and do not pickle
    # cleanly, so store the plain .history dict of per-epoch metrics instead
    histories[config_name] = config['history'].history
with open('savepoint.pkl', 'wb') as out:
    pickle.dump(histories, out)
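
# Optional sketch (not part of the original script): the pickled histories can
# be restored later for re-plotting without retraining.
with open('savepoint.pkl', 'rb') as inp:
    restored_histories = pickle.load(inp)  # maps config name -> metrics dict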

# store models with tensorflow (SavedModel format, one directory per model)
for config_name, config in configs.items():
    config['model'].save(f'saved_models/{config_name}')
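
# Note (not part of the original script): SavedModel directories reload with
# the same API as the HDF5 files tested below, e.g.:
# tf.keras.models.load_model('saved_models/frozen VGG16 base model')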

# store models in HDF5 format (single .h5 file per model)
for config_name, config in configs.items():
    config['model'].save(f'hdf5_models/{config_name}.h5')

# Test Save File
new_model = tf.keras.models.load_model('hdf5_models/fully trainable VGG16 base model.h5')
new_model.summary()  # summary() prints directly; wrapping it in print() would only add "None"
# Evaluate the restored model
loss, acc = new_model.evaluate(ds_test)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))