verified model save method

paul-loedige 2023-02-01 23:39:18 +09:00
parent 776eff1077
commit f3b37fe93d
2 changed files with 87 additions and 22 deletions


@@ -72,9 +72,25 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 1,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-02-01 23:37:09.558933: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2023-02-01 23:37:09.710778: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
"2023-02-01 23:37:09.710807: I tensorflow/compiler/xla/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n",
"2023-02-01 23:37:10.735053: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n",
"2023-02-01 23:37:10.735109: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n",
"2023-02-01 23:37:10.735116: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n",
"/home/paul/.local/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import tensorflow as tf\n",
@@ -94,9 +110,21 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 2,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2023-02-01 23:37:12.516064: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n",
"2023-02-01 23:37:12.516115: W tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:265] failed call to cuInit: UNKNOWN ERROR (303)\n",
"2023-02-01 23:37:12.516142: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (paul-laptop): /proc/driver/nvidia/version does not exist\n",
"2023-02-01 23:37:12.516432: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
}
],
"source": [
"# split into training and test data\n",
"ds_train, ds_test = tfds.load(\n",
@@ -117,7 +145,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -147,7 +175,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -165,7 +193,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -185,7 +213,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -1524,6 +1552,57 @@
"for config_name, config in configs.items():\n",
" config['model'].save(f'hdf5_models/{config_name}.h5')\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Test Save File"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"sequential_11\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" vgg16 (Functional) (None, 1, 1, 512) 14714688 \n",
" \n",
" flatten_11 (Flatten) (None, 512) 0 \n",
" \n",
" dense_47 (Dense) (None, 64) 32832 \n",
" \n",
" dense_48 (Dense) (None, 32) 2080 \n",
" \n",
" dense_49 (Dense) (None, 10) 330 \n",
" \n",
"=================================================================\n",
"Total params: 14,749,930\n",
"Trainable params: 14,749,930\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"None\n",
"204/204 [==============================] - 64s 313ms/step - loss: 0.2786 - accuracy: 0.9249\n",
"Restored model, accuracy: 92.49%\n"
]
}
],
"source": [
"new_model = tf.keras.models.load_model('hdf5_models/fully trainable VGG16 base model.h5')\n",
"print(new_model.summary())\n",
"\n",
"# Evaluate the restored model\n",
"loss, acc = new_model.evaluate(ds_test)\n",
"print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))"
]
}
],
"metadata": {

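The notebook hunks above add a "Test Save File" cell that reloads one of the HDF5 files written by the existing save loop (config['model'].save(f'hdf5_models/{config_name}.h5')). As a rough standalone sketch of that save step: the configs structure and the layer stack below are assumptions reconstructed from the diff and the printed model summary (VGG16 base, Flatten, Dense 64/32/10), not code taken verbatim from the notebook.

import os
import tensorflow as tf

# Hypothetical stand-in for the notebook's configs dict: each entry keeps a
# Keras model under the 'model' key, matching the save loop shown in the diff.
# The layer stack mirrors the printed summary (VGG16 base, Flatten, Dense 64/32/10);
# the activations and any training that happens before saving are assumptions.
configs = {
    'fully trainable VGG16 base model': {
        'model': tf.keras.Sequential([
            tf.keras.applications.VGG16(include_top=False, input_shape=(32, 32, 3)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(32, activation='relu'),
            tf.keras.layers.Dense(10, activation='softmax'),
        ])
    },
}

# Write every configuration to its own HDF5 file, as the notebook does.
os.makedirs('hdf5_models', exist_ok=True)
for config_name, config in configs.items():
    config['model'].save(f'hdf5_models/{config_name}.h5')
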
test.py (14 deletions)

@@ -1,14 +0,0 @@
import tensorflow as tf
import tensorflow_datasets as tfds
new_model = tf.keras.models.load_model('hdf5_models/fully trainable VGG16 base model.h5')
new_model.summary()
ds_train, ds_test = tfds.load(
'svhn_cropped',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True
)
loss, acc = new_model.evaluate(ds_test, verbose=2)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))
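
For comparison, a minimal standalone version of the load-and-evaluate check that this commit moves from test.py into the notebook. Unlike the deleted script, the sketch below rescales and batches the test split before calling evaluate(); that preprocessing (and the batch size of 32) is an assumption about what the notebook does earlier, not something visible in this diff.

import tensorflow as tf
import tensorflow_datasets as tfds

# Test split of SVHN as (image, label) pairs.
ds_test = tfds.load('svhn_cropped', split='test', as_supervised=True)

# Assumed preprocessing: scale pixels to [0, 1] and batch, so evaluate() sees
# the same input format the model was trained on.
def preprocess(image, label):
    return tf.cast(image, tf.float32) / 255.0, label

ds_test = ds_test.map(preprocess).batch(32)

# Reload the saved HDF5 model and check that the reported accuracy is reproduced.
new_model = tf.keras.models.load_model('hdf5_models/fully trainable VGG16 base model.h5')
print(new_model.summary())

loss, acc = new_model.evaluate(ds_test)
print('Restored model, accuracy: {:5.2f}%'.format(100 * acc))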