{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "572dc7fb",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-08-01 22:09:35.958273: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n"
]
}
],
"source": [
"from matplotlib import pyplot as plt\n",
"import cv\n",
"from matplotlib.image import imread\n",
"import pandas as pd\n",
"from collections import Counter\n",
"import json\n",
"import os\n",
"import re\n",
"import tempfile\n",
"import numpy as np\n",
"from os.path import exists\n",
"from imblearn.under_sampling import RandomUnderSampler\n",
"from PIL import ImageFile\n",
"import sklearn as sk\n",
"from sklearn.model_selection import train_test_split, StratifiedShuffleSplit\n",
"import tensorflow as tf\n",
"import tensorflow.keras\n",
"from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
"from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten, Activation\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.optimizers import Adam\n",
"# custom modules\n",
"import image_faults\n",
"\n",
"ImageFile.LOAD_TRUNCATED_IMAGES = True"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "a5c72863",
"metadata": {},
"outputs": [],
"source": [
"image_faults.faulty_images() # removes faulty images\n",
"df = pd.read_csv('expanded_class.csv', index_col=[0], low_memory=False)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "67ecdebe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-08-01 22:09:39.570503: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set\n",
"2022-08-01 22:09:39.571048: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1\n",
"2022-08-01 22:09:39.613420: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.613584: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \n",
"pciBusID: 0000:04:00.0 name: NVIDIA GeForce RTX 3090 computeCapability: 8.6\n",
"coreClock: 1.725GHz coreCount: 82 deviceMemorySize: 23.70GiB deviceMemoryBandwidth: 871.81GiB/s\n",
"2022-08-01 22:09:39.613631: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.613751: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 1 with properties: \n",
"pciBusID: 0000:0b:00.0 name: NVIDIA GeForce RTX 3090 computeCapability: 8.6\n",
"coreClock: 1.8GHz coreCount: 82 deviceMemorySize: 23.70GiB deviceMemoryBandwidth: 871.81GiB/s\n",
"2022-08-01 22:09:39.613767: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
"2022-08-01 22:09:39.614548: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.10\n",
"2022-08-01 22:09:39.614572: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.10\n",
"2022-08-01 22:09:39.615415: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2022-08-01 22:09:39.615547: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2022-08-01 22:09:39.616317: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10\n",
"2022-08-01 22:09:39.616763: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.10\n",
"2022-08-01 22:09:39.618472: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.7\n",
"2022-08-01 22:09:39.618532: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.618687: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.618830: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.618969: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.619075: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0, 1\n",
"2022-08-01 22:09:39.619877: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE4.1 SSE4.2 AVX AVX2 FMA\n",
"To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
"2022-08-01 22:09:39.621856: I tensorflow/compiler/jit/xla_gpu_device.cc:99] Not creating XLA devices, tf_xla_enable_xla_devices not set\n",
"2022-08-01 22:09:39.792333: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.792467: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: \n",
"pciBusID: 0000:04:00.0 name: NVIDIA GeForce RTX 3090 computeCapability: 8.6\n",
"coreClock: 1.725GHz coreCount: 82 deviceMemorySize: 23.70GiB deviceMemoryBandwidth: 871.81GiB/s\n",
"2022-08-01 22:09:39.792551: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.792644: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 1 with properties: \n",
"pciBusID: 0000:0b:00.0 name: NVIDIA GeForce RTX 3090 computeCapability: 8.6\n",
"coreClock: 1.8GHz coreCount: 82 deviceMemorySize: 23.70GiB deviceMemoryBandwidth: 871.81GiB/s\n",
"2022-08-01 22:09:39.792680: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
"2022-08-01 22:09:39.792696: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.10\n",
"2022-08-01 22:09:39.792706: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.10\n",
"2022-08-01 22:09:39.792715: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10\n",
"2022-08-01 22:09:39.792724: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10\n",
"2022-08-01 22:09:39.792733: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.10\n",
"2022-08-01 22:09:39.792741: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.10\n",
"2022-08-01 22:09:39.792750: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.7\n",
"2022-08-01 22:09:39.792797: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.792931: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.793053: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.793172: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:39.793263: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0, 1\n",
"2022-08-01 22:09:39.793290: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
"2022-08-01 22:09:41.188032: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1261] Device interconnect StreamExecutor with strength 1 edge matrix:\n",
"2022-08-01 22:09:41.188052: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1267] 0 1 \n",
"2022-08-01 22:09:41.188057: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 0: N N \n",
"2022-08-01 22:09:41.188059: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1280] 1: N N \n",
"2022-08-01 22:09:41.188316: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.188469: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.188599: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.188726: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.188831: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1406] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 22425 MB memory) -> physical GPU (device: 0, name: NVIDIA GeForce RTX 3090, pci bus id: 0000:04:00.0, compute capability: 8.6)\n",
"2022-08-01 22:09:41.189525: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.189665: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:941] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
"2022-08-01 22:09:41.189758: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1406] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:1 with 21683 MB memory) -> physical GPU (device: 1, name: NVIDIA GeForce RTX 3090, pci bus id: 0000:0b:00.0, compute capability: 8.6)\n"
]
}
],
"source": [
"mirrored_strategy = tf.distribute.MirroredStrategy(devices=[\"/gpu:0\",\"/gpu:1\"])\n",
"#\"/gpu:0\","
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "a89913e0",
"metadata": {},
"outputs": [],
"source": [
"def dict_pics_jup():\n",
" '''\n",
" {source:target} dict used to replace source urls with image location as input\n",
" '''\n",
" target_dir = os.getcwd() + os.sep + \"training_images\"\n",
" with open('temp_pics_source_list.txt') as f:\n",
" temp_pics_source_list = json.load(f)\n",
" \n",
" dict_pics = {}\n",
" for k in temp_pics_source_list:\n",
2022-01-29 01:05:54 +00:00
" try: \n",
2022-01-15 02:53:59 +00:00
" patt_1 = re.search(r'[^/]+(?=/\\$_|.(\\.jpg|\\.jpeg|\\.png))', k, re.IGNORECASE)\n",
" patt_2 = re.search(r'(\\.jpg|\\.jpeg|\\.png)', k, re.IGNORECASE)\n",
" if patt_1 and patt_2 is not None:\n",
" tag = patt_1.group() + patt_2.group().lower()\n",
" file_name = target_dir + os.sep + tag\n",
" dict_pics.update({k:file_name})\n",
" except TypeError:\n",
" print(k)\n",
2022-01-13 05:25:40 +00:00
" print(\"{source:target} dictionary created @ \" + target_dir)\n",
" return dict_pics\n"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "1057a442",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"nan\n",
"{source:target} dictionary created @ /home/unknown/Sync/projects/ebay ML Lister Project/training_images\n"
]
}
],
"source": [
"dict_pics = dict_pics_jup()\n",
"\n",
"with open('women_cat_list.txt') as f:\n",
" women_cats = json.load(f)\n",
"with open('men_cat_list.txt') as f:\n",
" men_cats = json.load(f)\n",
" \n",
"with open('temp_pics_source_list.txt') as f:\n",
" tempics = json.load(f)\n",
"# list of image urls that did not get named properly which will be removed from the dataframe\n",
"drop_row_vals = []\n",
"for pic in tempics:\n",
" try:\n",
" dict_pics[pic]\n",
" except KeyError:\n",
" drop_row_vals.append(pic)\n",
"\n",
"df['PrimaryCategoryID'] = df['PrimaryCategoryID'].astype(str) # category ids are read as ints; cast to str for the generators\n",
"df = df[df.PictureURL.isin(drop_row_vals)==False] # remove improperly named image files\n",
"df = df[df.PrimaryCategoryID.isin(men_cats)==False] # removes rows in mens categories (keeps womens)\n",
"\n",
"blah = pd.Series(df.PictureURL)\n",
"df = df.drop(labels=['PictureURL'], axis=1)\n",
"\n",
"blah = blah.apply(lambda x: dict_pics[x])\n",
"df = pd.concat([blah, df],axis=1)\n",
"df = df.groupby('PrimaryCategoryID').filter(lambda x: len(x)>25) # drop categories with 25 or fewer samples"
]
},
{
"cell_type": "code",
"execution_count": 78,
"id": "7a6146e6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'/home/unknown/Sync/projects/ebay ML Lister Project/training_images/7BQAAOSw0eZhpmqM.jpg'"
]
},
"execution_count": 78,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df=df.sample(frac=1)\n",
"something = df.iloc[1,0]\n",
"something"
]
},
{
"cell_type": "code",
"execution_count": 60,
"id": "114cc3c0",
"metadata": {},
"outputs": [],
"source": [
"undersample = RandomUnderSampler(sampling_strategy='auto')\n",
"train, y_under = undersample.fit_resample(df, df['PrimaryCategoryID'])\n",
"#print(Counter(train['PrimaryCategoryID']))"
]
},
{
"cell_type": "code",
"execution_count": 61,
"id": "506aa5cf",
"metadata": {},
"outputs": [],
"source": [
"train, test = train_test_split(train, test_size=0.2, random_state=42)\n",
"# stratify=train['PrimaryCategoryID'] # a stratified variant is sketched in the next cell\n",
"# train['PrimaryCategoryID'].value_counts()"
]
},
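{
"cell_type": "code",
"execution_count": null,
"id": "split-strat-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch, not part of the original run: the stratified variant hinted at by the\n",
"# commented-out stratify argument above. stratify keeps the PrimaryCategoryID class\n",
"# proportions equal across both splits; the *_strat names are hypothetical and unused by\n",
"# the rest of the notebook, so running this cell does not alter the pipeline above.\n",
"train_strat, test_strat = train_test_split(\n",
"    train, test_size=0.2, random_state=42, stratify=train['PrimaryCategoryID'])"
]
},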
{
"cell_type": "code",
"execution_count": 80,
"id": "4d72eb90",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/unknown/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/keras_preprocessing/image/dataframe_iterator.py:279: UserWarning: Found 5 invalid image filename(s) in x_col=\"PictureURL\". These filename(s) will be ignored.\n",
" warnings.warn(\n",
"/home/unknown/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/keras_preprocessing/image/dataframe_iterator.py:279: UserWarning: Found 5 invalid image filename(s) in x_col=\"PictureURL\". These filename(s) will be ignored.\n",
" warnings.warn(\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 43744 validated image filenames belonging to 7 classes.\n",
"Found 10935 validated image filenames belonging to 7 classes.\n"
]
}
],
"source": [
"datagen = ImageDataGenerator(rescale=1./255., \n",
" validation_split=.2,\n",
" #samplewise_std_normalization=True,\n",
" #horizontal_flip= True,\n",
" #vertical_flip= True,\n",
" #width_shift_range= 0.2,\n",
" #height_shift_range= 0.2,\n",
" #rotation_range= 90,\n",
" preprocessing_function=tf.keras.applications.xception.preprocess_input)\n",
"\n",
"train_generator=datagen.flow_from_dataframe(\n",
" dataframe=train[:len(train)],\n",
" directory='./training_images',\n",
" x_col='PictureURL',\n",
" y_col='PrimaryCategoryID',\n",
" batch_size=56,\n",
" seed=42,\n",
" shuffle=True,\n",
" target_size=(299,299),\n",
" subset='training'\n",
" )\n",
"validation_generator=datagen.flow_from_dataframe(\n",
" dataframe=train[:len(train)], # is using train right?\n",
" directory='./training_images',\n",
" x_col='PictureURL',\n",
" y_col='PrimaryCategoryID',\n",
" batch_size=56,\n",
" seed=42,\n",
" shuffle=True,\n",
" target_size=(299,299),\n",
" subset='validation'\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 81,
"id": "7b70f37f",
"metadata": {},
"outputs": [],
"source": [
"imgs, labels = next(train_generator)"
]
},
{
"cell_type": "code",
"execution_count": 82,
"id": "1ed54bf5",
"metadata": {},
"outputs": [],
"source": [
"def plotImages(images_arr):\n",
" fig, axes = plt.subplots(1, 10, figsize=(20,20))\n",
" axes = axes.flatten()\n",
" for img, ax in zip( images_arr, axes):\n",
" ax.imshow(img)\n",
" ax.axis('off')\n",
" plt.tight_layout()\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 83,
"id": "85934565",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAABZgAAACSCAYAAADIDq8FAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAALIUlEQVR4nO3d63KbSBCA0emtvP8rz/7QDckgwTDADJxT5U3irDFSVLH0pd1EzjkBAAAAAMBS/x19AgAAAAAA9ElgBgAAAACgiMAMAAAAAEARgRkAAAAAgCICMwAAAAAARQRmAAAAAACK/Pv2mxGR9zoR+pBzjjn/n8cOnzx2KOWxQymPHUp57FDKY4dSHjuUuuxjJ1JK57pFu5vz2Dnd44bVph43JpgBAAAAACgiMAMAAACcWES6Tf2ehblaaIrADAAAAHBqkeJUhRloydcdzAAAAAD0LWcjv8B2TDADAAAAAFBEYAYAAAC4AlsygA0IzAAAAABXYFMGsAGBGQAAAACAIgIzAAAAAABFBGYAAAAAAIoIzAAAAAAAFBGYAQAAAAAoIjADAAAAAFBEYAYAAAAAoIjADAAAAABAEYEZAAAAAIAiAjMAAAAAAEUEZgAAAAAAigjMAAAAAAAUEZgBAAAAACgiMAMAAAAAUERg5iaOPgEAAAAAoDcCMymle1+OJDQDAAAAALMJzKS4R2VtGQAAAABYQmC+uvj6SwAAAACASQLznlqrtzF+ShGvqWYAAAAAgCl9B+beKmg++gRu4suy5Zxvbynd7157mQEAAACACf+OPoE54ltIvv9ezo3U2waN3X+zm/Hjbo3UTCAHAAAAANrQfmA2PbtOpfsvKh4LAAAAADiHtgPzj6D5mFqOiOeUrknmdN+tPKcGj0w2v70rd7eFBAAAAADYT9s7mPPcUPoSMXHlOgq8ljDnZEMGAAAAAPCu7QnmmUwtf8hJZAcAAAAANtdVYB6G5K8X/ru8+vfN0klyAAAAAOD8mg7Mw4g8NqU8tnf57WNSttcBAAAAAGAjTQfmqdUXn9PLU9PMkeIWmS/HjgwAAAAAYHtNB+Ypn+F5LDA3sZc50qET1M/7IIY/CM8AAAAAQB3dBOaIaCMaz/AWvCOnI077bXI7v36IGJ6M2AwAAAAAlOsmMKe07MJ+TQXpsUnmraebJ479HGrWlgEAAACAlcoC85c4OnbhvTWWROVvH3tcbI7bnPDYzbi/z8UIAQAAAIAelU8wfwZTgXSdg/c1AwAAAAAsNT8w/7pQ3AYrF9ZML6d03NRyzvlvMI7nLPMf8X7nvo4BAAAAANCw2YF5MiwfYBhfP1dyrI3S1Xz24ZwmA/LYOTe1QxoAAAAAYESli/zlwX9TxVUPjwNNR+OeIuzUuebBHTY15QwAAAAA0JqvgXnJMPAWnTcPivXcyeScc3/Tvx+rNPYhZAMAAAAA6/yYYB6LkPdp5Z37bXMrMDayx+08+30IAAAAAOyjaEXGkcPBpZPJn7uat3T7XHnV/bTFeQrLAAAAAEBN/y39gB42T9zWZPx9326fv94S6jdFgThuq07EZQAAAACgtsWBuRdjPXm3yJwbCvEtnQsAAAAAcCr9BWaDuAAAAAAATeguMEfupzBbSwEAAAAAnFl3gXmr/cZnt+cOagAAAADgGroLzCmlPxfwa1XtqLv2eBIzAAAAAFDT8sDcQNw1jFvKHQcAAAAA1LM4MEe6TRCvmiJuIFJfUrZiBAAAAACop8sVGV2qFdXXHkdfBgAAAAAqKQjM8XyLuL0tP0Kd2trVIHStsCsQAwAAAACN2H2CudYF+uLRuQEAAAAAOMTqwJwXX3GvThXOKbq/2N/SXdYl0+Jjlv+ZAQAAAAD89W/NBx8aKk/RSG/B+NGNf92fOednZBaJAQAAAICjrQrMJeqF0f4D6yMYL7lPcspVbvowVgMAAAAAlNh9BzPTZgXf/rs6AAAAAHASqwKzCdj1htPL1l4AAAAAAD0xwQwAAAAAQJFVgdnELQAAAADAdZlgBgAAAACgiMB8VdZnAwAAAAArCcwAAAAAABQRmAEAAAAAKCIwX5XrMwIAAAAAKwnMAAAAAAAUEZgBAAAAACgiMAMAAAAAUERgBgAAAACgiMAMAAAAAECRFYE51zsLDhBHnwAAAAAA0DkTzAAAAAAAFCkOzNkAMwAAAADApZlgvqiwIQMAAAAAWKk8MAuU/VKXAQAAAIAKTDBfkLwMAAAAANQgMAMAAAAAUERgBgAAAACgiMAMAAAAAECR8sCcK54FAAAAAADdMcEMAAAAAEARgflDxNFnAAAAAADQh3/LPySnbD0GAAAAAMDlLZ5gPntbFs8BAAAAAOZZviJDgO1a2AECAAAAAFRiB/MH/RUAAAAAYB6BeYzIDAAAAADwk8AMAAAAAEARgfnDqS/yZzIbAAAAAKhIYL4UhRkAAAAAqEdgHnPSKWZ5GQAAAACoaXlgVin75M8NAAAAAKjMBPNFhMIMAAAAAFS2ODDLlAAAAAAApGSC+TJyzum0y6UBAAAAgEMUBOZIESecY46Pn5/wJgIAAAAA1GSC+SFP/BwAAAAAgFECMwAAAAAARcoD84lXSJxxA0hKBrMBAAAAgLqKA/NpGuzoDYmvty8+9zUDAAAAAFzQqhUZvbfViFQ01puNAgMAAAAArAnM0e0uiYhIEfE9FOffty3i16wzAAAAAMB5/Tv6BPYUWwXxwkloAAAAAICerVqR0ZNhXM45p/xrz0X8DtKPQ0S6TUS3PMwcEaatAYD1PJ0AAAAGVgfm3rZk/AzLq47X79oQAICU0u+A7Lu2AACAgQorMtrdD7HZSowPOef3fcwRt3vE1QABgF48njblwa/zl98HAABIZ93BPBKWa08uzz+P7IUYANC+z+crv34NAACQThSYp6eV846DxK+Rn9fZ3HYzbxK4504S2doBAAAAAGzgNIF5zJ5Ty8NPNda6I6LO+QyjskkiAAAAAOBAJwrMr9pa2nFHp6ALVkw/djK/DvB+/GFojhQp//gEEe/rENPCUzLADAAAAABsoU5grnWdvwX19NZqh+l05SqMiQpbetMe5/L7OoN5xsUI1965EjMAAAAAUF9bE8w5TYfewftzfgTcijsivnzuVYd9lubH4WNGdAYAriFSivtzBauvAICNjH1HNUAtVQLznDUPs00cZuu/A6ebb6Xx7Pw4ysexIqUYOX6e2InhSwEAnMPrO5juL/jO9lU+3i977AUtAACcU1sTzEf6NlVcawXImPzj0F6LAQAdGbsOBQAAcF7VAnNEdD6ZsvkM8yHCizsAaMf9yr1jX52H12S4bQJr9NnHICDPeZZxuslsAOiYVRnAFkwwpzRrwCai3dd5U8RlAGjHMyDPed6R2vvH7d8XJf6itRsDAABU81/Ng/V48bqImBFiI3V5cb5IvjsVABrxOSmUc36+jVkVdDdQMumU869dYAAAQO+qBuaU0umDZmOv9QCAjkwF5b/vz2lyl8aBlkRmqzEAAOAaKq/IiBSdvJwomwqK+3/bv41COAC0a+qZxDPgtrgjY
wF7HQGgbW/XfvB1G1hpmx3Mnb8o+unx9/CZbyMAsJ2TPodo/5/gAQCA2uqvyEj3ncYNT9Cu32l429rc9pRw0ycHAHzTa6e9b/YAAACuY4PAfBXzrwS/Kxf2AwAO4NtrAQDgmrZZkZHSc4o5p3auHl7/auxx3wbSxg283Tx1GQDYl7gMAP2KCF/LgVU2C8wPt9Cc01F/V9WPyr8/x95/Me9xGwEAhrwQBYDzEJmBNXZbkXFIAr1Cd73CbQQAmpFzTodNDgAAAM3ZfIL5JtLrun/bTzMfPdE7/Pxb/Qvg0bcRALgueRkAAHg45CJ/seWF6Frrrq2dDwDAEp7LAMA1xOANYIGwYwcAAAAAgBKHTDADAAAAANA/gRkAAAAAgCICMwAAAAAARQRmAAAAAACKCMwAAAAAABQRmAEAAAAAKPI/pCct2vtE3uwAAAAASUVORK5CYII=\n",
"text/plain": [
"<Figure size 1440x1440 with 10 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"plotImages(imgs)\n",
"# image = plt.imread('training_images/0t0AAOSw4tNgSQ1j.jpg')\n",
"# plt.imshow(image)"
]
},
{
"cell_type": "code",
"execution_count": 84,
"id": "6322bcad",
"metadata": {},
"outputs": [],
"source": [
"#physical_devices = tf.config.list_physical_devices('GPU')\n",
"#print(len(physical_devices))\n",
"#print(physical_devices)\n",
"#for gpu_instance in physical_devices: \n",
"# tf.config.experimental.set_memory_growth(gpu_instance, True)\n",
"#tf.config.experimental.set_memory_growth(physical_devices[0], True)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"id": "07fd25c6",
"metadata": {},
"outputs": [],
"source": [
"# see https://www.kaggle.com/dmitrypukhov/cnn-with-imagedatagenerator-flow-from-dataframe\n",
"# for a train/test/val split example\n",
"\n",
"# may need to either create a test dataset from the original dataset or just download a new one;\n",
"# one possible approach is sketched in the next cell"
]
},
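{
"cell_type": "code",
"execution_count": null,
"id": "test-split-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch, assuming the test set is carved from the existing dataframe rather than\n",
"# downloaded separately (see the note above). Two chained train_test_split calls give a\n",
"# stratified train/val/test split; df_train, df_val and df_test are hypothetical names\n",
"# that nothing below depends on.\n",
"df_train, df_test = train_test_split(df, test_size=0.1, random_state=42,\n",
"                                     stratify=df['PrimaryCategoryID'])\n",
"df_train, df_val = train_test_split(df_train, test_size=0.2, random_state=42,\n",
"                                    stratify=df_train['PrimaryCategoryID'])"
]
},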
{
"cell_type": "code",
"execution_count": 86,
"id": "fe06f2bf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Model: \"model_5\"\n",
"__________________________________________________________________________________________________\n",
"Layer (type) Output Shape Param # Connected to \n",
"==================================================================================================\n",
"input_6 (InputLayer) [(None, 299, 299, 3) 0 \n",
"__________________________________________________________________________________________________\n",
"block1_conv1 (Conv2D) (None, 149, 149, 32) 864 input_6[0][0] \n",
"__________________________________________________________________________________________________\n",
"block1_conv1_bn (BatchNormaliza (None, 149, 149, 32) 128 block1_conv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block1_conv1_act (Activation) (None, 149, 149, 32) 0 block1_conv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block1_conv2 (Conv2D) (None, 147, 147, 64) 18432 block1_conv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block1_conv2_bn (BatchNormaliza (None, 147, 147, 64) 256 block1_conv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block1_conv2_act (Activation) (None, 147, 147, 64) 0 block1_conv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_sepconv1 (SeparableConv2 (None, 147, 147, 128 8768 block1_conv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_sepconv1_bn (BatchNormal (None, 147, 147, 128 512 block2_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_sepconv2_act (Activation (None, 147, 147, 128 0 block2_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_sepconv2 (SeparableConv2 (None, 147, 147, 128 17536 block2_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_sepconv2_bn (BatchNormal (None, 147, 147, 128 512 block2_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2d_20 (Conv2D) (None, 74, 74, 128) 8192 block1_conv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block2_pool (MaxPooling2D) (None, 74, 74, 128) 0 block2_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_20 (BatchNo (None, 74, 74, 128) 512 conv2d_20[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_60 (Add) (None, 74, 74, 128) 0 block2_pool[0][0] \n",
" batch_normalization_20[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv1_act (Activation (None, 74, 74, 128) 0 add_60[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv1 (SeparableConv2 (None, 74, 74, 256) 33920 block3_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv1_bn (BatchNormal (None, 74, 74, 256) 1024 block3_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv2_act (Activation (None, 74, 74, 256) 0 block3_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv2 (SeparableConv2 (None, 74, 74, 256) 67840 block3_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_sepconv2_bn (BatchNormal (None, 74, 74, 256) 1024 block3_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2d_21 (Conv2D) (None, 37, 37, 256) 32768 add_60[0][0] \n",
"__________________________________________________________________________________________________\n",
"block3_pool (MaxPooling2D) (None, 37, 37, 256) 0 block3_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_21 (BatchNo (None, 37, 37, 256) 1024 conv2d_21[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_61 (Add) (None, 37, 37, 256) 0 block3_pool[0][0] \n",
" batch_normalization_21[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv1_act (Activation (None, 37, 37, 256) 0 add_61[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv1 (SeparableConv2 (None, 37, 37, 728) 188672 block4_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv1_bn (BatchNormal (None, 37, 37, 728) 2912 block4_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv2_act (Activation (None, 37, 37, 728) 0 block4_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv2 (SeparableConv2 (None, 37, 37, 728) 536536 block4_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_sepconv2_bn (BatchNormal (None, 37, 37, 728) 2912 block4_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2d_22 (Conv2D) (None, 19, 19, 728) 186368 add_61[0][0] \n",
"__________________________________________________________________________________________________\n",
"block4_pool (MaxPooling2D) (None, 19, 19, 728) 0 block4_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_22 (BatchNo (None, 19, 19, 728) 2912 conv2d_22[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_62 (Add) (None, 19, 19, 728) 0 block4_pool[0][0] \n",
" batch_normalization_22[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv1_act (Activation (None, 19, 19, 728) 0 add_62[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv1 (SeparableConv2 (None, 19, 19, 728) 536536 block5_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv1_bn (BatchNormal (None, 19, 19, 728) 2912 block5_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv2_act (Activation (None, 19, 19, 728) 0 block5_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv2 (SeparableConv2 (None, 19, 19, 728) 536536 block5_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv2_bn (BatchNormal (None, 19, 19, 728) 2912 block5_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv3_act (Activation (None, 19, 19, 728) 0 block5_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv3 (SeparableConv2 (None, 19, 19, 728) 536536 block5_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block5_sepconv3_bn (BatchNormal (None, 19, 19, 728) 2912 block5_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_63 (Add) (None, 19, 19, 728) 0 block5_sepconv3_bn[0][0] \n",
" add_62[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv1_act (Activation (None, 19, 19, 728) 0 add_63[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv1 (SeparableConv2 (None, 19, 19, 728) 536536 block6_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv1_bn (BatchNormal (None, 19, 19, 728) 2912 block6_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv2_act (Activation (None, 19, 19, 728) 0 block6_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv2 (SeparableConv2 (None, 19, 19, 728) 536536 block6_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv2_bn (BatchNormal (None, 19, 19, 728) 2912 block6_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv3_act (Activation (None, 19, 19, 728) 0 block6_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv3 (SeparableConv2 (None, 19, 19, 728) 536536 block6_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block6_sepconv3_bn (BatchNormal (None, 19, 19, 728) 2912 block6_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_64 (Add) (None, 19, 19, 728) 0 block6_sepconv3_bn[0][0] \n",
" add_63[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv1_act (Activation (None, 19, 19, 728) 0 add_64[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv1 (SeparableConv2 (None, 19, 19, 728) 536536 block7_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv1_bn (BatchNormal (None, 19, 19, 728) 2912 block7_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv2_act (Activation (None, 19, 19, 728) 0 block7_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv2 (SeparableConv2 (None, 19, 19, 728) 536536 block7_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv2_bn (BatchNormal (None, 19, 19, 728) 2912 block7_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv3_act (Activation (None, 19, 19, 728) 0 block7_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv3 (SeparableConv2 (None, 19, 19, 728) 536536 block7_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block7_sepconv3_bn (BatchNormal (None, 19, 19, 728) 2912 block7_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_65 (Add) (None, 19, 19, 728) 0 block7_sepconv3_bn[0][0] \n",
" add_64[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv1_act (Activation (None, 19, 19, 728) 0 add_65[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv1 (SeparableConv2 (None, 19, 19, 728) 536536 block8_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv1_bn (BatchNormal (None, 19, 19, 728) 2912 block8_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv2_act (Activation (None, 19, 19, 728) 0 block8_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv2 (SeparableConv2 (None, 19, 19, 728) 536536 block8_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv2_bn (BatchNormal (None, 19, 19, 728) 2912 block8_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv3_act (Activation (None, 19, 19, 728) 0 block8_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv3 (SeparableConv2 (None, 19, 19, 728) 536536 block8_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block8_sepconv3_bn (BatchNormal (None, 19, 19, 728) 2912 block8_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_66 (Add) (None, 19, 19, 728) 0 block8_sepconv3_bn[0][0] \n",
" add_65[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv1_act (Activation (None, 19, 19, 728) 0 add_66[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv1 (SeparableConv2 (None, 19, 19, 728) 536536 block9_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv1_bn (BatchNormal (None, 19, 19, 728) 2912 block9_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv2_act (Activation (None, 19, 19, 728) 0 block9_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv2 (SeparableConv2 (None, 19, 19, 728) 536536 block9_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv2_bn (BatchNormal (None, 19, 19, 728) 2912 block9_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv3_act (Activation (None, 19, 19, 728) 0 block9_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv3 (SeparableConv2 (None, 19, 19, 728) 536536 block9_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block9_sepconv3_bn (BatchNormal (None, 19, 19, 728) 2912 block9_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_67 (Add) (None, 19, 19, 728) 0 block9_sepconv3_bn[0][0] \n",
" add_66[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv1_act (Activatio (None, 19, 19, 728) 0 add_67[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv1 (SeparableConv (None, 19, 19, 728) 536536 block10_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv1_bn (BatchNorma (None, 19, 19, 728) 2912 block10_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv2_act (Activatio (None, 19, 19, 728) 0 block10_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv2 (SeparableConv (None, 19, 19, 728) 536536 block10_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv2_bn (BatchNorma (None, 19, 19, 728) 2912 block10_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv3_act (Activatio (None, 19, 19, 728) 0 block10_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv3 (SeparableConv (None, 19, 19, 728) 536536 block10_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block10_sepconv3_bn (BatchNorma (None, 19, 19, 728) 2912 block10_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_68 (Add) (None, 19, 19, 728) 0 block10_sepconv3_bn[0][0] \n",
" add_67[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv1_act (Activatio (None, 19, 19, 728) 0 add_68[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv1 (SeparableConv (None, 19, 19, 728) 536536 block11_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv1_bn (BatchNorma (None, 19, 19, 728) 2912 block11_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv2_act (Activatio (None, 19, 19, 728) 0 block11_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv2 (SeparableConv (None, 19, 19, 728) 536536 block11_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv2_bn (BatchNorma (None, 19, 19, 728) 2912 block11_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv3_act (Activatio (None, 19, 19, 728) 0 block11_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv3 (SeparableConv (None, 19, 19, 728) 536536 block11_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block11_sepconv3_bn (BatchNorma (None, 19, 19, 728) 2912 block11_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_69 (Add) (None, 19, 19, 728) 0 block11_sepconv3_bn[0][0] \n",
" add_68[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv1_act (Activatio (None, 19, 19, 728) 0 add_69[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv1 (SeparableConv (None, 19, 19, 728) 536536 block12_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv1_bn (BatchNorma (None, 19, 19, 728) 2912 block12_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv2_act (Activatio (None, 19, 19, 728) 0 block12_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv2 (SeparableConv (None, 19, 19, 728) 536536 block12_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv2_bn (BatchNorma (None, 19, 19, 728) 2912 block12_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv3_act (Activatio (None, 19, 19, 728) 0 block12_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv3 (SeparableConv (None, 19, 19, 728) 536536 block12_sepconv3_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block12_sepconv3_bn (BatchNorma (None, 19, 19, 728) 2912 block12_sepconv3[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_70 (Add) (None, 19, 19, 728) 0 block12_sepconv3_bn[0][0] \n",
" add_69[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv1_act (Activatio (None, 19, 19, 728) 0 add_70[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv1 (SeparableConv (None, 19, 19, 728) 536536 block13_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv1_bn (BatchNorma (None, 19, 19, 728) 2912 block13_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv2_act (Activatio (None, 19, 19, 728) 0 block13_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv2 (SeparableConv (None, 19, 19, 1024) 752024 block13_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_sepconv2_bn (BatchNorma (None, 19, 19, 1024) 4096 block13_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"conv2d_23 (Conv2D) (None, 10, 10, 1024) 745472 add_70[0][0] \n",
"__________________________________________________________________________________________________\n",
"block13_pool (MaxPooling2D) (None, 10, 10, 1024) 0 block13_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"batch_normalization_23 (BatchNo (None, 10, 10, 1024) 4096 conv2d_23[0][0] \n",
"__________________________________________________________________________________________________\n",
"add_71 (Add) (None, 10, 10, 1024) 0 block13_pool[0][0] \n",
" batch_normalization_23[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv1 (SeparableConv (None, 10, 10, 1536) 1582080 add_71[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv1_bn (BatchNorma (None, 10, 10, 1536) 6144 block14_sepconv1[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv1_act (Activatio (None, 10, 10, 1536) 0 block14_sepconv1_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv2 (SeparableConv (None, 10, 10, 2048) 3159552 block14_sepconv1_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv2_bn (BatchNorma (None, 10, 10, 2048) 8192 block14_sepconv2[0][0] \n",
"__________________________________________________________________________________________________\n",
"block14_sepconv2_act (Activatio (None, 10, 10, 2048) 0 block14_sepconv2_bn[0][0] \n",
"__________________________________________________________________________________________________\n",
"avg_pool (GlobalAveragePooling2 (None, 2048) 0 block14_sepconv2_act[0][0] \n",
"__________________________________________________________________________________________________\n",
"predictions (Dense) (None, 1000) 2049000 avg_pool[0][0] \n",
"__________________________________________________________________________________________________\n",
"dense_5 (Dense) (None, 7) 7007 predictions[0][0] \n",
"==================================================================================================\n",
"Total params: 22,917,487\n",
"Trainable params: 22,862,959\n",
"Non-trainable params: 54,528\n",
"__________________________________________________________________________________________________\n"
]
}
],
"source": [
"with mirrored_strategy.scope(): # for training on dual gpus\n",
"# physical_devices = tf.config.list_physical_devices('GPU')\n",
"# tf.config.experimental.set_memory_growth(physical_devices[0], True)\n",
" base_model = tf.keras.applications.xception.Xception(include_top=True, pooling='avg')\n",
" for layer in base_model.layers:\n",
" layer.trainable = True\n",
" output = Dense(7, activation='softmax')(base_model.output)\n",
" model = tf.keras.Model(base_model.input, output)\n",
" model.compile(optimizer=Adam(learning_rate=.001), loss='categorical_crossentropy',\n",
" metrics=['accuracy'])\n",
"# sparse_categorical_crossentropy\n",
"#model = add_regularization(model)\n",
"model.summary()\n"
]
},
{
"cell_type": "code",
"execution_count": 87,
"id": "9cd2ba27",
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2022-08-01 23:37:26.275294: W tensorflow/core/grappler/optimizers/data/auto_shard.cc:656] In AUTO-mode, and switching to DATA-based sharding, instead of FILE-based sharding as we cannot find appropriate reader dataset op(s) to shard. Error: Did not find a shardable source, walked to a node which is not a dataset: name: \"FlatMapDataset/_2\"\n",
"op: \"FlatMapDataset\"\n",
"input: \"TensorDataset/_1\"\n",
"attr {\n",
" key: \"Targuments\"\n",
" value {\n",
" list {\n",
" }\n",
" }\n",
"}\n",
"attr {\n",
" key: \"f\"\n",
" value {\n",
" func {\n",
" name: \"__inference_Dataset_flat_map_flat_map_fn_93447\"\n",
" }\n",
" }\n",
"}\n",
"attr {\n",
" key: \"output_shapes\"\n",
" value {\n",
" list {\n",
" shape {\n",
" dim {\n",
" size: -1\n",
" }\n",
" dim {\n",
" size: -1\n",
" }\n",
" dim {\n",
" size: -1\n",
" }\n",
" dim {\n",
" size: -1\n",
" }\n",
" }\n",
" shape {\n",
" dim {\n",
" size: -1\n",
" }\n",
" dim {\n",
" size: -1\n",
" }\n",
" }\n",
" }\n",
" }\n",
"}\n",
"attr {\n",
" key: \"output_types\"\n",
" value {\n",
" list {\n",
" type: DT_FLOAT\n",
" type: DT_FLOAT\n",
" }\n",
" }\n",
"}\n",
". Consider either turning off auto-sharding or switching the auto_shard_policy to DATA to shard this dataset. You can do this by creating a new `tf.data.Options()` object then setting `options.experimental_distribute.auto_shard_policy = AutoShardPolicy.DATA` before applying the options object to the dataset via `dataset.with_options(options)`.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/6\n",
"INFO:tensorflow:batch_all_reduce: 158 all-reduces with algorithm = nccl, num_packs = 1\n",
"INFO:tensorflow:batch_all_reduce: 158 all-reduces with algorithm = nccl, num_packs = 1\n",
" 17/782 [..............................] - ETA: 6:35 - loss: 1.9460 - accuracy: 0.1428"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Input \u001b[0;32mIn [87]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrain_generator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[43msteps_per_epoch\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtrain_generator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3\u001b[0m \u001b[43m \u001b[49m\u001b[43mvalidation_data\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvalidation_generator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mvalidation_steps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mvalidation_generator\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m6\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:1100\u001b[0m, in \u001b[0;36mModel.fit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1093\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m trace\u001b[38;5;241m.\u001b[39mTrace(\n\u001b[1;32m 1094\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1095\u001b[0m epoch_num\u001b[38;5;241m=\u001b[39mepoch,\n\u001b[1;32m 1096\u001b[0m step_num\u001b[38;5;241m=\u001b[39mstep,\n\u001b[1;32m 1097\u001b[0m batch_size\u001b[38;5;241m=\u001b[39mbatch_size,\n\u001b[1;32m 1098\u001b[0m _r\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m):\n\u001b[1;32m 1099\u001b[0m callbacks\u001b[38;5;241m.\u001b[39mon_train_batch_begin(step)\n\u001b[0;32m-> 1100\u001b[0m tmp_logs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_function\u001b[49m\u001b[43m(\u001b[49m\u001b[43miterator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1101\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m data_handler\u001b[38;5;241m.\u001b[39mshould_sync:\n\u001b[1;32m 1102\u001b[0m context\u001b[38;5;241m.\u001b[39masync_wait()\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py:828\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 826\u001b[0m tracing_count \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexperimental_get_tracing_count()\n\u001b[1;32m 827\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m trace\u001b[38;5;241m.\u001b[39mTrace(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_name) \u001b[38;5;28;01mas\u001b[39;00m tm:\n\u001b[0;32m--> 828\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 829\u001b[0m compiler \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mxla\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_experimental_compile \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mnonXla\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 830\u001b[0m new_tracing_count \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexperimental_get_tracing_count()\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py:855\u001b[0m, in \u001b[0;36mFunction._call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 852\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock\u001b[38;5;241m.\u001b[39mrelease()\n\u001b[1;32m 853\u001b[0m \u001b[38;5;66;03m# In this case we have created variables on the first call, so we run the\u001b[39;00m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;66;03m# defunned version which is guaranteed to never create variables.\u001b[39;00m\n\u001b[0;32m--> 855\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_stateless_fn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;66;03m# pylint: disable=not-callable\u001b[39;00m\n\u001b[1;32m 856\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_stateful_fn \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 857\u001b[0m \u001b[38;5;66;03m# Release the lock early so that multiple threads can perform the call\u001b[39;00m\n\u001b[1;32m 858\u001b[0m \u001b[38;5;66;03m# in parallel.\u001b[39;00m\n\u001b[1;32m 859\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock\u001b[38;5;241m.\u001b[39mrelease()\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/function.py:2942\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 2939\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock:\n\u001b[1;32m 2940\u001b[0m (graph_function,\n\u001b[1;32m 2941\u001b[0m filtered_flat_args) \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_define_function(args, kwargs)\n\u001b[0;32m-> 2942\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mgraph_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_flat\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2943\u001b[0m \u001b[43m \u001b[49m\u001b[43mfiltered_flat_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcaptured_inputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgraph_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcaptured_inputs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/function.py:1918\u001b[0m, in \u001b[0;36mConcreteFunction._call_flat\u001b[0;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[1;32m 1914\u001b[0m possible_gradient_type \u001b[38;5;241m=\u001b[39m gradients_util\u001b[38;5;241m.\u001b[39mPossibleTapeGradientTypes(args)\n\u001b[1;32m 1915\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m (possible_gradient_type \u001b[38;5;241m==\u001b[39m gradients_util\u001b[38;5;241m.\u001b[39mPOSSIBLE_GRADIENT_TYPES_NONE\n\u001b[1;32m 1916\u001b[0m \u001b[38;5;129;01mand\u001b[39;00m executing_eagerly):\n\u001b[1;32m 1917\u001b[0m \u001b[38;5;66;03m# No tape is watching; skip to running the function.\u001b[39;00m\n\u001b[0;32m-> 1918\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_build_call_outputs(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_inference_function\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1919\u001b[0m \u001b[43m \u001b[49m\u001b[43mctx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcancellation_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcancellation_manager\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[1;32m 1920\u001b[0m forward_backward \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_select_forward_and_backward_functions(\n\u001b[1;32m 1921\u001b[0m args,\n\u001b[1;32m 1922\u001b[0m possible_gradient_type,\n\u001b[1;32m 1923\u001b[0m executing_eagerly)\n\u001b[1;32m 1924\u001b[0m forward_function, args_with_tangents \u001b[38;5;241m=\u001b[39m forward_backward\u001b[38;5;241m.\u001b[39mforward()\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/function.py:555\u001b[0m, in \u001b[0;36m_EagerDefinedFunction.call\u001b[0;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m _InterpolateFunctionError(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 554\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m cancellation_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 555\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[43mexecute\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexecute\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 556\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mstr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msignature\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mname\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 557\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_outputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_num_outputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 558\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 559\u001b[0m \u001b[43m \u001b[49m\u001b[43mattrs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattrs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 560\u001b[0m \u001b[43m \u001b[49m\u001b[43mctx\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mctx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 561\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 562\u001b[0m outputs \u001b[38;5;241m=\u001b[39m execute\u001b[38;5;241m.\u001b[39mexecute_with_cancellation(\n\u001b[1;32m 563\u001b[0m \u001b[38;5;28mstr\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msignature\u001b[38;5;241m.\u001b[39mname),\n\u001b[1;32m 564\u001b[0m num_outputs\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_outputs,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 567\u001b[0m ctx\u001b[38;5;241m=\u001b[39mctx,\n\u001b[1;32m 568\u001b[0m cancellation_manager\u001b[38;5;241m=\u001b[39mcancellation_manager)\n",
"File \u001b[0;32m~/miniconda3/envs/tensorflow-cuda/lib/python3.9/site-packages/tensorflow/python/eager/execute.py:59\u001b[0m, in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 58\u001b[0m ctx\u001b[38;5;241m.\u001b[39mensure_initialized()\n\u001b[0;32m---> 59\u001b[0m tensors \u001b[38;5;241m=\u001b[39m \u001b[43mpywrap_tfe\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mTFE_Py_Execute\u001b[49m\u001b[43m(\u001b[49m\u001b[43mctx\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_handle\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mop_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 60\u001b[0m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattrs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_outputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 61\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m core\u001b[38;5;241m.\u001b[39m_NotOkStatusException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 62\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m name \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"\n",
"model.fit(x=train_generator,\n",
" steps_per_epoch=len(train_generator),\n",
" validation_data=validation_generator,\n",
" validation_steps=len(validation_generator),\n",
" epochs=6,\n",
" verbose=1)"
]
},
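{
"cell_type": "markdown",
"id": "autoshard-note",
"metadata": {},
"source": [
"The auto-shard warning printed during training above already names the fix: create a `tf.data.Options()` object, set `options.experimental_distribute.auto_shard_policy` to `DATA`, and apply it with `dataset.with_options(options)`. The next cell is a minimal, hypothetical sketch of that fix on a stand-in `tf.data.Dataset` called `train_ds` (not defined elsewhere in this notebook); the Keras generator inputs used here are wrapped by TensorFlow internally, so the warning is informational and training proceeds either way."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "autoshard-example",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch, not part of the original pipeline: the warning above\n",
"# suggests sharding on DATA when no file-based source is available.\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"\n",
"# Stand-in dataset; in this notebook the real input is a Keras generator.\n",
"train_ds = tf.data.Dataset.from_tensor_slices(\n",
"    (np.zeros((8, 32, 32, 3), dtype=\"float32\"), np.zeros(8, dtype=\"int32\"))\n",
").batch(4)\n",
"\n",
"options = tf.data.Options()\n",
"options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA\n",
"train_ds = train_ds.with_options(options)"
]
},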
{
"cell_type": "code",
"execution_count": null,
"id": "63f791af",
"metadata": {},
"outputs": [],
"source": [
"model.save(\"Model_1.h5\")"
]
}
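,
{
"cell_type": "markdown",
"id": "reload-note",
"metadata": {},
"source": [
"After saving, the model can be restored from the HDF5 file to confirm the export round-trips; this is a minimal sketch and assumes `Model_1.h5` has been written to disk by the cell above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "reload-example",
"metadata": {},
"outputs": [],
"source": [
"# Quick round-trip check of the HDF5 export (assumes Model_1.h5 exists on disk).\n",
"import tensorflow as tf\n",
"\n",
"restored = tf.keras.models.load_model(\"Model_1.h5\")\n",
"restored.summary()"
]
}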
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
}
},
"nbformat": 4,
"nbformat_minor": 5
}