Below is my code for the Cat and Dog Image Classifier project. I'm stuck on a few issues and don't really know how to proceed: I get an error when I pass the probabilities variable to the plotImages function, and my loss function seems to be bugged. Any tips are highly appreciated.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers, models
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import numpy as np
import matplotlib.pyplot as plt
PATH = 'cats_and_dogs'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')
# Get number of files in each directory. The train and validation directories
# each have the subdirectories "dogs" and "cats".
total_train = sum([len(files) for r, d, files in os.walk(train_dir)])
total_val = sum([len(files) for r, d, files in os.walk(validation_dir)])
total_test = len(os.listdir(test_dir))
# Variables for pre-processing and training.
batch_size = 128
epochs = 15
IMG_HEIGHT = 150
IMG_WIDTH = 150
# Set up the image generators.
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
test_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=128,
    class_mode='sparse')
val_data_gen = validation_image_generator.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=128,
    class_mode='sparse')
test_data_gen = test_image_generator.flow_from_directory(
    PATH,
    target_size=(150, 150),
    batch_size=128,
    classes=['test'],
    class_mode='sparse',
    shuffle=False)
print(train_image_generator)
print(train_data_gen)
#Plotting Images
def plotImages(images_arr, probabilities = False):
    fig, axes = plt.subplots(len(images_arr), 1, figsize=(5, len(images_arr) * 3))
    if probabilities is False:
        for img, ax in zip(images_arr, axes):
            ax.imshow(img)
            ax.axis('off')
    else:
        for img, probability, ax in zip(images_arr, probabilities, axes):
            ax.imshow(img)
            ax.axis('off')
            if probability > 0.5:
                ax.set_title("%.2f" % (probability*100) + "% dog")
            else:
                ax.set_title("%.2f" % ((1-probability)*100) + "% cat")
    plt.show()
sample_training_images, _ = next(train_data_gen)
#plotImages(sample_training_images[:5])
#Creating more training data using transformation
train_image_generator = ImageDataGenerator(
    rescale=1./255,
    rotation_range=50,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
#Plotting new transformed images
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                           directory=train_dir,
                                                           target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                           class_mode='binary')
# Indexing train_data_gen[0] re-applies the random transforms each time, so this
# collects five different augmentations of the same underlying image.
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
#plotImages(augmented_images)
#Model
model = Sequential()
model.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer='adam',
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
history = model.fit(train_data_gen, steps_per_epoch=10, epochs=15,
                    validation_data=val_data_gen)
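One thing I suspect about the loss: the model ends in a single Dense(1) unit and the augmented training generator uses class_mode='binary', while CategoricalCrossentropy expects one output unit per class with one-hot labels. If that is the problem, a binary crossentropy compile like the sketch below (with from_logits=True, since there is no sigmoid on the last layer) is what I plan to try:

```python
# Possible fix I plan to try: single-logit output + 0/1 labels -> binary crossentropy.
# from_logits=True because the final Dense(1) layer has no sigmoid activation.
model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
```

(As far as I can tell, the 'sparse' class mode on the validation generator still yields 0/1 labels for the two classes, so the labels should line up either way.)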
#Show accuracy
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
#Run Test
probabilities = model.predict(test_data_gen)
print(probabilities[1])
plotImages(test_data_gen, probabilities)
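For the plotting error, my current guess is that plotImages wants a plain list of images with one probability per image, but I'm handing it the whole generator, so len() and zip() don't behave the way the function expects. The workaround I'm considering (assuming the single test batch holds all 50 images, since batch_size is 128, and that the raw predictions are logits that still need a sigmoid):

```python
# Possible fix: take the image batch out of the generator and squash the
# logits through a sigmoid before plotting.
test_images = test_data_gen[0][0]  # shape (num_test_images, 150, 150, 3)
probabilities = tf.nn.sigmoid(model.predict(test_data_gen)).numpy().flatten()
plotImages(test_images, probabilities)
```

Flattening would also mean round(probability) in the check below gets a plain float instead of a length-1 array.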
answers = [1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
           1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0,
           1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1,
           1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1,
           0, 0, 0, 0, 0, 0]
correct = 0
for probability, answer in zip(probabilities, answers):
    if round(probability) == answer:
        correct += 1
percentage_identified = (correct / len(answers))
passed_challenge = percentage_identified > 0.63
print(f"Your model correctly identified {round(percentage_identified, 2)}% of the images of cats and dogs.")
if passed_challenge:
    print("You passed the challenge!")
else:
    print("You haven't passed yet. Your model should identify at least 63% of the images. Keep trying. You will get it!")