Larger dataset

📙 Notebook: Using more sophisticated images with CNN.

Extract zip file + view image

# extract zip file
import zipfile

local_zip = 'file.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
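
To confirm what was extracted, list the target directory (a minimal sketch; the folder names inside the zip depend on the dataset):

import os

# show the top-level contents of the extraction directory
print(os.listdir('/tmp'))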
# show image
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

img_path = '/tmp/some_image.jpg'  # path to one of the extracted images
img = mpimg.imread(img_path)
plt.imshow(img)
plt.show()

Image to np array

import numpy as np
from keras.preprocessing import image

path = './image.png'
img = image.load_img(path, target_size=(150, 150))  # load and resize to the model's input size
x = image.img_to_array(img)                         # PIL image -> array of shape (150, 150, 3)
x = np.expand_dims(x, axis=0)                       # add a batch dimension -> (1, 150, 150, 3)
images = np.vstack([x])

classes = model.predict(images, batch_size=10)      # model is a previously trained Keras model
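
With a single sigmoid output (as in the binary classifiers used here), the prediction can be thresholded at 0.5. A minimal sketch, assuming flow_from_directory's alphabetical class ordering (cats → 0, dogs → 1):

# classes has shape (1, 1): one image, one sigmoid output
if classes[0][0] > 0.5:
    print(path + ' is a dog')
else:
    print(path + ' is a cat')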

Plot loss and acc

history = model.fit(...)

acc      = history.history['accuracy']
val_acc  = history.history['val_accuracy']
loss     = history.history['loss']
val_loss = history.history['val_loss']
epochs   = range(len(acc))   # one entry per training epoch

# plot accuracy
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
# plot loss
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
plt.show()

Cats vs dogs

📙 Notebook: Cat vs Dog simple DNN.

os

import os
import random
from shutil import copyfile

base_dir = '/tmp/cats-v-dogs'
os.mkdir(base_dir)

train_dir = os.path.join(base_dir, 'training')
validation_dir = os.path.join(base_dir, 'testing')
os.mkdir(train_dir)
os.mkdir(validation_dir)

os.listdir(DIRECTORY)          # returns a list of the contents of that directory
os.path.getsize(PATH)          # returns the size of the file in bytes
copyfile(source, destination)  # copies a file from source to destination
random.sample(list, len(list)) # returns a new, shuffled copy of the list
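
A minimal sketch of how these helpers fit together, copying non-empty files from a source folder into one of the new directories (SOURCE_DIR and the zero-length check are assumptions; the full train/validation split is covered in the next section):

SOURCE_DIR = '/tmp/PetImages/Cat/'           # assumed location of the raw cat images
TARGET_DIR = os.path.join(train_dir, 'cats')
os.mkdir(TARGET_DIR)

files = os.listdir(SOURCE_DIR)
shuffled = random.sample(files, len(files))  # shuffled copy of the file names
for fname in shuffled:
    src = os.path.join(SOURCE_DIR, fname)
    if os.path.getsize(src) > 0:             # skip zero-length (corrupt) files
        copyfile(src, os.path.join(TARGET_DIR, fname))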

Split data