Not a Hotdog

I love the Silicon Valley TV series, and lately I've been studying machine learning and deep learning. So, what is the most famous image classification problem from the show? Obviously, Jian-Yang's "Not Hotdog".

The following is the Keras model I used for this binary classification task: hotdog vs. not hotdog.

from keras.preprocessing.image import ImageDataGenerator  
from keras.models import Sequential  
from keras.layers import Conv2D, MaxPooling2D  
from keras.layers import Activation, Dropout, Flatten, Dense  
from keras import backend as K

# image dimensions, dataset locations and training hyperparameters
img_width, img_height = 150, 150
train_data_dir = 'data/hotdogs/train'
validation_data_dir = 'data/hotdogs/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16

# the expected input shape depends on the backend's image data format
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# first of three convolution + max-pooling blocks that extract image features
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))  
model.add(Activation('relu'))  
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))  
model.add(Activation('relu'))  
model.add(MaxPooling2D(pool_size=(2, 2)))

# flatten the feature maps and classify with a small fully connected head;
# the single sigmoid unit outputs the probability used for the binary decision
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',  
              optimizer='adam',
              metrics=['accuracy'])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(  
        rotation_range=40,
        horizontal_flip=True,
        rescale=1. / 255)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
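
# flow_from_directory expects one sub-folder per class inside the train and
# validation directories; the folder names below are an assumption about how
# the dataset is organised, not something stated in the post:
#
#   data/hotdogs/train/hotdog/...
#   data/hotdogs/train/not_hotdog/...
#   data/hotdogs/validation/hotdog/...
#   data/hotdogs/validation/not_hotdog/...
#
# with class_mode='binary', labels are assigned alphabetically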

train_generator = train_datagen.flow_from_directory(  
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(  
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(  
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)

model.save_weights('is_hotdog.h5')  
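
To actually answer the hotdog-or-not question for a new picture, the trained weights can be loaded back and run on a single image. The snippet below is a minimal sketch rather than part of the original script: it assumes the model defined above has been rebuilt identically before calling load_weights, that the class folders are named hotdog and not_hotdog (so hotdog maps to class 0), and that test.jpg is just a placeholder path.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array

# rebuild the architecture above, then restore the trained weights
model.load_weights('is_hotdog.h5')

# load and preprocess one image exactly like the generators do
img = load_img('test.jpg', target_size=(img_width, img_height))
x = img_to_array(img) / 255.
x = np.expand_dims(x, axis=0)  # (1, 150, 150, 3) with a channels-last backend

# sigmoid output in [0, 1]; with folders named 'hotdog' and 'not_hotdog',
# flow_from_directory maps 'hotdog' to class 0 (check
# train_generator.class_indices to be sure)
score = model.predict(x)[0][0]
print('hotdog' if score < 0.5 else 'not a hotdog')

As an alternative, model.save('is_hotdog.h5') would store the architecture together with the weights, so keras.models.load_model could restore everything without rebuilding the network by hand.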

Davide Andreazzini
