Saturday, March 7, 2020

evaluate_generator always returns the exact same value, down to the last digit (even with models trained on different datasets)

I have been training models on two different datasets, plus several variations of each model, and to my surprise, in every case Keras's evaluate_generator returns the exact same value (0.8158521060497039, identical to the last digit!). Is this a known bug? How can I tackle it? How could a bug in such an important feature go unnoticed? For one model my validation accuracy is over 96%, and for the other only around 84%, yet I get the exact same result.

I am not sure it is related to my scripts at this point, but for reference:

This is what my training script looks like (I also use a more vanilla version with no callbacks):

# Import the packages we use
import cv2
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import os

os.environ["CUDA_VISIBLE_DEVICES"]="0"

train_dir = '/media/d/DATA_2/Outside/data/train'
eval_dir = '/media/d/DATA_2/Outside/data/eval'
test_dir = '/media/d/DATA_2/Outside/data/test'

# Early stopping, best-checkpoint saving, and LR reduction, all keyed on val_loss
earlyStopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
mcp_save = ModelCheckpoint('/media/d/DATA_2/Outside/mdl_wts_best.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, min_delta=1e-5, mode='min')  # 'epsilon' is deprecated in Keras 2; use 'min_delta'

# create a data generator
train_datagen = ImageDataGenerator(rescale=1./255,   #Scale the image between 0 and 1
                                    rotation_range=40,
                                    width_shift_range=0.2,
                                    height_shift_range=0.2,
                                    shear_range=0.2,
                                    zoom_range=0.2,
                                    horizontal_flip=True,)

val_datagen = ImageDataGenerator(rescale=1./255)  # We do not augment validation data; we only rescale

test_datagen = ImageDataGenerator(rescale=1./255)  # We do not augment test data; we only rescale

# load and iterate training dataset
train_generator = train_datagen.flow_from_directory(train_dir, class_mode='categorical', batch_size=8, shuffle=True, seed=42)
# load and iterate validation dataset
val_generator = val_datagen.flow_from_directory(eval_dir, class_mode='categorical', batch_size=8, shuffle=True, seed=42)
# load and iterate test dataset. Note: shuffle must be the boolean False;
# the string 'False' is truthy and would silently enable shuffling
test_generator = test_datagen.flow_from_directory(test_dir, class_mode=None, batch_size=1, shuffle=False, seed=42)
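# (flow_from_directory defaults to target_size=(256, 256), which matches the
# model's input_shape below, so no explicit target_size is needed here)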

# We use a batch size of 8 for training. Note: batch sizes are conventionally powers of 2 (4, 8, 16, 32, 64, ...)

labels = train_generator.class_indices
print(labels)
np.save('/media/d/DATA_2/Outside/classes', labels)
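# (labels is a dict mapping class-folder names to indices, e.g.
# {'class_a': 0, 'class_b': 1} for hypothetical folder names; saving it lets
# us recover the mapping at inference time)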

from keras import layers
from keras import models
from keras import optimizers
from keras.layers import Dropout

from keras.preprocessing.image import img_to_array, load_img

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(256, 256, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))  #Dropout for regularization
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))  

# Let's see our model
model.summary()


model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0), metrics=['acc']) 

# The training part
# We train for up to 200 epochs; early stopping (patience 10) usually halts sooner
history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.n // train_generator.batch_size,
                              epochs=200,
                              validation_data=val_generator,
                              validation_steps=val_generator.n // val_generator.batch_size,
                              callbacks=[earlyStopping, mcp_save, reduce_lr_loss])

#Save the model
model.save_weights('/media/d/DATA_2/Outside/model_weights.h5')
model.save('/media/d/DATA_2/Outside/model_keras.h5')
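# (model.save stores architecture + weights + optimizer state so load_model
# can restore it directly; save_weights stores only the weights)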

# Let's plot the training and validation curves
# Get the details from the history object
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

#Train and validation accuracy
plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('Training and Validation accuracy')
plt.legend()

plt.figure()
#Train and validation loss
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()

plt.show()

And I evaluate as below:

# Import the packages we use (only what the evaluation script actually needs)
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import os
from keras.models import load_model

os.environ["CUDA_VISIBLE_DEVICES"]="0"


train_dir = '/media/d/DATA_2/Outside/data/train'
eval_dir = '/media/d/DATA_2/Outside/data/eval'
test_dir = '/media/d/DATA_2/Outside/data/test'

test_datagen = ImageDataGenerator(rescale=1./255)  # We do not augment test data; we only rescale

# shuffle must be the boolean False (the string 'False' is truthy). 'workers'
# and 'pickle_safe' are not flow_from_directory arguments and would raise a
# TypeError, so they are dropped here
test_generator = test_datagen.flow_from_directory(test_dir, target_size=(256,256), class_mode='categorical', batch_size=1, shuffle=False, seed=42)

# load the trained model and label binarizer from disk
model = load_model('/media/d/DATA_2/Outside/model_keras.h5')

eval_scores = model.evaluate_generator(generator=test_generator, steps=test_generator.n // test_generator.batch_size)
print("Test Accuracy = ", eval_scores[1])
np.save('/media/d/DATA_2/Outside/Test_Accuracy', eval_scores)
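
As a sanity check, I can also compute the accuracy by hand from per-image predictions and compare it with the evaluate_generator number (a minimal sketch reusing the generator above; it relies on shuffle=False so that prediction order lines up with test_generator.classes):

# Manual accuracy: predict every image, then compare argmax predictions
# against the ground-truth indices in test_generator.classes
test_generator.reset()  # rewind so predictions line up with the label order
preds = model.predict_generator(test_generator, steps=test_generator.n)
pred_classes = np.argmax(preds, axis=1)
manual_acc = np.mean(pred_classes == test_generator.classes)
print("Manual Test Accuracy = ", manual_acc)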

PS: I added batch_size=1, shuffle=False, seed=42, workers=0, and pickle_safe=True because they were recommended online, but nothing changed. (workers and pickle_safe belong to evaluate_generator, not flow_from_directory; pickle_safe was renamed use_multiprocessing in Keras 2.)
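
Since ModelCheckpoint wrote the best weights to a separate file during training, one more thing worth trying is evaluating that checkpoint instead of the final model, in case the model saved after early stopping differs from the best-val_loss one (a sketch, assuming the checkpoint path from the training script above):

# Load and evaluate the best checkpoint saved by ModelCheckpoint
best_model = load_model('/media/d/DATA_2/Outside/mdl_wts_best.hdf5')
test_generator.reset()  # rewind the generator before a fresh evaluation
best_scores = best_model.evaluate_generator(generator=test_generator, steps=test_generator.n)
print("Best-checkpoint Test Accuracy = ", best_scores[1])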
