Commit 7af5d617 authored by mihaivanea

Got the data into a good format. Can start to fine-tune ResNet.

parent d426a026
#!/vol/bitbucket/mv1315/urop/venv/bin/python3.5
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Input, Flatten
from keras.models import Model
from generate_arrays_fddb import load_arrays_fddb
from keras.optimizers import SGD
inputs = Input(shape=(224, 224, 3))
@@ -14,8 +16,10 @@ predictions = Dense(4, activation="relu")(x)
model = Model(inputs=inputs, outputs=predictions)
sgd = SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True)
model.compile(
optimizer="rmsprop",
optimizer=sgd,
loss="mean_squared_error",
metrics=["accuracy"])
@@ -24,7 +28,10 @@ fddb_path_train = "../fddb/FDDB-folds/FDDB-fold-01-ellipseList.txt"
x_train = np.empty((0, 224, 224, 3))
y_train = np.empty((0, 4))
-for i in range(1, 11):
+fddb_path_test = "../fddb/FDDB-folds/FDDB-fold-10-ellipseList.txt"
+x_test, y_test = load_arrays_fddb(fddb_path_test)
+for i in range(1, 10):
    x_t, y_t = load_arrays_fddb(fddb_path_train[:29] + \
        "{}".format(str(i).zfill(2)) + fddb_path_train[31:])
    x_train = np.concatenate((x_train, x_t), axis=0)
@@ -32,7 +39,10 @@ for i in range(1, 11):
model.fit(
    x_train, y_train,
-    batch_size=64,
+    batch_size=16,
    epochs=10,
-    verbose=1)
+    verbose=1,
+    validation_split=0.1)
+predictions = model.predict(x_test, batch_size=128, verbose=2)
+print(str(predictions))
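# Sketch (not part of this commit): the fold paths above are built by slicing
# fddb_path_train at fixed character offsets ([:29] and [31:]), which breaks
# silently if the directory prefix changes. One safer alternative, assuming the
# "FDDB-fold-XX-ellipseList.txt" naming used above and the same load_arrays_fddb
# loader, is a format template:
fold_template = "../fddb/FDDB-folds/FDDB-fold-{:02d}-ellipseList.txt"
x_train = np.empty((0, 224, 224, 3))
y_train = np.empty((0, 4))
for i in range(1, 10):  # folds 01-09 for training; fold 10 stays the test fold
    x_t, y_t = load_arrays_fddb(fold_template.format(i))
    x_train = np.concatenate((x_train, x_t), axis=0)
    y_train = np.concatenate((y_train, y_t), axis=0)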
#!/vol/bitbucket/mv1315/urop/venv/bin/python3.5
import numpy as np
@@ -13,7 +13,10 @@ from keras.losses import categorical_crossentropy, mean_squared_error
# instantiate a ResNet model with pre-trained weights
input_tensor = Input(shape=(224, 224, 3))
base_model = ResNet50(
    input_tensor=input_tensor,
    include_top=True,
    weights="imagenet",
    classes=1000)
@@ -32,8 +35,15 @@ x = base_model.output
# add a fully connected layer with softmax
# to work for 1000 classes
## Fully Connected Softmax Layer
#x_fc = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
#x_fc = Flatten(name='flatten')(x_fc)
#x_fc = Dense(1000, activation='softmax', name='predictions')(x_fc)
classifier_branch = Dense(
    1000,
    name="class_dense_one",
    activation="softmax")(x)
#classifier_out = resnet_model(classifier_branch)
@@ -41,6 +51,7 @@ classifier_branch = Dense(
# Add a regression layer to be trained on FDDB.
regression_branch = Dense(
    4,
    name="regress_dense_one",
    input_dim=4,
    kernel_initializer="normal",
    activation="relu")(x)
@@ -66,7 +77,7 @@ for layer in base_model.layers:
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Define an optimiser for the regression branch.
-sgd = SGD(lr=0.001, momentum=0.0, decay=0.0, nesterov=False)
+sgd = SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True)
##Define a loss function for the classifier.
#classifier_loss = categorical_crossentropy()
@@ -75,26 +86,32 @@ sgd = SGD(lr=0.001, momentum=0.0, decay=0.0, nesterov=False)
#classifier_loss = mean_squared_error()
model.compile(
-    optimizer=rms,
+    optimizer=sgd,
    loss=["categorical_crossentropy", "mean_squared_error"])
# Load the new model on FDDB.
# TODO: Generator still doesn't work.
fddb_path_train = "../fddb/FDDB-folds/FDDB-fold-01-ellipseList.txt"
fddb_path_valid = "../fddb/FDDB-folds/FDDB-fold-02-ellipseList.txt"
# Loading the training and validation data from FDDB.
x_train, y_train = load_arrays_fddb(fddb_path_train)
x_valid, y_valid = load_arrays_fddb(fddb_path_valid)
+x_train = np.empty((0, 224, 224, 3))
+y_train = np.empty((0, 4))
+for i in range(1, 11):
+    x_t, y_t = load_arrays_fddb(fddb_path_train[:29] + \
+        "{}".format(str(i).zfill(2)) + fddb_path_train[31:])
+    x_train = np.concatenate((x_train, x_t), axis=0)
+    y_train = np.concatenate((y_train, y_t), axis=0)
# Train the new model on FDDB.
-model.fit(
-    x_train, y_train,
-    batch_size=16,
-    epochs=10,
-    verbose=1,
-    validation_data=(x_valid, y_valid)
-    )
+#model.fit(
+#    x_train, y_train,
+#    batch_size=16,
+#    epochs=10,
+#    verbose=1,
+#    validation_split=0.1)
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)
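# Sketch (not part of this commit): the compile call above lists two losses, so
# Keras expects one target array per output once the model is built with both
# the classifier and regression branches as outputs. FDDB only provides the four
# regression targets in y_train, so dummy_classes below is a placeholder
# assumption, not dataset labels.
dummy_classes = np.zeros((x_train.shape[0], 1000))
model.fit(
    x_train,
    [dummy_classes, y_train],  # one target array per output, in output order
    batch_size=16,
    epochs=10,
    verbose=1,
    validation_split=0.1)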