Commit 825e971e authored by mihaivanea
Browse files

Working on generating arrays from fddb

parent 661c74f3
# NOTE(review): this chunk is a scraped GitLab *diff view*, not runnable
# source.  The "......@@ -8,13 +8,15 @@" line below is a diff hunk header
# (with "import numpy as np" fused onto it), and the load_source() call is
# truncated mid-argument.  Recover the real file before editing logic.
import imp
# Load the MTCNN face-detection module at runtime; the path argument was
# lost in the scrape — TODO recover.
facenet = imp.load_source("detect_face",
......@@ -8,13 +8,15 @@ import numpy as np
# Broken import: the source module name was lost ("from import ...");
# presumably scipy.misc, which provided imread/imsave in this era — confirm.
from import imread, imsave
from sys import argv
# The next four lines are the diff's before/after pair: the commit switches
# from a CLI-argument image path to a hard-coded test image.
path = argv[1]
#path = "./photo.jpg"
#path = argv[1]
path = "./photo.jpg"
# NOTE(review): `tf` is used but its import is not visible in this chunk.
with tf.Session() as sess:
model = facenet.create_mtcnn(sess)
img = imread(path)
box, pts = facenet.detect_face(img,10, *model, threshold=[0.25, 0.25, 0.25], factor=0.5)
#box, pts = facenet.detect_face(img,10, *model, threshold=[0.25, 0.25, 0.25], factor=0.5)
# Hard-coded detection box used while the detector call is commented out;
# the meaning of the four values (x/y/w/h vs corners) is unconfirmed.
box = [[123.583300, 85.549500, 1.265839, 269.693400]]
pts = []
out = facenet.add_labels(img, box, pts)
def get_gen(n):
    """Yield the squares 1*1, 2*2, ..., n*n in increasing order."""
    for value in range(1, n + 1):
        yield value * value
# Drive the generator defined above.
# NOTE(review): the scraped source showed the "for" header duplicated with
# no body — a syntax error.  Printing each yielded square is the most
# plausible original intent; TODO confirm against the real file.
my_gen = get_gen(10)
for g in my_gen:
    print(g)
from os import system
from sys import stdout, argv

import numpy as np
# NOTE(review): the scraped line read "from import imread" — the source
# module name was lost.  scipy.misc is the era-appropriate provider of
# imread (removed in SciPy 1.2; imageio.imread is the modern replacement).
# TODO confirm against the real file.
from scipy.misc import imread

# Generate arrays only for a minibatch for now.
# Path to the first FDDB fold file: one image path per line, no extension.
path = "../fddb/FDDB-folds/FDDB-fold-01.txt"
def generate_arrays_fddb(path):
    """Yield (image_array, annotation_text) pairs for an FDDB fold file.

    `path` points at an FDDB-folds listing with one image path per line.
    Each listed image is read from the fddb/ tree and paired with the text
    of a "-ellipseList" annotation file derived from the listed path.
    """
    image_paths = [line.rstrip('\n') for line in open(path)]
    for img in image_paths:
        data = imread("fddb/" + img + ".jpg")
        # Insert "-ellipseList" before the last four characters, mirroring
        # the original slicing.  NOTE(review): FDDB annotation files are
        # normally named per fold (e.g. FDDB-fold-01-ellipseList.txt), not
        # per image — verify this path derivation is what was intended.
        target_path = img[:(len(img) - 4)] + "-ellipseList" + img[(len(img) - 4):]
        # The original line was truncated to "target =" in the scrape;
        # reading the annotation file's text is the most plausible intent —
        # TODO confirm.
        with open("fddb/" + target_path) as annot:
            target = annot.read()
        yield (data, target)
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K

# Transfer-learning setup: InceptionV3 base + new classification head
# (this follows the Keras applications fine-tuning example).

# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
predictions = Dense(200, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
    layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# train the model on the new data for a few epochs
# NOTE(review): fit_generator is missing its required generator argument
# (presumably generate_arrays_fddb(...)); as written this raises TypeError.
# Left unchanged pending the intended data pipeline.
model.fit_generator(steps_per_epoch=1000, epochs=10)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
    # Restored loop body: the scrape dropped it, leaving a syntax error.
    # This is the body used by the Keras fine-tuning example this snippet
    # copies.
    print(i, layer.name)
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np

# Extract convolutional features for one image with an ImageNet-pretrained
# VGG16, classifier head removed.
model = VGG16(weights='imagenet', include_top=False)

img_path = "tiger.jpg"
# Load at the 224x224 input size VGG16 expects.
img = image.load_img(img_path, target_size=(224, 224))
# PIL image -> float array -> one-image batch -> VGG16-specific preprocessing.
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
features = model.predict(x)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment