Skip to content

Commit

Permalink
Cleaned Face Recognition Code
Browse files Browse the repository at this point in the history
  • Loading branch information
neelanjan00 committed Aug 2, 2020
1 parent 6a72fd5 commit c041452
Show file tree
Hide file tree
Showing 246 changed files with 246 additions and 1,077 deletions.
Empty file added .Rhistory
Empty file.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
Diff not rendered.
66 changes: 66 additions & 0 deletions Image_Dataset_Generator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
from cv2 import cv2
import numpy as np
import os
import dlib
from imutils import face_utils
from imutils.face_utils import FaceAligner

# dlib HOG face detector + 68-landmark predictor, used to align each
# captured face to a canonical 200px-wide crop before saving.
detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_aligner = FaceAligner(shape_predictor, desiredFaceWidth=200)

video_capture = cv2.VideoCapture(0)

name = input("Enter name of person: ")

train_path = './faces-dataset/train/'
val_path = './faces-dataset/val/'

new_train_directory = os.path.join(train_path, name)
new_val_directory = os.path.join(val_path, name)

# exist_ok takes a boolean; the original passed the string 'True', which only
# worked because any non-empty string is truthy. With exist_ok=True the
# separate os.path.exists() pre-checks are unnecessary (and race-free).
os.makedirs(new_train_directory, exist_ok=True)
os.makedirs(new_val_directory, exist_ok=True)

number_of_images = 0
MAX_NUMBER_OF_IMAGES = 50
count = 0  # per-detection frame counter: save only every 6th single-face frame

while number_of_images <= MAX_NUMBER_OF_IMAGES:
    ret, frame = video_capture.read()
    if not ret:
        # Camera read failed; stop instead of crashing on a None frame.
        break

    frame = cv2.flip(frame, 1)  # mirror the feed for a natural selfie view

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = detector(frame_gray)
    # Only capture when exactly one face is visible so the dataset stays clean.
    if len(faces) == 1:
        face = faces[0]
        face_aligned = face_aligner.align(frame, frame_gray, face)

        if count == 5:
            filename = name + str(number_of_images) + '.jpg'
            # First 31 images go to the training split, the rest to validation.
            if number_of_images <= 30:
                cv2.imwrite(os.path.join(new_train_directory, filename), face_aligned)
            else:
                cv2.imwrite(os.path.join(new_val_directory, filename), face_aligned)
            number_of_images += 1
            count = 0
        print(count)
        count += 1

    cv2.imshow('Video', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
40 changes: 40 additions & 0 deletions PredictFaces.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from Preprocess import extract_face, get_embedding
from tensorflow.python.keras.models import load_model
from sklearn.preprocessing import Normalizer, LabelEncoder
import argparse
import pickle
import numpy as np

# Recreate the encoders used at training time: an L2 normalizer for the
# embeddings and a LabelEncoder restored from the class list that
# TrainModel.py saved to classes.npy.
in_encoder = Normalizer()
out_encoder = LabelEncoder()
out_encoder.classes_ = np.load('classes.npy')
facenet_model = load_model('facenet_keras.h5')

with open('SVCtrainedModel.pkl', 'rb') as f:
    model = pickle.load(f)

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="Test Image Path")
args = vars(ap.parse_args())

# Detect and crop the face, embed it with FaceNet, then normalize the
# embedding the same way the SVC's training data was normalized.
random_face = extract_face(args['image'])
random_face_emd = in_encoder.transform([get_embedding(facenet_model, random_face)])[0]

samples = np.expand_dims(random_face_emd, axis=0)
yhat_class = model.predict(samples)
yhat_prob = model.predict_proba(samples)

predicted_name = out_encoder.inverse_transform(yhat_class)[0]
# inverse_transform over 0..n-1 is exactly the stored class list, so use
# it directly instead of rebuilding it.
all_names = out_encoder.classes_

print("Predicted Probabilities: ")
for i, name in enumerate(all_names):
    print(name, ": ", yhat_prob[0, i] * 100)
print('Predicted: %s' % predicted_name)
64 changes: 64 additions & 0 deletions Preprocess.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from mtcnn.mtcnn import MTCNN
from PIL import Image
import numpy as np
import os

def extract_face(filename, required_size=(160, 160)):
    """Detect the first face in an image file and return its pixels.

    The crop is resized to ``required_size`` (default 160x160, the input
    size ``get_embedding`` feeds to the FaceNet model) and returned as an
    RGB numpy array.

    Raises ValueError if MTCNN finds no face in the image.
    """
    # load image from file and force RGB (handles grayscale/RGBA inputs)
    image = Image.open(filename)
    image = image.convert('RGB')
    pixels = np.asarray(image)
    # Reuse one MTCNN detector across calls: constructing it builds the
    # underlying networks, far too expensive to repeat for every image.
    detector = getattr(extract_face, "_detector", None)
    if detector is None:
        detector = extract_face._detector = MTCNN()
    # detect faces in the image
    results = detector.detect_faces(pixels)
    if not results:
        # Previously this crashed with a bare IndexError on results[0].
        raise ValueError("no face detected in %s" % filename)
    # bounding box of the first detected face
    x1, y1, width, height = results[0]['box']
    # MTCNN can report slightly negative coordinates; clamp them
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    # extract the face crop
    face = pixels[y1:y2, x1:x2]
    # resize pixels to the model's input size
    image = Image.fromarray(face)
    image = image.resize(required_size)
    face_array = np.asarray(image)
    return face_array


def load_face(dir):
    """Extract one face array per image file found directly in ``dir``.

    Returns a list of face pixel arrays (see extract_face). Note: the
    parameter name shadows the ``dir`` builtin; kept for compatibility.
    """
    faces = list()
    # enumerate files
    for filename in os.listdir(dir):
        # os.path.join works whether or not `dir` carries a trailing
        # slash, unlike the previous string concatenation.
        path = os.path.join(dir, filename)
        face = extract_face(path)
        faces.append(face)
    return faces


def load_dataset(dir):
    """Load a directory-per-class face dataset rooted at ``dir``.

    Each subdirectory's name is used as the label for every image inside
    it. Returns (X, y) as numpy arrays of face crops and string labels.
    """
    # lists for faces and labels
    X, y = list(), list()
    for subdir in os.listdir(dir):
        # Trailing '' keeps a trailing separator on the path, which
        # load_face's path handling expects.
        path = os.path.join(dir, subdir, '')
        # Skip stray files such as .DS_Store: only directories are classes.
        if not os.path.isdir(path):
            continue
        faces = load_face(path)
        labels = [subdir] * len(faces)
        print("loaded %d sample for class: %s" % (len(faces), subdir))  # print progress
        X.extend(faces)
        y.extend(labels)
    return np.asarray(X), np.asarray(y)


def get_embedding(model, face):
    """Return the embedding vector ``model`` produces for a single face.

    The face pixels are standardized (zero mean, unit variance over the
    whole image) and fed to the model as a one-sample batch.
    """
    pixels = face.astype('float32')
    # per-image standardization
    pixels = (pixels - pixels.mean()) / pixels.std()
    # the model expects a leading batch dimension: (H, W, C) -> (1, H, W, C)
    batch = pixels[np.newaxis, ...]
    predictions = model.predict(batch)
    # drop the batch dimension again
    return predictions[0]
4 changes: 2 additions & 2 deletions README.md
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
# APPI-AI

# Driver-Drowsiness-Detection
Real-time drowsiness detection for drivers using head pose estimation, eye status (open or closed) and yawn detection.
74 changes: 74 additions & 0 deletions TrainModel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import numpy as np
from tensorflow.python.keras.models import load_model
from PIL import Image
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import Normalizer, LabelEncoder
from sklearn.svm import SVC
import os, pickle
from Preprocess import extract_face, load_face, load_dataset, get_embedding

# load train dataset
trainX, trainy = load_dataset('./faces-dataset/train/')
print(trainX.shape, trainy.shape)

# load test dataset
testX, testy = load_dataset('./faces-dataset/val/')
print(testX.shape, testy.shape)

# load facenet pretrained model
facenet_model = load_model('facenet_keras.h5')
print('Loaded Model')

# Convert each face in both splits into its FaceNet embedding; the
# comprehensions replace the previous manual append loops.
emdTrainX = np.asarray([get_embedding(facenet_model, face) for face in trainX])
print(emdTrainX.shape)

emdTestX = np.asarray([get_embedding(facenet_model, face) for face in testX])
print(emdTestX.shape)

# print dataset characteristics
print("Dataset: train=%d, test=%d" % (emdTrainX.shape[0], emdTestX.shape[0]))

# L2-normalize the embedding vectors before feeding the linear SVM
in_encoder = Normalizer()
emdTrainX_norm = in_encoder.transform(emdTrainX)
emdTestX_norm = in_encoder.transform(emdTestX)

# label encode targets
out_encoder = LabelEncoder()
out_encoder.fit(trainy)

# Persist the class list so PredictFaces.py can rebuild the encoder.
np.save('classes.npy', out_encoder.classes_)

trainy_enc = out_encoder.transform(trainy)
testy_enc = out_encoder.transform(testy)

# Fit a linear SVM on the normalized embeddings; probability=True is
# required so PredictFaces.py can call predict_proba at inference time.
model = SVC(kernel='linear', probability=True)
model.fit(emdTrainX_norm, trainy_enc)

# predict on both splits
yhat_train = model.predict(emdTrainX_norm)
yhat_test = model.predict(emdTestX_norm)

# score
score_train = accuracy_score(trainy_enc, yhat_train)
score_test = accuracy_score(testy_enc, yhat_test)

# summarize
print('Accuracy: train=%.3f, test=%.3f' % (score_train*100, score_test*100))

# save the trained classifier for PredictFaces.py
with open('SVCtrainedModel.pkl', 'wb') as f:
    pickle.dump(model, f)
Binary file added __pycache__/Preprocess.cpython-37.pyc
Binary file not shown.
1,075 changes: 0 additions & 1,075 deletions face-recognition-with-facenet.ipynb

This file was deleted.

Loading

0 comments on commit c041452

Please sign in to comment.