Commit 99587795 authored by Kyle Anderson's avatar Kyle Anderson

Working reworked register user

The reworked user registration, which uses custom methods, works correctly in basic testing. Other tasks will be continued next.
parent d55e0090
import os
import cv2
# Directory holding persistent application data.
DATA_DIR = "./data"
# Pickle database of known face embeddings, stored under DATA_DIR.
# (Removed stale duplicate assignment to "./dataset/faces.pickle" that was
# immediately overwritten by this one.)
DATABASE_LOC = os.path.join(DATA_DIR, "faces.pickle")
# Directory containing haar cascade XML files.
CASCADE_DIR = "./cascades"
# Output location for pickle database files
OUTPUT_DIR = "./output"
# Bundled resource files (detection/embedding models).
RES_DIRECTORY = "./res"
# Directory for the face detection model.
FACE_DETECTION_MODEL_DIR = os.path.join(RES_DIRECTORY, "face_detection_model")
# Torch model used to compute facial embeddings.
EMBEDDINGS_PROCESSOR_LOC = os.path.join(RES_DIRECTORY, "openface_nn4.small2.v1.t7")
def display_frame(frame):
......@@ -13,3 +24,41 @@ def start_video_stream(camera: int):
Also waits for the video stream to open before returning it."""
video_stream = cv2.VideoCapture(0)
return video_stream
def load_cascade(cascade_loc: str) -> cv2.CascadeClassifier:
    """
    Opens the haar cascade classifier stored at the given path.
    :param cascade_loc: The file location of the cascade XML file.
    :return: The loaded CascadeClassifier.
    """
    classifier = cv2.CascadeClassifier(cascade_loc)
    return classifier
def load_detector(proto_path: str, model_path: str):
    """
    Loads a caffe detector network from its prototxt and model files.
    :param proto_path: The path location of the prototext file.
    :param model_path: The path to the caffe model.
    :return: The loaded detector network.
    """
    detector = cv2.dnn.readNetFromCaffe(proto_path, model_path)
    return detector
def load_embedding_model(model_path: str):
    """
    Loads the torch embedding network stored at the given location.
    :param model_path: The path to the torch model file.
    :return: The loaded embedding network.
    """
    embedder = cv2.dnn.readNetFromTorch(model_path)
    return embedder
# Expected filenames of the caffe face-detection model and its prototxt
# definition inside a detector directory.
CAFFE_MODEL_NAME = "res10_300x300_ssd_iter_140000.caffemodel"
PROTOTXT_NAME = "deploy.prototxt"
def load_detector_from_dir(detector_dir: str):
    """
    Loads the caffe face detector from a directory containing the standard
    prototxt and caffemodel files (see PROTOTXT_NAME / CAFFE_MODEL_NAME).
    :param detector_dir: Directory holding the detector model files.
    :return: The loaded detector network.
    """
    return load_detector(os.path.join(detector_dir, PROTOTXT_NAME),
                         os.path.join(detector_dir, CAFFE_MODEL_NAME))
import os
import cv2
import imutils
import numpy
from imutils import paths as impaths
import common
import data_handler
# Width (in pixels) to which each dataset image is resized before detection.
IMAGE_RESIZE_WIDTH = 600
def detect_faces(face_detector, image):
    """
    Runs the given DNN face detector over an image.
    :param face_detector: The face_detector.
    :param image: The image to be processed.
    :return: The detected faces in the image.
    """
    resized = cv2.resize(image, (300, 300))
    blob = cv2.dnn.blobFromImage(resized, scalefactor=1.0, size=(300, 300),
                                 mean=(104.0, 177.0, 123.0), swapRB=False, crop=False)
    face_detector.setInput(blob)
    detections = face_detector.forward()
    return detections
def find_best_match(faces, image, min_confidence: float = 0.5):
    """
    Finds the single face in the given list of faces with the best match.
    :param faces: The raw detection array to search through
                  (shape [1, 1, N, 7]; column 2 is confidence, 3:7 the box).
    :param image: The image from which the faces were detected.
    :param min_confidence: The minimum confidence in order for a face to be
                           considered recognized.
    :return: The best-matched face ROI (a view into image), or None if none
             were matched with a large enough confidence or the ROI is too small.
    """
    best_match = None
    if len(faces) > 0:
        # Assume each image has only one face: take the detection with the
        # largest probability of being a face.
        i = numpy.argmax(faces[0, 0, :, 2])
        confidence = faces[0, 0, i, 2]
        # Only continue if the confidence is enough.
        if confidence > min_confidence:
            img_height, img_width = image.shape[0:2]
            # Scale the normalized box back to pixel coordinates.
            box = faces[0, 0, i, 3:7] * numpy.array([img_width, img_height, img_width, img_height])
            startx, starty, endx, endy = box.astype("int")
            # Clamp the box to the image bounds: detections can extend past
            # the edges, and a negative start index would silently wrap to the
            # wrong end of the image, producing an empty or bogus ROI.
            startx, starty = max(0, startx), max(0, starty)
            endx, endy = min(img_width, endx), min(img_height, endy)
            # Extract face ROI and get dimensions for it.
            face = image[starty:endy, startx:endx]
            face_height, face_width = face.shape[0:2]
            # Don't match the face if it's too small.
            if face_width >= 20 and face_height >= 20:
                best_match = face
    return best_match
def extract_face_embeddings(face, embedding_cnn):
    """
    Extracts the facial embeddings for the given face.
    :param face: The face for which embeddings should be created.
    :param embedding_cnn: The embedding cnn to be used.
    :return: The embeddings for the face.
    """
    # Build a blob from the face ROI and run it through the embedder to
    # obtain a 128-d quantification for the face.
    blob = cv2.dnn.blobFromImage(face, scalefactor=1.0 / 255, size=(96, 96),
                                 mean=(0, 0, 0), swapRB=True, crop=False)
    embedding_cnn.setInput(blob)
    return embedding_cnn.forward()
def process_dataset(directory_location: str, detector_dir: str = common.FACE_DETECTION_MODEL_DIR,
                    embedding_model_path: str = common.EMBEDDINGS_PROCESSOR_LOC,
                    show_output: bool = False, file_output: str = None) -> dict:
    """
    Processes the images in the given directory for facial identification.
    :param directory_location: The path to a directory full of a dataset of images for the same person.
    Note that each subdirectory within this directory should be named the same as the user_id for the user.
    E.g:
    dataset
    --- some_user_id
        -- image1.png
        -- image2.png
    --- some_other_user_id
        -- image1.png
        -- image2.png
    :param detector_dir: String location of the detection file directory.
    :param embedding_model_path: The path to the embedding model.
    :param show_output: True to print progress, False otherwise.
    :param file_output: The pickle file to which the embeddings should be outputted. None means it won't be saved.
    :return: The processed dataset dictionary, with format { "user_id" : [encoding1, encoding2, ...] , ... }
    """
    # Dictionary with results.
    result_database = {}
    image_paths = list(impaths.list_images(directory_location))
    face_detector = common.load_detector_from_dir(detector_dir)
    embedding_cnn = common.load_embedding_model(embedding_model_path)
    for (i, image_path) in enumerate(image_paths):
        # The user id is the name of the directory that contains the image.
        current_user_id: str = image_path.split(os.path.sep)[-2]
        if show_output:
            print(f"Processing image {i + 1} for user {current_user_id}.")
        image = cv2.imread(image_path)
        # cv2.imread returns None for unreadable/corrupt files; skip such
        # images instead of crashing in imutils.resize.
        if image is None:
            if show_output:
                print(f"Skipping unreadable image {image_path}.")
            continue
        image = imutils.resize(image, width=IMAGE_RESIZE_WIDTH)
        faces = detect_faces(face_detector, image)
        face = find_best_match(faces, image)
        if face is not None:
            facial_embeddings = extract_face_embeddings(face, embedding_cnn)
            if facial_embeddings is not None and len(facial_embeddings) > 0:
                # Accumulate this user's embeddings under their id.
                result_database.setdefault(current_user_id, []).append(facial_embeddings)
    if file_output is not None:
        data_handler.write_database(file_output, result_database)
    return result_database
if __name__ == "__main__":
    import argparse

    # Command-line entry point: process a dataset directory and write the
    # resulting encodings to a pickle database.
    arg_parser = argparse.ArgumentParser(
        description="Registers users' facial encodings from a dataset of images containing their face.")
    arg_parser.add_argument("dataset", type=str, help="Location of the dataset which should be processed.")
    arg_parser.add_argument("output", type=str,
                            help="Location of the output pickle database file to which the encodings should be written.")
    parsed_args = arg_parser.parse_args()
    process_dataset(parsed_args.dataset, show_output=True, file_output=parsed_args.output)
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
<?xml version="1.0"?>
<opencv_storage>
<!-- Automatically converted from haarcascade2, window size = 64x16 -->
<haarcascade_pltzzz64x16_16STG type_id="opencv-haar-classifier">
<size>
64 16</size>
<stages>
<_>
<!-- stage 0 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
32 2 8 6 -1.</_>
<_>
32 4 8 2 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>1.6915600746870041e-002</threshold>
<left_val>-9.5547717809677124e-001</left_val>
<right_val>8.9129137992858887e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 4 6 10 -1.</_>
<_>
3 4 3 10 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.4228349328041077e-002</threshold>
<left_val>-9.2089319229125977e-001</left_val>
<right_val>8.8723921775817871e-001</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
55 0 8 6 -1.</_>
<_>
55 0 4 3 2.</_>
<_>
59 3 4 3 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-1.0168660432100296e-002</threshold>
<left_val>8.8940089941024780e-001</left_val>
<right_val>-7.7847331762313843e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
44 7 4 9 -1.</_>
<_>
44 10 4 3 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.0863260142505169e-003</threshold>
<left_val>-8.7998157739639282e-001</left_val>
<right_val>5.8651781082153320e-001</right_val></_></_></trees>
<stage_threshold>-2.0683259963989258e+000</stage_threshold>
<parent>-1</parent>
<next>-1</next></_>
<_>
<!-- stage 1 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
29 1 16 4 -1.</_>
<_>
29 3 16 2 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.9062159359455109e-002</threshold>
<left_val>-8.7765061855316162e-001</left_val>
<right_val>8.5373121500015259e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 5 9 8 -1.</_>
<_>
3 5 3 8 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.3903399705886841e-002</threshold>
<left_val>-9.2079448699951172e-001</left_val>
<right_val>7.5155001878738403e-001</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
44 0 20 14 -1.</_>
<_>
44 0 10 7 2.</_>
<_>
54 7 10 7 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-3.5404648631811142e-002</threshold>
<left_val>6.7834627628326416e-001</left_val>
<right_val>-9.0937072038650513e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
41 7 6 9 -1.</_>
<_>
43 7 2 9 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>6.2988721765577793e-003</threshold>
<left_val>-8.1054258346557617e-001</left_val>
<right_val>5.8985030651092529e-001</right_val></_></_>
<_>
<!-- tree 4 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 4 21 4 -1.</_>
<_>
7 4 7 4 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>3.4959490876644850e-003</threshold>
<left_val>-9.7632282972335815e-001</left_val>
<right_val>4.5473039150238037e-001</right_val></_></_></trees>
<stage_threshold>-1.6632349491119385e+000</stage_threshold>
<parent>0</parent>
<next>-1</next></_>
<_>
<!-- stage 2 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
31 2 11 6 -1.</_>
<_>
31 4 11 2 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.3864099755883217e-002</threshold>
<left_val>-9.3137168884277344e-001</left_val>
<right_val>8.2478952407836914e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
56 3 6 11 -1.</_>
<_>
59 3 3 11 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-2.5775209069252014e-002</threshold>
<left_val>8.5526448488235474e-001</left_val>
<right_val>-8.7574672698974609e-001</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
32 14 32 2 -1.</_>
<_>
32 15 32 1 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-1.0646049864590168e-002</threshold>
<left_val>8.5167151689529419e-001</left_val>
<right_val>-6.7789041996002197e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 2 8 14 -1.</_>
<_>
4 2 4 14 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.7000989764928818e-002</threshold>
<left_val>-8.0041092634201050e-001</left_val>
<right_val>6.4893317222595215e-001</right_val></_></_>
<_>
<!-- tree 4 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
19 0 22 6 -1.</_>
<_>
19 0 11 3 2.</_>
<_>
30 3 11 3 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>5.2989721298217773e-003</threshold>
<left_val>-9.5342522859573364e-001</left_val>
<right_val>5.0140267610549927e-001</right_val></_></_></trees>
<stage_threshold>-1.3346730470657349e+000</stage_threshold>
<parent>1</parent>
<next>-1</next></_>
<_>
<!-- stage 3 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
56 0 6 6 -1.</_>
<_>
56 0 3 3 2.</_>
<_>
59 3 3 3 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-6.9233630783855915e-003</threshold>
<left_val>8.2654470205307007e-001</left_val>
<right_val>-8.5396027565002441e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
32 0 14 12 -1.</_>
<_>
32 0 7 6 2.</_>
<_>
39 6 7 6 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>1.2539249658584595e-001</threshold>
<left_val>-1.2996139936149120e-002</left_val>
<right_val>-3.2377028808593750e+003</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
2 1 43 4 -1.</_>
<_>
2 3 43 2 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>6.3474893569946289e-002</threshold>
<left_val>-6.4648061990737915e-001</left_val>
<right_val>8.2302427291870117e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
34 10 30 5 -1.</_>
<_>
44 10 10 5 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>4.2217150330543518e-002</threshold>
<left_val>-7.5190877914428711e-001</left_val>
<right_val>6.3705182075500488e-001</right_val></_></_>
<_>
<!-- tree 4 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 9 9 5 -1.</_>
<_>
3 9 3 5 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.0000640302896500e-002</threshold>
<left_val>-6.2077498435974121e-001</left_val>
<right_val>6.1317932605743408e-001</right_val></_></_></trees>
<stage_threshold>-1.6521669626235962e+000</stage_threshold>
<parent>2</parent>
<next>-1</next></_>
<_>
<!-- stage 4 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
2 1 43 6 -1.</_>
<_>
2 3 43 2 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>9.2297486960887909e-002</threshold>
<left_val>-7.2764229774475098e-001</left_val>
<right_val>8.0554759502410889e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
53 4 9 8 -1.</_>
<_>
56 4 3 8 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>2.7613969519734383e-002</threshold>
<left_val>-7.0769268274307251e-001</left_val>
<right_val>7.3315787315368652e-001</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
36 4 14 8 -1.</_>
<_>
36 4 7 4 2.</_>
<_>
43 8 7 4 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>1.2465449981391430e-002</threshold>
<left_val>-8.4359270334243774e-001</left_val>
<right_val>5.7046437263488770e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
14 14 49 2 -1.</_>
<_>
14 15 49 1 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-2.3886829614639282e-002</threshold>
<left_val>8.2656508684158325e-001</left_val>
<right_val>-5.2783298492431641e-001</right_val></_></_></trees>
<stage_threshold>-1.4523630142211914e+000</stage_threshold>
<parent>3</parent>
<next>-1</next></_>
<_>
<!-- stage 5 -->
<trees>
<_>
<!-- tree 0 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
0 5 4 9 -1.</_>
<_>
2 5 2 9 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>1.8821349367499352e-002</threshold>
<left_val>-8.1122857332229614e-001</left_val>
<right_val>6.9127470254898071e-001</right_val></_></_>
<_>
<!-- tree 1 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
21 1 38 4 -1.</_>
<_>
21 3 38 2 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>6.1703320592641830e-002</threshold>
<left_val>-7.6482647657394409e-001</left_val>
<right_val>6.4212161302566528e-001</right_val></_></_>
<_>
<!-- tree 2 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
44 12 18 3 -1.</_>
<_>
53 12 9 3 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-1.6298670321702957e-002</threshold>
<left_val>5.0207728147506714e-001</left_val>
<right_val>-8.4020161628723145e-001</right_val></_></_>
<_>
<!-- tree 3 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
10 4 9 3 -1.</_>
<_>
13 4 3 3 3.</_></rects>
<tilted>0</tilted></feature>
<threshold>-4.9458951689302921e-003</threshold>
<left_val>6.1991941928863525e-001</left_val>
<right_val>-6.1633539199829102e-001</right_val></_></_>
<_>
<!-- tree 4 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
40 4 10 4 -1.</_>
<_>
45 4 5 4 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-5.1894597709178925e-003</threshold>
<left_val>4.4975179433822632e-001</left_val>
<right_val>-8.0651968717575073e-001</right_val></_></_>
<_>
<!-- tree 5 -->
<_>
<!-- root node -->
<feature>
<rects>
<_>
17 14 47 2 -1.</_>
<_>
17 15 47 1 2.</_></rects>
<tilted>0</tilted></feature>
<threshold>-1.8824130296707153e-002</threshold>
<left_val>6.1992841958999634e-001</left_val>
<right_val>-5.5643159151077271e-001</right_val></_></_>
<_>
<!-- tree 6 -->
<_>
<!-- root node -->
<feature> </