FacialRecognition-Demo/application/face_rec.py

import base64
from io import BytesIO

import cv2
import face_recognition
import numpy as np
from PIL import Image

from application.db import Session, Person

KNOWN_FACES_DIR = 'known_faces'
UNKNOWN_FACES_DIR = 'unknown_faces'
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'hog'  # default: 'hog'; 'cnn' uses a CUDA-accelerated (if available) deep-learning model

# Returns an (R, G, B)-style color derived from a name
def name_to_color(name):
    # Take the first 3 letters, lowercased.
    # Lowercase ord() values range from 97 to 122: subtract 97, multiply by 8.
    color = [(ord(c.lower()) - 97) * 8 for c in name[:3]]
    return color
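# Illustrative example of the mapping above (purely deterministic):
#   name_to_color('Alice') -> [0, 88, 64]    # 'a' -> 0, 'l' -> 88, 'i' -> 64
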
def readb64(base64_string):
    # Decode the base64 payload into bytes, open it with PIL via an in-memory
    # buffer (BytesIO, since image data is binary), then convert from PIL's RGB
    # to OpenCV's BGR channel order.
    sbuf = BytesIO(base64.b64decode(base64_string))
    pimg = Image.open(sbuf)
    return cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
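# Usage sketch (hypothetical; `b64_string` stands in for client-supplied data and
# is not defined in this module):
#   frame = readb64(b64_string)   # BGR numpy array, e.g. shape (480, 640, 3)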

known_faces = []
known_names = []

def initFaceRec():
    print('Loading known faces...')
    session = Session()
    # Known faces come from the database: each Person row stores a base64-encoded
    # face image, and person_id is used as the label.
    for face, name in session.query(Person.face, Person.person_id).all():
        # Decode the base64 string into an OpenCV image
        nparr = np.frombuffer(base64.b64decode(face), np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # face_recognition works on RGB images, while cv2.imdecode returns BGR
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Get the 128-dimension face encoding.
        # face_encodings() returns one encoding per detected face; we assume one
        # face per stored image and take the first, skipping images with no face.
        encodings = face_recognition.face_encodings(image)
        if not encodings:
            continue
        known_faces.append(encodings[0])
        known_names.append(name)
    session.close()

def identifyFace(image):
    print('Processing unknown faces...')
    # Decode the base64-encoded image into an OpenCV image
    nparr = np.frombuffer(base64.b64decode(image), np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # face_recognition works on RGB images, while cv2.imdecode returns BGR
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    res = {}
    for face_encoding, face_location in zip(encodings, locations):
        # face_distance returns one distance per known face (lower means more
        # similar), in the same order as known_faces; compare_faces could be
        # used instead for True/False matches against TOLERANCE.
        results = face_recognition.face_distance(known_faces, face_encoding)
        # Map each known person_id to its distance from this face. Note that if
        # several faces are detected, res keeps only the last face's distances.
        res = {known_names[i]: float(results[i]) for i in range(len(results))}
    return res

#identifyFace("")
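
# Minimal usage sketch, assuming the database behind Session already holds Person
# rows with base64-encoded face images; 'sample.jpg' is a placeholder path, not
# part of the original module.
if __name__ == '__main__':
    initFaceRec()
    with open('sample.jpg', 'rb') as f:
        encoded = base64.b64encode(f.read())
    distances = identifyFace(encoded)
    # Lower distance means a closer match; TOLERANCE (0.6) is a common cut-off.
    for person_id, distance in sorted(distances.items(), key=lambda kv: kv[1]):
        verdict = 'match' if distance <= TOLERANCE else 'no match'
        print(f'{person_id}: {distance:.3f} ({verdict})')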