import dlib
import face_recognition
import os
import cv2
from application.db import Session, Person
import base64
import numpy as np
from io import StringIO
import application.config as config
TOLERANCE = config.tolerance  # max face distance still counted as a match; lower is stricter (face_recognition's default is 0.6)
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = config.model  # default: 'hog'; the alternative is 'cnn', a CUDA-accelerated (if available) deep-learning pretrained model
known_faces = []
known_names = []
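# Parallel lists: known_faces[i] holds the 128-d encoding for the person whose person_id is stored in known_names[i]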


def initFaceRec():
    ''' Initializes facial recognition with the faces currently stored in the DB '''

    # dlib.DLIB_USE_CUDA reports whether dlib was built with CUDA support;
    # GPU acceleration only takes effect if dlib was compiled with CUDA.
    dlib.DLIB_USE_CUDA = config.useCUDA
    print('LOADING known faces...', dlib.DLIB_USE_CUDA)
    session = Session()

    for face, name in session.query(Person.face, Person.person_id).all():
        # Decode the base64-encoded face image stored in the DB
        nparr = np.frombuffer(base64.b64decode(face), np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

        # Get 128-dimension face encoding
        encoding = face_recognition.face_encodings(image)[0]

        # Append encoding and person_id
        known_faces.append(encoding)
        known_names.append(name)

    print('DONE Loading known faces...')
    session.close()


def identifyFace(image):
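    ''' Identify faces in a base64-encoded image; returns a dict mapping person_id to the face distance against the detected face (lower = closer match) '''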
    print('Identifying Face')
    nparr = np.frombuffer(base64.b64decode(image), np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)

    # res is the return object; key: person_id, value: face distance (lower = closer match).
    # If several faces are detected, the distances of the last one processed are returned.
    res = {}
    for face_encoding, face_location in zip(encodings, locations):
        results = face_recognition.face_distance(known_faces, face_encoding)
        res = {known_names[i]: results[i] for i in range(len(results))}

    return res


def identifyFaceVideo(url):
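    ''' Grab the latest frame from the stream at url, draw a frame and name around every detected face, and return the annotated image as a JPEG-encoded buffer '''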
    # always fetch the latest image from the url
    video = cv2.VideoCapture(url)
    image = video.read()[1]
    video.release()

    # scale
    image = cv2.resize(image, None, fx=config.scaleInput, fy=config.scaleInput)
    ret, image = cv2.imencode(".png", image)

    # convert image to a format readable by the face_recognition lib
    nparr = np.frombuffer(image, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)

    face_locations = {}  # face locations to be drawn

    # can be multithreaded here
    # compares each face against all faces in the DB
    for face_encoding, face_location in zip(encodings, locations):
        face_locations.update(compareFace(face_encoding, face_location))

    session = Session()
    # mark faces and retrieve names by person_id
    for k, v in face_locations.items():
        # Paint frame
        cv2.rectangle(image, v[0], v[1], [255, 0, 0], FRAME_THICKNESS)

        # Write a name; faces that matched nobody in the DB are labelled 'Unknown'
        if k == 'None':
            name = 'Unknown'
        else:
            name = " ".join(session.query(Person.fname, Person.lname).filter(Person.person_id == int(k)).first())
        cv2.putText(image, name, v[0], cv2.FONT_HERSHEY_SIMPLEX, 1.5, [255, 0, 255], FONT_THICKNESS)

    session.close()

    image = cv2.imencode(".jpg", image)[1]
    return image


def compareFace(face_encoding, face_location):
    ''' Returns a dict mapping the matched person_id (or 'None') to the face's (top_left, bottom_right) corner points '''
    results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)

    face_locations = {}
    match = None
    if True in results:  # If at least one is True, take the label of the first match found
        match = known_names[results.index(True)]

    # face_recognition returns a location as (top, right, bottom, left);
    # convert it to the (x, y) corner points expected by cv2 drawing calls
    top_left = (face_location[3], face_location[0])
    bottom_right = (face_location[1], face_location[2])

    face_locations[str(match)] = (top_left, bottom_right)
    return face_locations
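

# Minimal usage sketch, assuming the DB already holds people with stored face
# images and that 'snapshot.jpg' (an illustrative path) contains a face:
# load the known faces once, then identify the faces in a base64-encoded image.
if __name__ == '__main__':
    initFaceRec()
    with open('snapshot.jpg', 'rb') as f:
        b64_image = base64.b64encode(f.read())
    # identifyFace returns {person_id: face distance}; lower distance means a closer match
    print(identifyFace(b64_image))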