finished documentation draft

Askill 2020-05-20 17:16:15 +02:00
parent ffcfa37916
commit 19bdb6f979
8 changed files with 36 additions and 11 deletions

View File

@@ -2,7 +2,7 @@
## Quick Start
This section contains all the information you need to deploy the application quickly.
### Requirements
@@ -59,13 +59,28 @@ In *./application/config.py* you can configure the relevant parameters.
tolerance = 0.6
useCUDA = True
videoSource = "http://192.168.178.56:8080/video"
scaleInput = 0.5
The video source can be a video file, a live stream, or a webcam:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#goal
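A minimal sketch of how OpenCV accepts each of these source types (the file name is a placeholder; the stream URL is the one from *config.py*):

```python
import cv2

# all three forms are valid arguments to cv2.VideoCapture
video = cv2.VideoCapture(0)                                     # webcam (device index)
# video = cv2.VideoCapture("recording.mp4")                     # video file (placeholder name)
# video = cv2.VideoCapture("http://192.168.178.56:8080/video")  # live stream (from config.py)

ret, frame = video.read()  # ret is False if no frame could be read
video.release()
```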
**After a successful deployment it should look like this:**
![./images/1.png](./images/1.png)
## Components
The application uses client-side rendering with JS templating.
On the server side, only the basic structure, the skeleton so to speak, is rendered.
The backend consists of three components:
- Camera
- Server
- Database
**Component diagram:**
![](./images/2.png)
## API
The API is RESTful and has two endpoints with the prefix */api/v1*.
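As a rough illustration of how such an endpoint could be queried (the endpoint name and host below are hypothetical, since this excerpt does not list the actual routes):

```python
import requests

BASE = "http://localhost:5000/api/v1"  # hypothetical host and port

# "persons" is a hypothetical endpoint name, for illustration only
response = requests.get(f"{BASE}/persons")
response.raise_for_status()
print(response.json())
```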
@@ -102,3 +117,8 @@ Structure:
## Data Structure
The SQLAlchemy ORM is used for database access.
Persons and fingerprints exist in a 1:N relationship (sketched after the diagram below).
Fingerprints are currently ignored.
**Database structure**
![](./images/3.png)
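A minimal sketch of how this 1:N relationship could be declared with SQLAlchemy. The Person columns follow the names used elsewhere in the code (person_id, fname, lname, face); the Fingerprint model is hypothetical, since its columns are not shown in this diff:

```python
from sqlalchemy import Column, ForeignKey, Integer, LargeBinary, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Person(Base):
    __tablename__ = "person"
    person_id = Column(Integer, primary_key=True)
    fname = Column(String)
    lname = Column(String)
    face = Column(LargeBinary)  # encoded face image, as queried in initFaceRec
    fingerprints = relationship("Fingerprint", back_populates="person")

class Fingerprint(Base):  # hypothetical model; fingerprints are currently ignored
    __tablename__ = "fingerprint"
    fingerprint_id = Column(Integer, primary_key=True)
    person_id = Column(Integer, ForeignKey("person.person_id"))
    person = relationship("Person", back_populates="fingerprints")
```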

View File

@@ -14,4 +14,4 @@ model = "hog" # can be hog or cnn
tolerance = 0.6
useCUDA = True # only relevant if the dlib installer glitched during installation
videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams
scaleInput = 0.5 # factor by which input frames are scaled before processing
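Whether useCUDA can take effect depends on how dlib was built; a quick way to check:

```python
import dlib

# True only if dlib was compiled with CUDA support;
# if this prints False, useCUDA = True has nothing to enable
print(dlib.DLIB_USE_CUDA)
```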

View File

@@ -13,8 +13,6 @@ connection = engine.connect()
Base = declarative_base()
Session = sessionmaker(bind=engine)
lastImage = ""
class Gender(enum.Enum):
other = "Other"
male = "Male"

View File

@@ -141,7 +141,7 @@ class Camera(Resource):
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
def genProcessed(self, url=None):
"""Video streaming generator function."""
"""Video streaming generator function for processed video."""
url = config.videoSource
while True:
frame = fr.identifyFaceVideo(url).tobytes()
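For context, a generator like this is typically wrapped in a multipart HTTP response. A minimal sketch assuming Flask, with get_jpeg_frame as a hypothetical stand-in for the actual frame source:

```python
from flask import Flask, Response

app = Flask(__name__)

def gen():
    while True:
        frame = get_jpeg_frame()  # hypothetical helper returning JPEG bytes
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/stream')
def stream():
    # the boundary name must match the one used between frames in the generator
    return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
```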

View File

@@ -17,8 +17,10 @@ known_faces = []
known_names = []
def initFaceRec():
''' Initializes facial recognition with the faces currently in the db '''
dlib.DLIB_USE_CUDA = config.useCUDA
print('Loading known faces...', dlib.DLIB_USE_CUDA)
print('LOADING known faces...', dlib.DLIB_USE_CUDA)
session = Session()
for face, name in session.query(Person.face, Person.person_id).all():
# Load an image
@@ -26,12 +28,12 @@ def initFaceRec():
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Get 128-dimension face encoding
# face_encodings always returns a list of found faces; for this purpose we take only the first one (assuming one face per image, since a person cannot appear twice in the same image)
encoding = face_recognition.face_encodings(image)[0]
# Append encodings and name
known_faces.append(encoding)
known_names.append(name)
print('DONE Loading known faces...')
session.close()
@@ -43,8 +45,8 @@ def identifyFace(image):
locations = face_recognition.face_locations(image, model=MODEL)
encodings = face_recognition.face_encodings(image, locations)
# res is the return object; key: name, value: matching distance (lower means a closer match)
res = {}
for face_encoding, face_location in zip(encodings, locations):
results = face_recognition.face_distance(known_faces, face_encoding)
res = {known_names[i]: results[i] for i in range(0, len(results)) }
@@ -53,11 +55,14 @@ def identifyFace(image):
def identifyFaceVideo(url):
# always fetch the latest image from the url
video = cv2.VideoCapture(url)
image = video.read()[1]
image = cv2.resize(image,None,fx=0.5,fy=0.5)
# scale the frame by the configured factor
image = cv2.resize(image,None,fx=config.scaleInput,fy=config.scaleInput)
ret, image = cv2.imencode(".png", image)
# convert image to a format readable by the face_recognition lib
nparr = np.frombuffer(image, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
@@ -66,24 +71,26 @@ def identifyFaceVideo(url):
face_locations = {} # face locations to be drawn
# can be multithreaded here
# compares each face against all faces in DB
for face_encoding, face_location in zip(encodings, locations):
face_locations.update(compareFace(face_encoding, face_location))
session = Session()
# marks faces and retrieves person names by id
for k, v in face_locations.items():
# Paint frame
cv2.rectangle(image, v[0], v[1], [255, 0, 0], FRAME_THICKNESS)
# Write the name
name = " ".join(session.query(Person.fname, Person.lname).filter(Person.person_id == int(k)).first())
cv2.putText(image, name, v[0], cv2.FONT_HERSHEY_SIMPLEX, 1.5, [255, 0, 255], FONT_THICKNESS)
session.close()
# encode the annotated image as JPEG for the response
image = cv2.imencode(".jpg", image)[1]
return image
def compareFace(face_encoding, face_location):
''' Return a dict with the face location and the id of the matched person '''
results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
face_locations = {}
match = None
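For context, compare_faces is essentially a threshold on face_distance, which is the Euclidean distance between 128-dimensional encodings. A self-contained illustration using dummy encodings in place of real ones:

```python
import numpy as np

TOLERANCE = 0.6
# dummy 128-dimensional encodings standing in for real face encodings
known_faces = [np.zeros(128), np.full(128, 0.1)]
face_encoding = np.zeros(128)

# face_distance computes the Euclidean norm per known face;
# compare_faces then checks distance <= tolerance element-wise
distances = np.linalg.norm(np.array(known_faces) - face_encoding, axis=1)
matches = list(distances <= TOLERANCE)
print(matches)  # [True, False], since sqrt(128 * 0.01) ~ 1.13 > 0.6
```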

BIN  images/1.png  (new file, 410 KiB; binary file not shown)

BIN  images/2.png  (new file, 40 KiB; binary file not shown)

BIN  images/3.png  (new file, 35 KiB; binary file not shown)