diff --git a/README.md b/README.md
index 609ddea..4f93017 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@

 ## Quick Start

-This section contains all the information you need to deploy the application quickly.
+This section contains all the information you need to deploy the application quickly.

 ### Requirements

@@ -59,13 +59,28 @@ In *./application/config.py* you can configure the relevant parameters.
     tolerance = 0.6
     useCUDA = True
     videoSource = "http://192.168.178.56:8080/video"
+    scaleInput = 0.5

 The video source can be a video file, a live stream, or a webcam:
 https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#goal

+**After a successful deployment it should look like this:**
+
+![](./images/1.png)

 ## Components

+The application uses client-side rendering with JS templating.
+Server-side, only the basic structure (the scaffolding, so to speak) is rendered.
+
+The backend consists of 3 components:
+- Camera
+- Server
+- Database
+
+**Component diagram:**
+![](./images/2.png)
+
 ## API

 The API is RESTful and has 2 endpoints with the */api/v1* prefix.
@@ -102,3 +117,8 @@ Structure:

 ## Data structure

+SQLAlchemy is used as the ORM for database access.
+Persons and fingerprints exist in a 1:N relationship.
+Fingerprints are currently ignored.
+**Database structure**
+![](./images/3.png)
\ No newline at end of file
diff --git a/application/config.py b/application/config.py
index 25a6512..18db28c 100644
--- a/application/config.py
+++ b/application/config.py
@@ -14,4 +14,4 @@ model = "hog" # can be hog or cnn
 tolerance = 0.6
 useCUDA = True # only relevant if the dlib installer glitched out during installation
 videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams
-
+scaleInput = 0.5
diff --git a/application/db.py b/application/db.py
index f03bfbf..cccb40b 100644
--- a/application/db.py
+++ b/application/db.py
@@ -13,8 +13,6 @@ connection = engine.connect()
 Base = declarative_base()
 Session = sessionmaker(bind=engine)

-lastImage = ""
-
 class Gender(enum.Enum):
     other = "Other"
     male = "Male"
diff --git a/application/endpoints.py b/application/endpoints.py
index cb61e57..33419a5 100644
--- a/application/endpoints.py
+++ b/application/endpoints.py
@@ -141,7 +141,7 @@ class Camera(Resource):
                    b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

     def genProcessed(self, url=None):
-        """Video streaming generator function."""
+        """Video streaming generator function for processed video."""
         url = config.videoSource
         while True:
             frame = fr.identifyFaceVideo(url).tobytes()
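For context on the `genProcessed` change above: a generator that yields this multipart framing is normally wrapped in a streaming HTTP response (the `Camera(Resource)` context suggests flask_restful is in play). Below is a minimal sketch of that wiring, assuming a plain Flask route; the route path and the video source are illustrative placeholders, not taken from the repository.

```python
# Minimal sketch, not the project's actual endpoint: shows how a multipart
# JPEG generator like genProcessed is typically served. The route path and
# VIDEO_SOURCE are placeholder assumptions.
import cv2
from flask import Flask, Response

app = Flask(__name__)
VIDEO_SOURCE = 0  # any cv2-readable source: webcam index, video file, or stream URL

def gen_frames():
    """Yield JPEG-encoded frames wrapped in multipart boundaries."""
    video = cv2.VideoCapture(VIDEO_SOURCE)
    while True:
        ok, frame = video.read()
        if not ok:
            break
        jpeg = cv2.imencode(".jpg", frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg + b'\r\n')

@app.route("/video_feed")  # placeholder route
def video_feed():
    # multipart/x-mixed-replace tells the browser to replace each frame in place
    return Response(gen_frames(),
                    mimetype="multipart/x-mixed-replace; boundary=frame")
```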
diff --git a/application/face_rec.py b/application/face_rec.py
index ff30170..b7f1653 100644
--- a/application/face_rec.py
+++ b/application/face_rec.py
@@ -17,8 +17,10 @@ known_faces = []
 known_names = []

 def initFaceRec():
+    ''' Initializes facial recognition with the faces currently in the db '''
+
     dlib.DLIB_USE_CUDA = config.useCUDA
-    print('Loading known faces...', dlib.DLIB_USE_CUDA)
+    print('LOADING known faces...', dlib.DLIB_USE_CUDA)
     session = Session()
     for face, name in session.query(Person.face, Person.person_id).all():
         # Load an image
@@ -26,12 +28,12 @@ def initFaceRec():
         image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

         # Get 128-dimension face encoding
-        # Always returns a list of found faces, for this purpose we take first face only (assuming one face per image as you can't be twice on one image)
         encoding = face_recognition.face_encodings(image)[0]

         # Append encodings and name
         known_faces.append(encoding)
         known_names.append(name)
+    print('DONE Loading known faces...')
     session.close()

@@ -43,8 +45,8 @@
     locations = face_recognition.face_locations(image, model=MODEL)
     encodings = face_recognition.face_encodings(image, locations)

+    # res is the return object; key: name, value: matching score
     res = {}
-
     for face_encoding, face_location in zip(encodings, locations):
         results = face_recognition.face_distance(known_faces, face_encoding)
         res = {known_names[i]: results[i] for i in range(0, len(results)) }
@@ -53,11 +55,14 @@

 def identifyFaceVideo(url):
+    # always grab the latest image from the url
     video = cv2.VideoCapture(url)
     image = video.read()[1]
-    image = cv2.resize(image,None,fx=0.5,fy=0.5)
+    # scale the input frame
+    image = cv2.resize(image,None,fx=config.scaleInput,fy=config.scaleInput)
     ret, image = cv2.imencode(".png", image)

+    # convert the image to a format readable by the face_recognition lib
     nparr = np.fromstring(image, np.uint8)
     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

@@ -66,24 +71,26 @@
     face_locations = {} #face locations to be drawn

+    # can be multithreaded here
+    # compare each face against all faces in the DB
     for face_encoding, face_location in zip(encodings, locations):
         face_locations.update(compareFace(face_encoding, face_location))

     session = Session()
+    # draw face boxes and retrieve names by person id
     for k, v in face_locations.items():
         # Paint frame
         cv2.rectangle(image, v[0], v[1], [255, 0, 0], FRAME_THICKNESS)

         # Write a name
         name = " ".join(session.query(Person.fname, Person.lname).filter(Person.person_id == int(k)).first())
         cv2.putText(image, name, v[0], cv2.FONT_HERSHEY_SIMPLEX, 1.5, [255, 0, 255], FONT_THICKNESS)
-
-    # Show image
     session.close()

     image = cv2.imencode(".jpg", image)[1]
     return image

 def compareFace(face_encoding, face_location):
+    ''' Return a dict with face locations and the id of the matched person '''
     results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
     face_locations = {}
     match = None
diff --git a/images/1.png b/images/1.png
new file mode 100644
index 0000000..da36f08
Binary files /dev/null and b/images/1.png differ
diff --git a/images/2.png b/images/2.png
new file mode 100644
index 0000000..d37c89b
Binary files /dev/null and b/images/2.png differ
diff --git a/images/3.png b/images/3.png
new file mode 100644
index 0000000..8fce41f
Binary files /dev/null and b/images/3.png differ
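A few standalone sketches to make the changes above easier to follow. First, the preprocessing in `identifyFaceVideo`: the frame is scaled by the new `config.scaleInput` and round-tripped through an in-memory PNG so the face_recognition lib receives a plain uint8 BGR array. The frame below is synthetic; note too that `np.frombuffer` is the non-deprecated equivalent of the `np.fromstring` call left in the hunk.

```python
import cv2
import numpy as np

scaleInput = 0.5  # mirrors the new config value; 0.5 halves each dimension

# synthetic stand-in for video.read()[1]
frame = np.zeros((480, 640, 3), dtype=np.uint8)

# scaling down before detection means quadratically fewer pixels to scan,
# at the cost of missing small or distant faces
frame = cv2.resize(frame, None, fx=scaleInput, fy=scaleInput)

# round-trip through an in-memory PNG, as identifyFaceVideo does
ok, buf = cv2.imencode(".png", frame)
nparr = np.frombuffer(buf, np.uint8)  # replaces the deprecated np.fromstring
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
print(image.shape)  # (240, 320, 3)
```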
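Second, the matching step behind `compareFace`: each probe encoding is compared against every known encoding, with `TOLERANCE` (0.6 in the config) as the distance threshold. A minimal sketch using the public face_recognition API; the two image file names are placeholders you must supply.

```python
import face_recognition

TOLERANCE = 0.6  # same value as the config default; lower is stricter

# placeholder file names: substitute any two face images
known = face_recognition.face_encodings(
    face_recognition.load_image_file("known.jpg"))[0]
probe = face_recognition.face_encodings(
    face_recognition.load_image_file("probe.jpg"))[0]

# face_distance returns one euclidean distance per known encoding
distances = face_recognition.face_distance([known], probe)
# compare_faces applies the tolerance threshold to those same distances
matches = face_recognition.compare_faces([known], probe, TOLERANCE)

print(distances[0], matches[0])  # e.g. 0.42 True for the same person
```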
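Third, the README says the API is RESTful with 2 endpoints under */api/v1*, but their concrete paths are not part of this diff. A hypothetical client call, with the resource name as an explicit placeholder:

```python
# Hypothetical: "persons" is a placeholder; the real endpoint paths under
# /api/v1 are not shown in this diff.
import requests

resp = requests.get("http://localhost:5000/api/v1/persons")
resp.raise_for_status()
print(resp.json())
```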
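Finally, the 1:N person/fingerprint mapping from the data structure section. A minimal SQLAlchemy sketch: `person_id`, `fname`, `lname`, `face`, and the `Gender` enum appear in the diff; the `Fingerprint` table, the remaining columns, and the in-memory engine are assumptions for illustration.

```python
import enum

from sqlalchemy import (Column, Enum, ForeignKey, Integer, LargeBinary,
                        String, create_engine)
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

class Gender(enum.Enum):
    other = "Other"
    male = "Male"
    female = "Female"  # assumption: the hunk is cut off after "male"

class Person(Base):
    __tablename__ = "person"
    person_id = Column(Integer, primary_key=True)
    fname = Column(String)
    lname = Column(String)
    gender = Column(Enum(Gender))
    face = Column(LargeBinary)  # encoded reference image, as read by initFaceRec
    fingerprints = relationship("Fingerprint", back_populates="person")

class Fingerprint(Base):
    """1:N with Person; currently ignored by the application, per the README."""
    __tablename__ = "fingerprint"
    fingerprint_id = Column(Integer, primary_key=True)
    person_id = Column(Integer, ForeignKey("person.person_id"))
    person = relationship("Person", back_populates="fingerprints")

engine = create_engine("sqlite://")  # assumption: the real URL lives in db.py
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)

session = Session()
session.add(Person(fname="Ada", lname="Lovelace", gender=Gender.other))
session.commit()
```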