diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 67dd241..0000000 --- a/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM python -COPY ./certs /certs - -COPY ./ /app -RUN pip install -r /app/requirements.txt - -CMD python /app/run.py \ No newline at end of file diff --git a/README.md b/README.md index 4ba031d..609ddea 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,104 @@ -BioSys +# Gesichtserkennung Demo + +## Quick Start + +In diesem Abschnitt finden Sie alle nötigen Informationen zum schnellen Deployen der Applikation. + +### Anforderungen + + - Python 3.x + - pip (für Python 3) + +### Deployment +#### ohne CUDA + + pip install -r requirements.txt + python run.py + +#### mit CUDA + + original stackOverflow Antwort: + https://stackoverflow.com/questions/49731346/compile-dlib-with-cuda/57592670#57592670 + + Install Intel Performance Library: https://software.seek.intel.com/performance-libraries + Install cuDNN: https://developer.nvidia.com/cudnn + + pip install cmake + git clone https://github.com/davisking/dlib.git + cd dlib + git submodule init + git submodule update + mkdir build + cd build + cmake -D DLIB_USE_CUDA=1 -D USE_AVX_INSTRUCTIONS=1 ../ + cmake --build . --config Release + cd ../ + python setup.py install + cd ../ + + pip install -r requirements.txt + python run.py +### Konfiguration -# Notes +Die Konfigurationsdatei finden Sie im Ordner "application". +In *./application/config.py* können Sie die relevanten Parameter konfigurieren. -FingerprintID: 0 is the left pinky 9 is the right pinky, people with more than 10 fingers may use another system. 
\ No newline at end of file + + # Database config + databaseFile = "./test.sqlite" + echoDatabase = False + + # Web Server config + debug = True + port = '5001' + + # Face recognition config + model = "hog" # hog or cnn + tolerance = 0.6 + useCUDA = True + videoSource = "http://192.168.178.56:8080/video" + +Video Source kann eine Videodatei, ein live stream oder eine Webcam sein: +https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#goal + + +## Komponenten + +## API +Die API ist RESTful, besitzt 2 Endpoints mit dem Präfix */api/v1*. + +Struktur: + + /person/id?useFace + POST: + legt neues Person Objekt an + GET: + Parameter: + id: URL Parameter, id des gesuchten Objektes, optional + useFace: Query Parameter, letztes gespeicherte Bild wird zum identifizieren genutzt , optional + + Get: wenn nur id gesetzt ist + Verifizierung: wenn id und useFace gesetzt sind + Identifizierung: wenn useFace gesetzt ist + + PUT: + disabled + DELETE: + Löscht Objekt per ID + + /camera/type + POST: + speichert den aktuellen Frame + GET: + type - still (default): gibt letzten gespeicherten Frame + type - stream: gibt einen mjpeg Video Stream aus, welcher als Bild eingebettet werden kann + type - processed: mjpeg Video Stream mit umrandeten Gesichtern und eingezeichneten Namen + PUT: + disabled + DELETE: + disabled + + +## Datenstruktur diff --git a/application/__init__.py b/application/__init__.py index 3e471ed..a700db0 100644 --- a/application/__init__.py +++ b/application/__init__.py @@ -12,7 +12,6 @@ app = Flask(__name__) api = Api(app, version='1', contact={"name":""}, license={"name":"Online Dienst Dokumentation"}, api_spec_url='/api/swagger') - api.add_resource(endpoints.PersonList,'/api/v1/person/', '/api/v1/person/') api.add_resource(endpoints.Camera,'/api/v1/camera/', "/api/v1/camera/") diff --git a/application/config.py b/application/config.py index 6c2c8da..25a6512 100644 --- a/application/config.py +++ 
b/application/config.py @@ -1,2 +1,17 @@ +# Database config +databaseFile = "./test.sqlite" +echoDatabase = False + + +# Web Server config debug = True +port = '5001' + + +# Face recognition config +model = "hog" # can be hog or cnn +tolerance = 0.6 +useCUDA = True # is only relevant if dlib installer glitched out during installation +videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams + diff --git a/application/db.py b/application/db.py index a1a5a77..f03bfbf 100644 --- a/application/db.py +++ b/application/db.py @@ -6,8 +6,9 @@ from sqlalchemy.ext.declarative import declarative_base import enum from flask_sqlalchemy import SQLAlchemy from flask import Flask +import application.config as config -engine = db.create_engine('sqlite:///./test.sqlite', echo=False) +engine = db.create_engine('sqlite:///' + config.databaseFile, echo=config.echoDatabase) connection = engine.connect() Base = declarative_base() Session = sessionmaker(bind=engine) diff --git a/application/endpoints.py b/application/endpoints.py index 525d1f6..cb61e57 100644 --- a/application/endpoints.py +++ b/application/endpoints.py @@ -53,8 +53,6 @@ class PersonList(Resource): parser.add_argument('useFace', type=bool, required=False) args = parser.parse_args() - - # this indicates that the captured face should be use for identification / validation if "useFace" in args and args["useFace"]: Camera().post() @@ -112,6 +110,7 @@ class PersonList(Resource): data = session.query(Person).filter_by(person_id=id).delete() session.commit() session.close() + fr.initFaceRec() return flask.make_response(flask.jsonify({'data': data}), 204) except Exception as e: @@ -122,7 +121,7 @@ class Camera(Resource): # provides the function used for the live streams class VideoCamera(object): """Video stream object""" - url = "http://192.168.178.56:8080/video" + url = config.videoSource def __init__(self): self.video = cv2.VideoCapture(self.url) @@ -143,7 +142,7 @@ class
Camera(Resource): def genProcessed(self, url=None): """Video streaming generator function.""" - url = "http://192.168.178.56:8080/video" + url = config.videoSource while True: frame = fr.identifyFaceVideo(url).tobytes() yield (b'--frame\r\n' diff --git a/application/face_rec.py b/application/face_rec.py index 31a8b4d..ff30170 100644 --- a/application/face_rec.py +++ b/application/face_rec.py @@ -6,18 +6,18 @@ from application.db import Session, Person import base64 import numpy as np from io import StringIO +import application.config as config -TOLERANCE = 0.6 +TOLERANCE = config.tolerance FRAME_THICKNESS = 3 FONT_THICKNESS = 2 -MODEL = "cnn" # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model - +MODEL = config.model # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model known_faces = [] known_names = [] def initFaceRec(): - dlib.DLIB_USE_CUDA=True + dlib.DLIB_USE_CUDA = config.useCUDA print('Loading known faces...', dlib.DLIB_USE_CUDA) session = Session() for face, name in session.query(Person.face, Person.person_id).all(): @@ -67,7 +67,6 @@ def identifyFaceVideo(url): face_locations = {} #face locations to be drawn for face_encoding, face_location in zip(encodings, locations): - face_locations.update(compareFace(face_encoding, face_location)) session = Session() diff --git a/dlib b/dlib deleted file mode 160000 index c7062aa..0000000 --- a/dlib +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c7062aa3631ba4912255a9bcc0563d404d3bdd5d diff --git a/dlibinstallation.txt b/dlibinstallation.txt deleted file mode 100644 index e8aa73e..0000000 --- a/dlibinstallation.txt +++ /dev/null @@ -1 +0,0 @@ -https://stackoverflow.com/a/57592670/10785079 \ No newline at end of file diff --git a/run.py b/run.py index 5ee6923..2c16c86 100644 --- a/run.py +++ b/run.py @@ -1,8 +1,9 @@ from application import app from application.face_rec import initFaceRec +import application.config 
as config initFaceRec() -app.run(host="localhost", port='5001', debug=True, threaded=True) +app.run(host="localhost", port=config.port, debug=config.debug, threaded=True) diff --git a/test.sqlite b/test.sqlite index 6bb29f9..329c1c4 100644 Binary files a/test.sqlite and b/test.sqlite differ