started Doku
This commit is contained in:
parent
94d1c1156f
commit
ffcfa37916
|
|
@ -1,7 +0,0 @@
|
|||
FROM python
|
||||
COPY ./certs /certs
|
||||
|
||||
COPY ./ /app
|
||||
RUN pip install -r /app/requirements.txt
|
||||
|
||||
CMD python /app/run.py
|
||||
103
README.md
103
README.md
|
|
@ -1,7 +1,104 @@
|
|||
BioSys
|
||||
# Gesichtserkennung Demo
|
||||
|
||||
## Quick Start
|
||||
|
||||
In diesem Abschnitt finden Sie alle nötigen Informationen zum schnellen Deployen der Applikation.
|
||||
|
||||
### Anforderungen
|
||||
|
||||
- Python 3.x
|
||||
- pip (für Python 3)
|
||||
|
||||
### Deployment
|
||||
#### ohne CUDA
|
||||
|
||||
pip install -r requirements.txt
|
||||
python run.py
|
||||
|
||||
#### mit CUDA
|
||||
|
||||
original stackOverflow Antwort:
|
||||
https://stackoverflow.com/questions/49731346/compile-dlib-with-cuda/57592670#57592670
|
||||
|
||||
Install Intel Performance Library: https://software.seek.intel.com/performance-libraries
|
||||
Install cuDNN: https://developer.nvidia.com/cudnn
|
||||
|
||||
pip install cmake
|
||||
git clone https://github.com/davisking/dlib.git
|
||||
cd dlib
|
||||
git submodule init
|
||||
git submodule update
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -D DLIB_USE_CUDA=1 -D USE_AVX_INSTRUCTIONS=1 ../
|
||||
cmake --build . --config Release
|
||||
cd ../
|
||||
python setup.py install
|
||||
cd ../
|
||||
|
||||
pip install -r requirements.txt
|
||||
python run.py
|
||||
|
||||
|
||||
### Konfiguration
|
||||
|
||||
# Notes
|
||||
Die Konfigurationsdatei finden Sie im Ordner "application".
|
||||
In *./application/config.py* können Sie die relevanten Parameter konfigurieren.
|
||||
|
||||
FingerprintID: 0 is the left pinky, 9 is the right pinky; people with more than 10 fingers may use another system.
|
||||
|
||||
# Database config
|
||||
databaseFile = "./test.sqlite"
|
||||
echoDatabase = False
|
||||
|
||||
# Web Server config
|
||||
debug = True
|
||||
port = '5001'
|
||||
|
||||
# Face recognition config
|
||||
model = "hog" # hog or cnn
|
||||
tolerance = 0.6
|
||||
useCUDA = True
|
||||
videoSource = "http://192.168.178.56:8080/video"
|
||||
|
||||
Video Source kann eine Videodatei, ein live stream oder eine Webcam sein:
|
||||
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#goal
|
||||
|
||||
|
||||
## Komponenten
|
||||
|
||||
## API
|
||||
Die API ist RESTful, besitzt 2 Endpoints mit dem Präfix */api/v1*.
|
||||
|
||||
Struktur:
|
||||
|
||||
/person/id?useFace
|
||||
POST:
|
||||
legt neues Person Objekt an
|
||||
GET:
|
||||
Parameter:
|
||||
id: URL Parameter, id des gesuchten Objektes, optional
|
||||
useFace: Query Parameter, letztes gespeichertes Bild wird zum Identifizieren genutzt, optional
|
||||
|
||||
Get: wenn nur id gesetzt ist
|
||||
Verifizierung: wenn id und useFace gesetzt sind
|
||||
Identifizierung: wenn useFace gesetzt ist
|
||||
|
||||
PUT:
|
||||
disabled
|
||||
DELETE:
|
||||
Löscht Objekt per ID
|
||||
|
||||
/camera/type
|
||||
POST:
|
||||
speichert den aktuellen Frame
|
||||
GET:
|
||||
type - still (default): gibt letzten gespeicherten Frame
|
||||
type - stream: gibt einen mjpeg Video Stream aus, welcher als Bild eingebettet werden kann
|
||||
type - processed: mjpeg Video Stream mit umrandeten Gesichtern und eingezeichneten Namen
|
||||
PUT:
|
||||
disabled
|
||||
DELETE:
|
||||
disabled
|
||||
|
||||
|
||||
## Datenstruktur
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ app = Flask(__name__)
|
|||
api = Api(app, version='1', contact={"name":""}, license={"name":"Online Dienst Dokumentation"}, api_spec_url='/api/swagger')
|
||||
|
||||
|
||||
|
||||
api.add_resource(endpoints.PersonList,'/api/v1/person/<string:id>', '/api/v1/person/')
|
||||
api.add_resource(endpoints.Camera,'/api/v1/camera/<string:type>', "/api/v1/camera/")
|
||||
|
||||
|
|
|
|||
|
|
@ -1,2 +1,17 @@
|
|||
|
||||
# Database config
|
||||
databaseFile = "./test.sqlite"
|
||||
echoDatabase = False
|
||||
|
||||
|
||||
# Web Server config
|
||||
debug = True
|
||||
port = '5001'
|
||||
|
||||
|
||||
# Face recognition config
|
||||
model = "hog" # can be hog or cnn
|
||||
tolerance = 0.6
|
||||
useCUDA = True # is only relevant if dlib installer glitched out during installation
|
||||
videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams
|
||||
|
||||
|
|
|
|||
|
|
@ -6,8 +6,9 @@ from sqlalchemy.ext.declarative import declarative_base
|
|||
import enum
|
||||
from flask_sqlalchemy import SQLAlchemy
|
||||
from flask import Flask
|
||||
import application.config as config
|
||||
|
||||
engine = db.create_engine('sqlite:///./test.sqlite', echo=False)
|
||||
engine = db.create_engine('sqlite:///' + config.databaseFile, echo=config.echoDatabase)
|
||||
connection = engine.connect()
|
||||
Base = declarative_base()
|
||||
Session = sessionmaker(bind=engine)
|
||||
|
|
|
|||
|
|
@ -53,8 +53,6 @@ class PersonList(Resource):
|
|||
parser.add_argument('useFace', type=bool, required=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
|
||||
# this indicates that the captured face should be use for identification / validation
|
||||
if "useFace" in args and args["useFace"]:
|
||||
Camera().post()
|
||||
|
|
@ -112,6 +110,7 @@ class PersonList(Resource):
|
|||
data = session.query(Person).filter_by(person_id=id).delete()
|
||||
session.commit()
|
||||
session.close()
|
||||
fr.initFaceRec()
|
||||
return flask.make_response(flask.jsonify({'data': data}), 204)
|
||||
|
||||
except Exception as e:
|
||||
|
|
@ -122,7 +121,7 @@ class Camera(Resource):
|
|||
# provides the function used for the live streams
|
||||
class VideoCamera(object):
|
||||
"""Video stream object"""
|
||||
url = "http://192.168.178.56:8080/video"
|
||||
url = config.videoSource
|
||||
def __init__(self):
|
||||
self.video = cv2.VideoCapture(self.url)
|
||||
|
||||
|
|
@ -143,7 +142,7 @@ class Camera(Resource):
|
|||
|
||||
def genProcessed(self, url=None):
|
||||
"""Video streaming generator function."""
|
||||
url = "http://192.168.178.56:8080/video"
|
||||
url = config.videoSource
|
||||
while True:
|
||||
frame = fr.identifyFaceVideo(url).tobytes()
|
||||
yield (b'--frame\r\n'
|
||||
|
|
|
|||
|
|
@ -6,18 +6,18 @@ from application.db import Session, Person
|
|||
import base64
|
||||
import numpy as np
|
||||
from io import StringIO
|
||||
import application.config as config
|
||||
|
||||
TOLERANCE = 0.6
|
||||
TOLERANCE = config.tolerance
|
||||
FRAME_THICKNESS = 3
|
||||
FONT_THICKNESS = 2
|
||||
MODEL = "cnn" # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model
|
||||
|
||||
MODEL = config.model # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model
|
||||
|
||||
known_faces = []
|
||||
known_names = []
|
||||
|
||||
def initFaceRec():
|
||||
dlib.DLIB_USE_CUDA=True
|
||||
dlib.DLIB_USE_CUDA = config.useCUDA
|
||||
print('Loading known faces...', dlib.DLIB_USE_CUDA)
|
||||
session = Session()
|
||||
for face, name in session.query(Person.face, Person.person_id).all():
|
||||
|
|
@ -67,7 +67,6 @@ def identifyFaceVideo(url):
|
|||
face_locations = {} #face locations to be drawn
|
||||
|
||||
for face_encoding, face_location in zip(encodings, locations):
|
||||
|
||||
face_locations.update(compareFace(face_encoding, face_location))
|
||||
|
||||
session = Session()
|
||||
|
|
|
|||
1
dlib
1
dlib
|
|
@ -1 +0,0 @@
|
|||
Subproject commit c7062aa3631ba4912255a9bcc0563d404d3bdd5d
|
||||
|
|
@ -1 +0,0 @@
|
|||
https://stackoverflow.com/a/57592670/10785079
|
||||
3
run.py
3
run.py
|
|
@ -1,8 +1,9 @@
|
|||
from application import app
|
||||
from application.face_rec import initFaceRec
|
||||
import application.config as config
|
||||
|
||||
initFaceRec()
|
||||
app.run(host="localhost", port='5001', debug=True, threaded=True)
|
||||
app.run(host="localhost", port=config.port, debug=config.debug, threaded=True)
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
BIN
test.sqlite
BIN
test.sqlite
Binary file not shown.
Loading…
Reference in New Issue