improved stability and performance

by utilising one global object for the camera, which avoids many invisible internal opencv problems
This commit is contained in:
Askill 2020-07-25 22:03:48 +02:00
parent 248a9c608c
commit e4b78fb78e
8 changed files with 144 additions and 70 deletions

2
.gitignore vendored
View File

@ -2,3 +2,5 @@
__pycache__/ __pycache__/
DP_UareU_WSDK_223/ DP_UareU_WSDK_223/
test.sqlite

31
application/camera.py Normal file
View File

@ -0,0 +1,31 @@
import cv2
import base64
import application.config as config
import numpy as np
# provides the function used for the live streams
class VideoCamera(object):
    """Shared wrapper around a single ``cv2.VideoCapture``.

    One global instance of this class is meant to be reused by every
    consumer, avoiding repeated camera handles and the internal OpenCV
    instability that comes with opening the device many times.
    """

    # Camera index or stream URL, taken from the application config.
    url = config.videoSource

    def __init__(self):
        # CAP_DSHOW selects the DirectShow backend (Windows); avoids slow
        # backend autodetection when opening the device.
        self.video = cv2.VideoCapture(self.url, cv2.CAP_DSHOW)
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 720)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 450)

    def __del__(self):
        # Guard: __init__ may have raised before self.video was assigned,
        # in which case an unguarded release() would itself raise.
        video = getattr(self, "video", None)
        if video is not None:
            video.release()

    def get_frame(self, ending):
        """Grab one frame and return it encoded as *ending* (e.g. '.jpg').

        Falls back to a small black frame when the capture yields nothing,
        so streaming consumers never crash on a dropped frame.
        """
        success, image = self.video.read()
        if image is None:
            image = np.zeros((100, 100, 3), np.uint8)
        ret, jpeg = cv2.imencode(ending, image)
        return jpeg

    def get_frame2(self, ending):
        """Grab one raw (unencoded) frame.

        May return None when the read fails — callers must handle that.
        NOTE(review): *ending* is unused here; kept for signature parity
        with get_frame.
        """
        success, image = self.video.read()
        return image

View File

@ -10,8 +10,9 @@ port = '5001'
# Face recognition config # Face recognition config
model = "hog" # can be hog or cnn model = "cnn" # can be hog or cnn
tolerance = 0.6 tolerance = 0.7
useCUDA = True # is only relevant if dlib installer glitched out during installation useCUDA = True # is only relevant if dlib installer glitched out during installation
videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams videoSource = 0
scaleInput = 0.5 #videoSource = "http://192.168.178.56:8080/video" # used by openCV, can use webcams or videostreams
scaleInput = 0.6

View File

@ -7,7 +7,12 @@ import cv2
import base64 import base64
from application.db import Session, Person, Fingerprint from application.db import Session, Person, Fingerprint
import application.face_rec as fr import application.face_rec as fr
lastImage = ""
import application.camera as cam
lastImage = None
video = cv2.VideoCapture(config.videoSource, cv2.CAP_DSHOW)
vidCam = cam.VideoCamera()
class PersonList(Resource): class PersonList(Resource):
def post(self, id = None): def post(self, id = None):
@ -38,7 +43,10 @@ class PersonList(Resource):
for x in data: for x in data:
arr.append(x.serialize()) arr.append(x.serialize())
session.close() session.close()
fr.initFaceRec() fr.initFaceRec()
return flask.make_response(flask.jsonify({'data': arr}), 201) return flask.make_response(flask.jsonify({'data': arr}), 201)
except Exception as e: except Exception as e:
@ -118,43 +126,34 @@ class PersonList(Resource):
return flask.make_response(flask.jsonify({'error': str(e)}), 404) return flask.make_response(flask.jsonify({'error': str(e)}), 404)
class Camera(Resource): class Camera(Resource):
# provides the function used for the live streams
class VideoCamera(object):
"""Video stream object"""
url = config.videoSource
def __init__(self):
self.video = cv2.VideoCapture(self.url)
def __del__(self):
self.video.release()
def get_frame(self, ending):
success, image = self.video.read()
ret, jpeg = cv2.imencode(ending, image)
return jpeg
def gen(self, camera): def gen(self, camera):
"""Video streaming generator function.""" """Video streaming generator function."""
while True: while True:
frame = camera.get_frame('.jpg').tobytes() frame = camera.get_frame('.jpg').tobytes()
yield (b'--frame\r\n' yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') b'Content-Type:image/jpeg\r\n'
b'Content-Length: ' + f"{len(frame)}".encode() + b'\r\n'
b'\r\n' + frame + b'\r\n')
def genProcessed(self, url=None): def genProcessed(self, cam):
"""Video streaming generator function for processed video.""" """Video streaming generator function for processed video."""
url = config.videoSource
while True: while True:
frame = fr.identifyFaceVideo(url).tobytes() frame = fr.identifyFaceVideo(cam).tobytes()
yield (b'--frame\r\n' yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') b'Content-Type:image/jpeg\r\n'
b'Content-Length: ' + f"{len(frame)}".encode() + b'\r\n'
b'\r\n' + frame + b'\r\n')
def get(self, type = "stream"): def get(self, type = "stream"):
global lastImage global lastImage
global vidCam
try: try:
if type == "stream": if type == "stream":
return flask.Response(self.gen(self.VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame') return flask.Response(self.gen(vidCam), mimetype='multipart/x-mixed-replace; boundary=frame')
elif type == "processed": elif type == "processed":
return flask.Response(self.genProcessed(self.VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame') return flask.Response(self.genProcessed(vidCam), mimetype='multipart/x-mixed-replace; boundary=frame')
elif type == "still": elif type == "still":
return flask.Response(base64.b64decode(lastImage), mimetype='image/png') return flask.Response(base64.b64decode(lastImage), mimetype='image/png')
@ -165,8 +164,9 @@ class Camera(Resource):
def post(self): def post(self):
global lastImage global lastImage
global vidCam
try: try:
lastImage = base64.b64encode(self.VideoCamera().get_frame('.png')) lastImage = base64.b64encode(vidCam.get_frame('.png'))
except Exception as e: except Exception as e:
print("error: -", e) print("error: -", e)
return flask.make_response(flask.jsonify({'error': str(e)}), 404) return flask.make_response(flask.jsonify({'error': str(e)}), 404)

View File

@ -16,11 +16,10 @@ MODEL = config.model # default: 'hog', other one can be 'cnn' - CUDA accelerate
known_faces = [] known_faces = []
known_names = [] known_names = []
def initFaceRec(): def initFaceRec() :
''' Initializes Facial recognition with faces in current db ''' ''' Initializes Facial recognition with faces in current db '''
dlib.DLIB_USE_CUDA = config.useCUDA print('LOADING known faces...')
print('LOADING known faces...', dlib.DLIB_USE_CUDA)
session = Session() session = Session()
for face, name in session.query(Person.face, Person.person_id).all(): for face, name in session.query(Person.face, Person.person_id).all():
# Load an image # Load an image
@ -28,17 +27,24 @@ def initFaceRec():
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# Get 128-dimension face encoding # Get 128-dimension face encoding
encoding = face_recognition.face_encodings(image)
if len(encoding) >= 1:
encoding = face_recognition.face_encodings(image)[0] encoding = face_recognition.face_encodings(image)[0]
else:
continue
# Append encodings and name # Append encodings and name
known_faces.append(encoding) known_faces.append(encoding)
known_names.append(name) known_names.append(name)
print('DONE Loading known faces...') print('DONE Loading known faces...')
session.close() session.close()
def identifyFace(image): def identifyFace(image):
print('Identifying Face') print('Identifying Face')
res = {}
try:
nparr = np.fromstring(base64.b64decode(image), np.uint8) nparr = np.fromstring(base64.b64decode(image), np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
@ -46,17 +52,21 @@ def identifyFace(image):
encodings = face_recognition.face_encodings(image, locations) encodings = face_recognition.face_encodings(image, locations)
# res is the return object key: name, value: matching score # res is the return object key: name, value: matching score
res = {}
count = 0
for face_encoding, face_location in zip(encodings, locations): for face_encoding, face_location in zip(encodings, locations):
results = face_recognition.face_distance(known_faces, face_encoding) results = face_recognition.face_distance(known_faces, face_encoding)
res = {known_names[i]: results[i] for i in range(0, len(results)) } res = {known_names[i]: results[i] for i in range(0, len(results)) }
count += 1
print(count)
except:
print("error")
return res return res
def identifyFaceVideo(url): def identifyFaceVideo(video):
video = video.video
# always get new latest image from url # always get new latest image from url
video = cv2.VideoCapture(url)
image = video.read()[1] image = video.read()[1]
#scale #scale
image = cv2.resize(image,None,fx=config.scaleInput,fy=config.scaleInput) image = cv2.resize(image,None,fx=config.scaleInput,fy=config.scaleInput)
@ -74,16 +84,22 @@ def identifyFaceVideo(url):
# can be multithreaded here # can be multithreaded here
# compares each face against all faces in DB # compares each face against all faces in DB
for face_encoding, face_location in zip(encodings, locations): for face_encoding, face_location in zip(encodings, locations):
try:
face_locations.update(compareFace(face_encoding, face_location)) face_locations.update(compareFace(face_encoding, face_location))
except Exception as e:
print(e)
session = Session() session = Session()
# marks faces and retrieves faces by id # marks faces and retrieves faces by id
for k, v in face_locations.items(): for k, v in face_locations.items():
try:
# Paint frame # Paint frame
cv2.rectangle(image, v[0], v[1], [255, 0, 0], FRAME_THICKNESS) cv2.rectangle(image, v[0], v[1], [255, 0, 0], FRAME_THICKNESS)
# Write a name # Write a name
name = " ".join(session.query(Person.fname, Person.lname).filter(Person.person_id == int(k)).first()) name = " ".join(session.query(Person.fname, Person.lname).filter(Person.person_id == int(k)).first())
cv2.putText(image, name, v[0], cv2.FONT_HERSHEY_SIMPLEX, 1.5, [255, 0, 255], FONT_THICKNESS) cv2.putText(image, name, v[0], cv2.FONT_HERSHEY_SIMPLEX, 1.5, [255, 0, 255], FONT_THICKNESS)
except Exception as e:
print(e)
session.close() session.close()
image = cv2.imencode(".jpg", image)[1] image = cv2.imencode(".jpg", image)[1]
return image return image

View File

@ -111,18 +111,6 @@ function validate(){
); );
} }
function loadStream() {
string = `
<img src="${rootKontext + "/api/v1/camera/stream"}" id="image-left"> </img>
`
ml.innerHTML += string;
string = `
<img src="${rootKontext + "/api/v1/camera/still"}" id="image-right"> </img>
`
mr.innerHTML += string;
}
function loadData() { function loadData() {
getJSON(rootKontext + "/api/v1/person/", getJSON(rootKontext + "/api/v1/person/",
function (error, data) { function (error, data) {

36
tempstream.py Normal file
View File

@ -0,0 +1,36 @@
from flask import Flask, request, Response
import cv2
from time import sleep
# Flask application serving the standalone MJPEG test stream.
app = Flask(__name__)
class VideoCamera(object):
    """Video stream object backed by a local example clip."""

    # Path of the clip that is streamed.
    url = "./example.mp4"

    def __init__(self):
        self.video = cv2.VideoCapture(self.url)

    def __del__(self):
        self.video.release()

    def get_frame(self, ending):
        """Return one frame of the clip encoded as *ending* (e.g. '.jpg').

        When the clip runs out of frames the capture is rewound, so the
        test stream loops forever instead of handing None to
        cv2.imencode (which would raise and kill the stream).
        """
        success, image = self.video.read()
        if not success or image is None:
            # End of file: seek back to the first frame and retry.
            self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)
            success, image = self.video.read()
        ret, jpeg = cv2.imencode(ending, image)
        # Throttle to roughly the clip's native frame rate (~43 fps).
        sleep(.023)
        return jpeg
def gen(camera):
    """Yield an endless multipart/x-mixed-replace MJPEG stream from *camera*.

    Each yielded chunk is one boundary-delimited part containing a single
    JPEG-encoded frame plus its Content-Type/Content-Length headers.
    """
    boundary = b'--frame\r\n'
    while True:
        payload = camera.get_frame('.jpg').tobytes()
        headers = (b'Content-Type:image/jpeg\r\n'
                   b'Content-Length: ' + str(len(payload)).encode() + b'\r\n\r\n')
        yield boundary + headers + payload + b'\r\n'
@app.route("/1.mjpeg")
def webhook():
return Response(gen(VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame')
# Run the standalone test-stream server; threaded so several clients
# can consume the stream at once.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5003, threaded=True, debug=False)

Binary file not shown.