This commit is contained in:
Patrice 2019-07-18 14:46:16 +02:00
parent 184e981aac
commit 88c5a4803a
3 changed files with 11 additions and 14 deletions

View File

@ -30,4 +30,6 @@ Pfad | Beschreibung
### Architektur ### Architektur
![Architektur Übersicht](arch.png) ![Architektur Übersicht](arch.png)
Es wurden nur die Komponenten mit einer soliden Umrandung umgesetzt.

View File

@ -13,6 +13,7 @@ from flask import Flask, jsonify, Response, send_file
application = Flask(__name__) application = Flask(__name__)
# init the "database"
clients = [] clients = []
cams = [] cams = []
lastImages = list(range(0,10)) lastImages = list(range(0,10))
@ -22,6 +23,7 @@ with open("./clients.json", 'r', encoding='utf-8') as f:
with open("./cams.json", 'r', encoding='utf-8') as f: with open("./cams.json", 'r', encoding='utf-8') as f:
cams = json.loads(f.read()) cams = json.loads(f.read())
# provides the function used for the live streams
class VideoCamera(object): class VideoCamera(object):
"""Video stream object""" """Video stream object"""
def __init__(self, url): def __init__(self, url):
@ -60,6 +62,7 @@ def main():
for cam in cams: for cam in cams:
cam["last_detection"] = 0 cam["last_detection"] = 0
# check all cameras sequentially while the server is running
while True: while True:
for cam in cams: for cam in cams:
stream = cam["ip"] stream = cam["ip"]
@ -126,11 +129,8 @@ def cam_stream(num):
@application.route('/cam/<num>/processed') @application.route('/cam/<num>/processed')
def cam_stream_processed(num): def cam_stream_processed(num):
#frame = cv2.imencode('.jpg', lastImages[int(num)])[1]
#return send_file(io.BytesIO(frame), mimetype='image/jpeg')
return Response(gen_processed(num), mimetype='multipart/x-mixed-replace; boundary=frame') return Response(gen_processed(num), mimetype='multipart/x-mixed-replace; boundary=frame')
######### ########### ######### ###########
### Start ### ### Start ###
######### ########### ######### ###########
@ -138,5 +138,4 @@ def cam_stream_processed(num):
if __name__ == '__main__': if __name__ == '__main__':
_thread.start_new_thread(main, () ) _thread.start_new_thread(main, () )
application.run(host='0.0.0.0', port=5000, threaded=True) application.run(host='0.0.0.0', port=5000, threaded=True)

View File

@ -6,7 +6,8 @@ import numpy as np
import tensorflow as tf import tensorflow as tf
import cv2 import cv2
# Detector API can be changed out given the I/O remains the same
# this way you can use a different neural net if you like
class DetectorAPI: class DetectorAPI:
def __init__(self, path_to_ckpt): def __init__(self, path_to_ckpt):
self.path_to_ckpt = path_to_ckpt self.path_to_ckpt = path_to_ckpt
@ -70,7 +71,7 @@ class Detector:
r, img = cap.read() r, img = cap.read()
if img is None: if img is None:
return img return img
# scale the image down for faster processing
scale_percent = 60 # percent of original size scale_percent = 60 # percent of original size
width = int(img.shape[1] * scale_percent / 100) width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100) height = int(img.shape[0] * scale_percent / 100)
@ -78,10 +79,12 @@ class Detector:
img = cv2.resize(img, dim) img = cv2.resize(img, dim)
# get the results from the net
boxes, scores, classes, num = self.odapi.process_frame(img) boxes, scores, classes, num = self.odapi.process_frame(img)
res = False res = False
for i in range(len(boxes)): for i in range(len(boxes)):
# Class 1 represents human # Class 1 represents human
# draw recognition boxes and return resulting image + true/false
if classes[i] == 1: if classes[i] == 1:
if scores[i] > self.threshold: if scores[i] > self.threshold:
box = boxes[i] box = boxes[i]
@ -92,10 +95,3 @@ class Detector:
res = False res = False
return img, res return img, res
#def __del__(self):
#self.cap.release()
#cv2.destroyAllWindows()
#requests.get("http://192.168.178.53/stop")