docs
This commit is contained in:
parent 184e981aac
commit 88c5a4803a
@@ -31,3 +31,5 @@ Pfad | Beschreibung

### Architecture

Only the components with a solid border have been implemented.
@@ -13,6 +13,7 @@ from flask import Flask, jsonify, Response, send_file

application = Flask(__name__)

# init the "database"
clients = []
cams = []
lastImages = list(range(0,10))
@@ -22,6 +23,7 @@ with open("./clients.json", 'r', encoding='utf-8') as f:

with open("./cams.json", 'r', encoding='utf-8') as f:
    cams = json.loads(f.read())

# provides the function used for the live streams
class VideoCamera(object):
    """Video stream object"""
    def __init__(self, url):
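For orientation (not part of the diff): the exact schema of cams.json is not shown in this commit. What the code below relies on is at least an "ip" field per entry, with "last_detection" attached at runtime in main(). A minimal sketch of that assumption:

import json

# assumed minimal contract for cams.json entries -- only "ip" is read from the file here;
# "last_detection" is added at runtime in main()
with open("./cams.json", 'r', encoding='utf-8') as f:
    cams = json.loads(f.read())
for cam in cams:
    assert "ip" in cam, "each cam entry needs its stream URL under 'ip'"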
@@ -60,6 +62,7 @@ def main():
    for cam in cams:
        cam["last_detection"] = 0

    # check all cameras sequentially while the server is running
    while True:
        for cam in cams:
            stream = cam["ip"]
@@ -126,11 +129,8 @@ def cam_stream(num):

@application.route('/cam/<num>/processed')
def cam_stream_processed(num):
    #frame = cv2.imencode('.jpg', lastImages[int(num)])[1]
    #return send_file(io.BytesIO(frame), mimetype='image/jpeg')
    return Response(gen_processed(num), mimetype='multipart/x-mixed-replace; boundary=frame')


######### ###########
### Start ###
######### ###########
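For context (not part of the commit): gen_processed is referenced above but not defined in this diff. Given the multipart/x-mixed-replace mimetype and the commented-out cv2.imencode line, a plausible sketch, assuming lastImages holds the latest processed frames, would be:

def gen_processed(num):
    # sketch only: continuously re-encode the latest processed frame as an MJPEG part
    while True:
        frame = cv2.imencode('.jpg', lastImages[int(num)])[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')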
@@ -138,5 +138,4 @@ def cam_stream_processed(num):

if __name__ == '__main__':

    _thread.start_new_thread(main, ())

    application.run(host='0.0.0.0', port=5000, threaded=True)
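As a side note (not part of the commit): _thread.start_new_thread works, but the higher-level threading module is the more idiomatic way to run the polling loop in the background. A sketch under the assumption, as above, that main takes no arguments:

import threading

# daemon=True so the polling thread does not keep the process alive on shutdown
threading.Thread(target=main, daemon=True).start()
application.run(host='0.0.0.0', port=5000, threaded=True)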
@@ -6,7 +6,8 @@ import numpy as np

import tensorflow as tf
import cv2


# The detector API can be swapped out as long as the I/O stays the same,
# so a different neural net can be used if you like.
class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.path_to_ckpt = path_to_ckpt
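To illustrate the swap mentioned in the comment above: the only contract the rest of the code relies on is process_frame(img) returning (boxes, scores, classes, num), as seen further down. A minimal stand-in (hypothetical, not part of the project) would be:

class DummyDetectorAPI:
    """Stand-in with the same I/O as DetectorAPI; always reports no detections."""
    def __init__(self, path_to_ckpt=None):
        self.path_to_ckpt = path_to_ckpt

    def process_frame(self, img):
        # boxes, scores, classes, num -- an empty result in the same shape
        return [], [], [], 0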
@@ -70,7 +71,7 @@ class Detector:
        r, img = cap.read()
        if img is None:
            return img

        # scale the image down for faster processing
        scale_percent = 60  # percent of original size
        width = int(img.shape[1] * scale_percent / 100)
        height = int(img.shape[0] * scale_percent / 100)
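A quick worked example of the scaling above (the 640x480 input size is only an illustration, not taken from the diff):

scale_percent = 60
width = int(640 * scale_percent / 100)    # 384
height = int(480 * scale_percent / 100)   # 288
dim = (width, height)                     # fed to cv2.resize in the next hunk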
@@ -78,10 +79,12 @@ class Detector:

        img = cv2.resize(img, dim)

        # get the results from the net
        boxes, scores, classes, num = self.odapi.process_frame(img)
        res = False
        for i in range(len(boxes)):
            # class 1 represents a human;
            # draw recognition boxes and return the resulting image + True/False
            if classes[i] == 1:
                if scores[i] > self.threshold:
                    box = boxes[i]
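The drawing step itself falls outside this hunk. Assuming the TensorFlow object-detection convention of a box given as (ymin, xmin, ymax, xmax) in pixel coordinates (an assumption about DetectorAPI's output), the usual way to draw it and flag the detection is:

# assumed box layout: (ymin, xmin, ymax, xmax) in pixels
cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)
res = True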
@@ -92,10 +95,3 @@ class Detector:
                res = False
        return img, res



    #def __del__(self):
        #self.cap.release()
        #cv2.destroyAllWindows()
        #requests.get("http://192.168.178.53/stop")