diff --git a/client/main.py b/client/main.py
index 5a67f17..2f4c876 100644
--- a/client/main.py
+++ b/client/main.py
@@ -12,11 +12,11 @@ playing = False
 def index():
     global playing, p, w
     if playing:
-        return 406
+        return str(406)
     else:
         playing = True
         p = w.play()
-        return 200
+        return str(200)
 
 @app.route('/stop')
 def test():
@@ -24,9 +24,9 @@ def test():
     if playing:
         playing = False
         p.stop()
-        return 200
+        return str(200)
     else:
-        return 406
+        return str(406)
 
 
 port = int(os.environ.get('PORT', 81))
diff --git a/client/rave.wav b/client/rave.wav
new file mode 100644
index 0000000..cdb6ae7
Binary files /dev/null and b/client/rave.wav differ
diff --git a/server/app.py b/server/app.py
index 8d57e09..03f9e8d 100644
--- a/server/app.py
+++ b/server/app.py
@@ -15,7 +15,7 @@ application = Flask(__name__)
 
 clients = []
 cams = []
-lastImages = list(range(0,4))
+lastImages = list(range(0,10))
 
 with open("./clients.json", 'r', encoding='utf-8') as f:
     clients = json.loads(f.read())
@@ -55,45 +55,49 @@ def gen_processed(num):
 
 def main():
     detector = dt.Detector()
-    t = 1 # seconds a person can leave the room for
+    t = 5 # seconds a person can leave the room for
     t0 = time.time()
     elapsed = 0
     while True:
-        for cam in cams:
-
-            stream = cam["ip"]
-
-            clientStatus = clients[cam["client_id"]]["status"]
-            clientIp = clients[cam["client_id"]]["ip"]
+        cam = cams[2]
 
-            elapsed = time.time() - t0
-            if elapsed > t and clientStatus:
-                try:
-                    r = requests.get(clientIp + "/stop")
-                    if r.status_code == 200:
-                        clients[cam["client_id"]]["status"] = False
-                        cam["status"] = False
-                except:
-                    print("request error")
+
+        stream = cam["ip"]
+
+        clientStatus = clients[cam["client_id"]]["status"]
+        clientIp = clients[cam["client_id"]]["ip"]
 
-            tmp = time.time()
-            try:
-                img, result = detector.detect(stream)
+        elapsed = time.time() - t0
+        if elapsed > t and clientStatus:
+            try:
+                r = requests.get(clientIp + "/stop")
+                #if r.status_code == 200:
+                clients[cam["client_id"]]["status"] = False
             except:
-                continue
-            print(cam["client_id"], result, time.time()-tmp)
-            lastImages[cam["id"]] = img
+                print("request error")
 
-            if result and not clientStatus:
+        tmp = time.time()
+        try:
+            img, result = detector.detect(stream)
+        except:
+            continue
+
+        print(cam["id"], result, time.time()-tmp)
+        lastImages[cam["id"]] = img
+
+        if result:
+            cam["status"] = True
+            if not clientStatus:
                 try:
                     r = requests.get(clientIp + "/play")
                     if r.status_code == 200:
                         clients[cam["client_id"]]["status"] = True
-                        cam["status"] = True
                         t0 = time.time()
                 except:
                     print("request error")
+        else:
+            cam["status"] = False
 
 
 #########
 ###########
diff --git a/server/cams.json b/server/cams.json
index 5e5d662..2089581 100644
--- a/server/cams.json
+++ b/server/cams.json
@@ -13,20 +13,30 @@
         "id": 1,
         "label": "cam2",
         "ip": "http://89.29.108.38:80/mjpg/video.mjpg",
-        "client_id": 1,
+        "client_id": 2,
         "status": false,
         "x":0.4,
         "y":0.2,
         "angle": 190
     },
     {
-        "id": 2,
-        "label": "cam3",
-        "ip": "http://89.29.108.38:80/mjpg/video.mjpg",
+        "id": 3,
+        "label": "cam4",
+        "ip": "http://82.150.206.177/cgi-bin/faststream.jpg?stream=half&fps=15&rand=COUNTER",
+        "client_id": 1,
+        "status": false,
+        "x":0.75,
+        "y":0.4,
+        "angle": 100
+    },
+    {
+        "id": 4,
+        "label": "cam5",
+        "ip": "http://79.189.131.176:99/videostream.cgi?user=admin&pwd=",
         "client_id": 2,
         "status": false,
-        "x":0.9,
-        "y":0.1,
-        "angle": 270
+        "x":0.8,
+        "y":0.8,
+        "angle": 90
     }
 ]
\ No newline at end of file
diff --git a/server/detector.py b/server/detector.py
index 7e5d9f2..99f7976 100644
--- a/server/detector.py
+++ b/server/detector.py
@@ -62,7 +62,7 @@ class Detector:
     def __init__(self):
         self.model_path = "./model.pb"
         self.odapi = DetectorAPI(path_to_ckpt=self.model_path)
-        self.threshold = 0.7
+        self.threshold = 0.6
 
     def detect(self, stream):
         cap = cv2.VideoCapture(stream)
@@ -70,7 +70,13 @@ class Detector:
         r, img = cap.read()
         if img is None:
             return img
-        img = cv2.resize(img, (1000, 543))
+
+        scale_percent = 60 # percent of original size
+        width = int(img.shape[1] * scale_percent / 100)
+        height = int(img.shape[0] * scale_percent / 100)
+        dim = (width, height)
+
+        img = cv2.resize(img, dim)
 
         boxes, scores, classes, num = self.odapi.process_frame(img)
         res = False
@@ -81,9 +87,12 @@ class Detector:
                 box = boxes[i]
                 cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
                 res = True
+                return img, res
             else:
                 res = False
 
-        return img, res
+        return img, res
+
+    #def __del__(self):
 
 