fixed multiple box bug
parent 44a51015cd
commit f450cfb0e9

@@ -16,10 +16,10 @@ application = Flask(__name__)
 clients = []
 cams = []
 lastImages = list(range(0,4))
-with open("./server/clients.json", 'r', encoding='utf-8') as f:
+with open("./clients.json", 'r', encoding='utf-8') as f:
     clients = json.loads(f.read())
 
-with open("./server/cams.json", 'r', encoding='utf-8') as f:
+with open("./cams.json", 'r', encoding='utf-8') as f:
     cams = json.loads(f.read())
 
 class VideoCamera(object):
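Dropping the server/ prefix means the JSON configs now resolve against the process's working directory, so the app has to be launched from inside the server directory. A cwd-independent variant (just a sketch, not part of this commit; the pathlib/__file__ resolution is an assumption) would anchor the paths to the module itself:

import json
from pathlib import Path

# resolve the configs relative to this file so the working directory no longer matters (sketch)
BASE_DIR = Path(__file__).resolve().parent

with open(BASE_DIR / "clients.json", "r", encoding="utf-8") as f:
    clients = json.load(f)

with open(BASE_DIR / "cams.json", "r", encoding="utf-8") as f:
    cams = json.load(f)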
@@ -78,7 +78,10 @@ def main():
             print("request error")
 
         tmp = time.time()
-        img, result = detector.detect(stream)
+        try:
+            img, result = detector.detect(stream)
+        except:
+            continue
         print(cam["client_id"], result, time.time()-tmp)
         lastImages[cam["id"]] = img
 
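For context, a rough sketch of how the new try/except sits inside the polling loop; everything outside the guarded call (the while/for structure and where stream comes from) is inferred and not shown in this hunk:

def main():
    # cams, detector, lastImages and time are module-level names from the file above
    while True:
        for cam in cams:
            stream = cam["ip"]  # assumed: the stream URL comes from the cam entry
            tmp = time.time()
            try:
                img, result = detector.detect(stream)
            except:
                # a camera that fails to open or decode no longer aborts the whole loop
                continue
            print(cam["client_id"], result, time.time() - tmp)
            lastImages[cam["id"]] = img

Note that the bare except also swallows KeyboardInterrupt; narrowing it to except Exception would keep Ctrl-C usable while still skipping broken streams.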

@@ -2,7 +2,7 @@
     {
         "id": 0,
         "label": "cam1",
-        "ip": "http://89.29.108.38:80/mjpg/video.mjpg",
+        "ip": "http://77.22.202.109/mjpg/video.mjpg",
         "client_id": 0,
         "status": false,
         "x":0.2,
@@ -22,7 +22,7 @@
     {
         "id": 2,
         "label": "cam3",
-        "ip": "http://62.99.80.154:81/mjpg/video.mjpg",
+        "ip": "http://89.29.108.38:80/mjpg/video.mjpg",
         "client_id": 2,
         "status": false,
         "x":0.9,

@@ -60,9 +60,9 @@ class DetectorAPI:
 
 class Detector:
     def __init__(self):
-        self.model_path = "./server/model.pb"
+        self.model_path = "./model.pb"
         self.odapi = DetectorAPI(path_to_ckpt=self.model_path)
-        self.threshold = 0.3
+        self.threshold = 0.7
 
     def detect(self, stream):
         cap = cv2.VideoCapture(stream)
@@ -70,20 +70,20 @@ class Detector:
         r, img = cap.read()
         if img is None:
             return img
-        img = cv2.resize(img, (720, 480))
+        img = cv2.resize(img, (1000, 543))
 
         boxes, scores, classes, num = self.odapi.process_frame(img)
 
+        res = False
         for i in range(len(boxes)):
             # Class 1 represents human
             if classes[i] == 1:
                 if scores[i] > self.threshold:
                     box = boxes[i]
                     cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
-                    return img, True
-            else:
-                return img, False
-
-        res = False
+                    res = True
         return img, res
 
     #def __del__(self):
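This hunk is the actual multiple-box fix: the old loop returned img, True on the first confident person box (and img, False as soon as it hit any non-person class), so at most one rectangle was ever drawn per frame. The rewritten loop draws every box above the threshold and returns once at the end. A consolidated sketch of Detector.detect after this commit, using only the calls and names visible in the hunks above:

def detect(self, stream):
    cap = cv2.VideoCapture(stream)
    r, img = cap.read()
    if img is None:
        return img  # no frame decoded; kept as in the source (a bare value, not a tuple)
    img = cv2.resize(img, (1000, 543))

    boxes, scores, classes, num = self.odapi.process_frame(img)

    res = False
    for i in range(len(boxes)):
        # class 1 is "person": draw every confident box instead of
        # returning on the first hit
        if classes[i] == 1 and scores[i] > self.threshold:
            box = boxes[i]
            cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
            res = True
    return img, res

The early return img on a failed read still hands back a single value where the caller unpacks a tuple; that unpacking error is presumably what the new try/except in main() is there to absorb.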

@@ -1,37 +0,0 @@
-import time
-import requests
-import detector as dt
-import cv2
-
-if __name__ == "__main__":
-    t = 1 # seconds a person can leave the room for
-    t0 = time.time()
-    time.clock()
-    elapsed = 0
-    #stream = "https://192.168.178.56:8080/video"
-    stream = "http://217.128.254.187:8083/mjpg/video.mjpg"
-    detector = dt.Detector(stream)
-    music_playing = False
-
-    #cv2.startWindowThread()
-    #cv2.namedWindow("preview")
-
-    while True:
-        elapsed = time.time() - t0
-        if elapsed > t and music_playing:
-            r = requests.get("http://192.168.178.53/stop")
-            if r.status_code == 200:
-                music_playing = False
-        tmp = time.time()
-        img = detector.detect()
-        print(time.time()-tmp)
-        if img is not None and not music_playing:
-            r = requests.get("http://192.168.178.53/play")
-            if r.status_code == 200:
-                music_playing = True
-                t0 = time.time()
-
-
-        cv2.imshow("preview", img)
-        cv2.waitKey(1)
-

@@ -1,4 +1,6 @@
 requests
+flask
+gunicorn
-opencv-python
+opencv-python
 tensorflow
 numpy