added black formatting
This commit is contained in:
parent
7559d37787
commit
07484fc4f4
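The hunks below match the output of the Black code formatter: string quotes normalized to double quotes, binary operators spaced, floats given leading zeros (.5 becomes 0.5), the 0X hex prefix lowercased, magic trailing commas added to exploded call arguments, and comments normalized to start with "# ". A plausible invocation is sketched here; the exact --line-length value is an assumption inferred from the width of the reformatted lines and is not recorded in the commit:

    pip install black
    black --line-length 120 .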
@@ -16,7 +16,7 @@ class Classifier(ClassifierInterface):
         print("1")
         self.model_path = os.path.join(os.path.dirname(__file__), "./class1.pb")
         self.odapi = self.DetectorAPI(path_to_ckpt=self.model_path)
         self.threshold = 0.9
         with open(os.path.join(os.path.dirname(__file__), "coco_map.json")) as file:
             mapping = json.load(file)
         self.classes = dict()

@@ -30,7 +30,7 @@ class Classifier(ClassifierInterface):
         for i in range(len(boxes)):
             if scores[i] > self.threshold:
                 if classes[i] in self.classes:
-                    #print(self.classes[classes[i]])
+                    # print(self.classes[classes[i]])
                     return self.classes[classes[i]]

     def tagLayer(self, data):

@@ -39,8 +39,8 @@ class Classifier(ClassifierInterface):
         for cnt in cnts:
             if cnt.any():
                 cv2.imshow("changes x", cnt)
-                cv2.waitKey(10) & 0XFF
-                cnt= imutils.resize(cnt, width=320)
+                cv2.waitKey(10) & 0xFF
+                cnt = imutils.resize(cnt, width=320)
                 x = self.detect(cnt)

                 res.append(x)

@@ -49,11 +49,11 @@ class Classifier(ClassifierInterface):
         for re in res:
             if re not in di:
                 di[re] = 0
-            di[re]+=1
+            di[re] += 1

         # remove all tags that occour infrequently
         # if a giraff is only seen in 2 out of 100 frames, there probably wasn't a giraff in the layer
         #
         di.pop(None, None)
         total = 0
         for value in di.values():

@@ -71,7 +71,7 @@ class Classifier(ClassifierInterface):
     class DetectorAPI:
         def __init__(self, path_to_ckpt):
             self.path_to_ckpt = path_to_ckpt
-            gpus = tf.config.experimental.list_physical_devices('GPU')
+            gpus = tf.config.experimental.list_physical_devices("GPU")
             if gpus:
                 try:
                     for gpu in gpus:

@@ -81,23 +81,23 @@ class Classifier(ClassifierInterface):
             self.detection_graph = tf.Graph()
             with self.detection_graph.as_default():
                 od_graph_def = tf.GraphDef()
-                with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
+                with tf.gfile.GFile(self.path_to_ckpt, "rb") as fid:
                     serialized_graph = fid.read()
                     od_graph_def.ParseFromString(serialized_graph)
-                    tf.import_graph_def(od_graph_def, name='')
+                    tf.import_graph_def(od_graph_def, name="")

             self.default_graph = self.detection_graph.as_default()
             self.sess = tf.Session(graph=self.detection_graph)

             # Definite input and output Tensors for detection_graph
-            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
+            self.image_tensor = self.detection_graph.get_tensor_by_name("image_tensor:0")
             # Each box represents a part of the image where a particular object was detected.
-            self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
+            self.detection_boxes = self.detection_graph.get_tensor_by_name("detection_boxes:0")
             # Each score represent how level of confidence for each of the objects.
             # Score is shown on the result image, together with the class label.
-            self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
-            self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
-            self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
+            self.detection_scores = self.detection_graph.get_tensor_by_name("detection_scores:0")
+            self.detection_classes = self.detection_graph.get_tensor_by_name("detection_classes:0")
+            self.num_detections = self.detection_graph.get_tensor_by_name("num_detections:0")

         def process_frame(self, image):
             # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]

@@ -106,16 +106,17 @@ class Classifier(ClassifierInterface):

             (boxes, scores, classes, num) = self.sess.run(
                 [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
-                feed_dict={self.image_tensor: image_np_expanded})
+                feed_dict={self.image_tensor: image_np_expanded},
+            )

-            im_height, im_width,_ = image.shape
+            im_height, im_width, _ = image.shape
             boxes_list = [None for i in range(boxes.shape[1])]
             for i in range(boxes.shape[1]):
                 boxes_list[i] = (
                     int(boxes[0, i, 0] * im_height),
                     int(boxes[0, i, 1] * im_width),
                     int(boxes[0, i, 2] * im_height),
-                    int(boxes[0, i, 3] * im_width)
+                    int(boxes[0, i, 3] * im_width),
                 )

             return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

@@ -1,8 +1,9 @@
 from abc import ABC, abstractmethod

+
 class ClassifierInterface(ABC):
     @abstractmethod
     def tagLayer(self, layers):
         """takes filled contours of one frame, returns list (len(), same as input)
         of lists with tags for corresponfing contours"""
         pass

@@ -9,20 +9,21 @@ from Application.Classifiers.ClassifierInterface import ClassifierInterface

 class Classifier(ClassifierInterface):
     def __init__(self):
-        self.threshold = .5
+        self.threshold = 0.5
         with open(os.path.join(os.path.dirname(__file__), "coco_map.json")) as file:
             mapping = json.load(file)
         self.classes = dict()
         for element in mapping:
-            self.classes[element["id"]-1] = element["display_name"]
+            self.classes[element["id"] - 1] = element["display_name"]

-        self.net = cv2.dnn.readNet(os.path.join(os.path.dirname(
-            __file__), "yolov4.weights"), os.path.join(os.path.dirname(__file__), "yolov4.cfg"))
+        self.net = cv2.dnn.readNet(
+            os.path.join(os.path.dirname(__file__), "yolov4.weights"),
+            os.path.join(os.path.dirname(__file__), "yolov4.cfg"),
+        )
         # self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
         # self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
         self.layer_names = self.net.getLayerNames()
-        self.outputlayers = [self.layer_names[i[0] - 1]
-                             for i in self.net.getUnconnectedOutLayers()]
+        self.outputlayers = [self.layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]

         print("Classifier Initiated")

@@ -40,8 +41,7 @@ class Classifier(ClassifierInterface):
         else:
             img2 = np.zeros(shape=[320, 320, 3], dtype=np.uint8)
             img2[:height, :width] = contour
-        blob = cv2.dnn.blobFromImage(
-            img2, 1/256, (320, 320), (0, 0, 0), True, crop=False)  # reduce 416 to 320
+        blob = cv2.dnn.blobFromImage(img2, 1 / 256, (320, 320), (0, 0, 0), True, crop=False)  # reduce 416 to 320
         self.net.setInput(blob)
         outs = self.net.forward(self.outputlayers)
         for out in outs:

@@ -52,11 +52,8 @@ class Classifier(ClassifierInterface):
                 if confidence > self.threshold:
                     if self.classes[class_id] not in results:
                         cv2.imshow("changes x", img2)
-                        cv2.waitKey(10) & 0XFF
+                        cv2.waitKey(10) & 0xFF
                         results.append(self.classes[class_id])
-                    #print(self.classes[x], score)
+                    # print(self.classes[x], score)

         return results
-
-
-

@@ -1,32 +1,31 @@
-
 class Config:
     c = {
-        "min_area" : 100,
-        "max_area" : 900000,
-        "threashold" : 7,
-        "resizeWidth" : 500,
-        "inputPath" : None,
+        "min_area": 100,
+        "max_area": 900000,
+        "threashold": 7,
+        "resizeWidth": 500,
+        "inputPath": None,
         "outputPath": None,
         "maxLayerLength": 5000,
         "minLayerLength": 40,
         "tolerance": 20,
         "maxLength": None,
         "ttolerance": 60,
         "videoBufferLength": 250,
         "LayersPerContour": 220,
-        "avgNum":10
+        "avgNum": 10,
     }

     def __init__(self):
-        '''This is basically just a wrapper for a json / python dict'''
+        """This is basically just a wrapper for a json / python dict"""
         print("Current Config:")
         for key, value in self.c.items():
             print(f"{key}:\t\t{value}")

     def __getitem__(self, key):
         if key not in self.c:
             return None
         return self.c[key]

     def __setitem__(self, key, value):
         self.c[key] = value

@@ -11,6 +11,7 @@ import cv2
 import numpy as np
 import os

+
 class ContourExtractor:

     # extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }

@@ -51,10 +52,9 @@ class ContourExtractor:
         with ThreadPool(2) as pool:
             while not videoReader.videoEnded():
                 if videoReader.buffer.qsize() == 0:
-                    time.sleep(.5)
+                    time.sleep(0.5)

-                tmpData = [videoReader.pop()
-                           for i in range(0, videoReader.buffer.qsize())]
+                tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
                 pool.map(self.computeMovingAverage, (tmpData,))
                 pool.map(self.async2, (tmpData,))
                 # for data in tmpData:

@@ -75,20 +75,20 @@ class ContourExtractor:
                 time.sleep(0.1)
             firstFrame = self.averages.pop(frameCount, None)

-            if frameCount % (10*self.fps) == 1:
+            if frameCount % (10 * self.fps) == 1:
                 print(
-                    f" \r \033[K {round((frameCount/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s", end='\r')
+                    f" \r \033[K {round((frameCount/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s",
+                    end="\r",
+                )

             gray = self.prepareFrame(frame)
             frameDelta = cv2.absdiff(gray, firstFrame)
-            thresh = cv2.threshold(frameDelta, self.threashold,
-                                   255, cv2.THRESH_BINARY)[1]
+            thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
             # dilate the thresholded image to fill in holes, then find contours
             thresh = cv2.dilate(thresh, None, iterations=10)
-            #cv2.imshow("changes x", thresh)
-            #cv2.waitKey(10) & 0XFF
-            cnts = cv2.findContours(
-                thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+            # cv2.imshow("changes x", thresh)
+            # cv2.waitKey(10) & 0XFF
+            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
             cnts = imutils.grab_contours(cnts)

             contours = []

@@ -101,7 +101,7 @@ class ContourExtractor:
                 contours.append((x, y, w, h))
                 # the mask has to be packed like this, since np doesn't have a bit array,
                 # meaning every bit in the mask would take up 8bits, which migth be too much
-                masks.append(np.packbits(np.copy(thresh[y:y+h, x:x+w]), axis=0))
+                masks.append(np.packbits(np.copy(thresh[y : y + h, x : x + w]), axis=0))

             if len(contours) != 0 and contours is not None:
                 # this should be thread safe

@@ -131,8 +131,7 @@ class ContourExtractor:
         if self.lastFrames is not None:
             frames = self.lastFrames + frames

-        tmp = [[j, frames, averageFrames]
-               for j in range(averageFrames, len(frames))]
+        tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
         with ThreadPool(os.cpu_count()) as pool:
             pool.map(self.averageDaFrames, tmp)

@@ -143,7 +142,7 @@ class ContourExtractor:
         frameNumber, frame = frames[j]
         frame = self.prepareFrame(frame)

-        avg = frame/averageFrames
-        for jj in range(0, averageFrames-1):
-            avg += self.prepareFrame(frames[j-jj][1])/averageFrames
+        avg = frame / averageFrames
+        for jj in range(0, averageFrames - 1):
+            avg += self.prepareFrame(frames[j - jj][1]) / averageFrames
         self.averages[frameNumber] = np.array(np.round(avg), dtype=np.uint8)

@@ -7,7 +7,7 @@ import imutils
 import numpy as np
 import cv2
 import pickle
 import time


 class Exporter:

@@ -43,8 +43,7 @@ class Exporter:

         start = time.time()
         for i, layer in enumerate(layers):
-            print(
-                f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
+            print(f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end="\r")
             if len(layer.bounds[0]) == 0:
                 continue
             videoReader = VideoReader(self.config)

@@ -58,16 +57,21 @@ class Exporter:
                 if x is None:
                     continue
                 factor = videoReader.w / self.resizeWidth
-                x, y, w, h = (int(x * factor), int(y * factor),
-                              int(w * factor), int(h * factor))
+                x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

-                frame2[y:y+h, x:x+w] = np.copy(frame[y:y+h, x:x+w])
+                frame2[y : y + h, x : x + w] = np.copy(frame[y : y + h, x : x + w])

-                timestr = datetime.fromtimestamp(
-                    int(frameCount/self.fps) + videoReader.getStartTime())
-                cv2.putText(frame2, str(i) + " " + f"{timestr.hour}:{timestr.minute}:{timestr.second}", (int(
-                    x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
-                #cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
+                timestr = datetime.fromtimestamp(int(frameCount / self.fps) + videoReader.getStartTime())
+                cv2.putText(
+                    frame2,
+                    str(i) + " " + f"{timestr.hour}:{timestr.minute}:{timestr.second}",
+                    (int(x + w / 2), int(y + h / 2)),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    1,
+                    (255, 255, 255),
+                    2,
+                )
+                # cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
                 writer.append_data(frame2)
             videoReader.vc.release()
             videoReader.thread.join()

@@ -83,7 +87,7 @@ class Exporter:
         maxLength = self.getMaxLengthOfLayers(layers)
         underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
-        #underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
+        # underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
         frames = []
         for i in range(maxLength):
             frames.append(np.copy(underlay))

@@ -91,8 +95,8 @@ class Exporter:

         while not videoReader.videoEnded():
             frameCount, frame = videoReader.pop()
-            if frameCount % (60*self.fps) == 0:
-                print("Minutes processed: ", frameCount/(60*self.fps), end="\r")
+            if frameCount % (60 * self.fps) == 0:
+                print("Minutes processed: ", frameCount / (60 * self.fps), end="\r")
             if frame is None:
                 print("ContourExtractor: frame was None")
                 continue

@@ -103,31 +107,45 @@ class Exporter:
                 for i in range(0, len(layer.bounds[frameCount - layer.startFrame])):
                     try:
                         underlay1 = underlay
-                        (x, y, w,
-                         h) = layer.bounds[frameCount - layer.startFrame][i]
+                        (x, y, w, h) = layer.bounds[frameCount - layer.startFrame][i]
                         mask = layer.masks[frameCount - layer.startFrame][i]
                         if x is None:
                             break
                         factor = videoReader.w / self.resizeWidth
-                        x, y, w, h = (int(x * factor), int(y * factor),
-                                      int(w * factor), int(h * factor))
+                        x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

-                        mask = imutils.resize(mask, width=w, height=h+1)
+                        mask = imutils.resize(mask, width=w, height=h + 1)
                         mask = np.resize(mask, (h, w))
                         mask = cv2.erode(mask, None, iterations=10)
                         mask *= 255
                         frame2 = frames[frameCount - layer.startFrame + layer.exportOffset]
-                        xx = np.copy(cv2.bitwise_and(
-                            frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
-                        frame2[y:y+h, x:x+w] = cv2.addWeighted(xx, 1, np.copy(
-                            cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)), 1, 0)
+                        xx = np.copy(
+                            cv2.bitwise_and(
+                                frame2[y : y + h, x : x + w],
+                                frame2[y : y + h, x : x + w],
+                                mask=cv2.bitwise_not(mask),
+                            )
+                        )
+                        frame2[y : y + h, x : x + w] = cv2.addWeighted(
+                            xx,
+                            1,
+                            np.copy(cv2.bitwise_and(frame[y : y + h, x : x + w], frame[y : y + h, x : x + w], mask=mask)),
+                            1,
+                            0,
+                        )
                         frames[frameCount - layer.startFrame + layer.exportOffset] = np.copy(frame2)
-                        #cv2.imshow("changes x", frame2)
-                        #cv2.waitKey(10) & 0XFF
-                        time = datetime.fromtimestamp(
-                            int(frameCount/self.fps) + videoReader.getStartTime())
-                        cv2.putText(frames[frameCount - layer.startFrame + layer.exportOffset], f"{time.hour}:{time.minute}:{time.second}", (int(
-                            x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+                        # cv2.imshow("changes x", frame2)
+                        # cv2.waitKey(10) & 0XFF
+                        time = datetime.fromtimestamp(int(frameCount / self.fps) + videoReader.getStartTime())
+                        cv2.putText(
+                            frames[frameCount - layer.startFrame + layer.exportOffset],
+                            f"{time.hour}:{time.minute}:{time.second}",
+                            (int(x + w / 2), int(y + h / 2)),
+                            cv2.FONT_HERSHEY_SIMPLEX,
+                            1,
+                            (255, 255, 255),
+                            2,
+                        )
                     except:
                         continue
         videoReader.thread.join()

@@ -153,10 +171,9 @@ class Exporter:
         return maxLength

     def makeListOfFrames(self, layers):
-        '''Returns set of all Frames which are relavant to the Layers'''
+        """Returns set of all Frames which are relavant to the Layers"""
         frameNumbers = set()
         for layer in layers:
-            frameNumbers.update(
-                list(range(layer.startFrame, layer.startFrame + len(layer))))
+            frameNumbers.update(list(range(layer.startFrame, layer.startFrame + len(layer))))

         return sorted(list(frameNumbers))

@@ -1,8 +1,9 @@
 import numpy as np
 from matplotlib import pyplot as plt

+
 class HeatMap:
-    def __init__(self, x, y, contours, resizeFactor = 1):
+    def __init__(self, x, y, contours, resizeFactor=1):
         self.imageBW = np.zeros(shape=[y, x, 3], dtype=np.float64)
         self._resizeFactor = resizeFactor
         self._createImage(contours)

@@ -10,11 +11,16 @@ class HeatMap:
     def _createImage(self, contours):
         for contour in contours:
             for x, y, w, h in contour:
-                x, y, w, h = x*self._resizeFactor, y*self._resizeFactor, w*self._resizeFactor, h*self._resizeFactor
-                self.imageBW[int(y):int(y+h), int(x):int(x+w)] += 1
+                x, y, w, h = (
+                    x * self._resizeFactor,
+                    y * self._resizeFactor,
+                    w * self._resizeFactor,
+                    h * self._resizeFactor,
+                )
+                self.imageBW[int(y) : int(y + h), int(x) : int(x + w)] += 1

-        self.imageBW = np.nan_to_num(self.imageBW/ self.imageBW.sum(axis=1)[:, np.newaxis], 0)
+        self.imageBW = np.nan_to_num(self.imageBW / self.imageBW.sum(axis=1)[:, np.newaxis], 0)

     def showImage(self):
-        plt.imshow(self.imageBW*255)
+        plt.imshow(self.imageBW * 255)
         plt.show()

@@ -1,5 +1,6 @@
 import pickle

+
 class Importer:
     def __init__(self, config):
         self.path = config["importPath"]

@@ -8,4 +9,4 @@ class Importer:
         print("Loading previous results")
         with open(self.path, "rb") as file:
             layers, contours, masks = pickle.load(file)
         return (layers, contours, masks)

@@ -2,24 +2,25 @@ import numpy as np
 import cv2
 import imutils

+
 class Layer:
-    #bounds = [[(x,y,w,h), ],]
+    # bounds = [[(x,y,w,h), ],]

     startFrame = None
     lastFrame = None
     length = None

     def __init__(self, startFrame, data, mask, config):
-        '''returns a Layer object
+        """returns a Layer object

         Layers are collections of contours with a StartFrame,
         which is the number of the frame the first contour of
         this layer was extraced from

         A Contour is a CV2 Contour, which is a y*x*3 rgb numpy array,
         but we only care about the corners of the contours.
         So we save the bounds (x,y,w,h) in bounds[] and the actual content in data[]
-        '''
+        """
         self.startFrame = startFrame
         self.lastFrame = startFrame
         self.config = config

@@ -31,10 +32,9 @@ class Layer:

         self.bounds.append([data])
         self.masks.append([mask])
-        #print("Layer constructed")

     def add(self, frameNumber, bound, mask):
-        '''Adds a bound to the Layer at the layer index which corresponds to the given framenumber'''
+        """Adds a bound to the Layer at the layer index which corresponds to the given framenumber"""
         index = frameNumber - self.startFrame
         if index < 0:
             return

@@ -48,34 +48,6 @@ class Layer:
         self.bounds[index].append(bound)
         self.masks[index].append(mask)

-
-    def calcStats(self):
-        '''calculates average distance, variation and deviation of layer movement'''
-        middles = []
-        for i, bounds in enumerate(self.bounds):
-            for j, bound in enumerate(bounds):
-                if None in bound:
-                    continue
-                x = (bound[0] + bound[2]/2)
-                y = (bound[1] + bound[3]/2)
-                middles.append([x,y])
-
-        avg = 0
-        for i in range(1, len(middles), 2):
-            avg += (((float(middles[i][0]-middles[i-1][0])/len(middles))**2 + float(middles[i][1]-middles[i-1][1])/len(middles))**2)**(1/2)
-        self.stats = dict()
-        self.stats["avg"] = round(avg,2)
-
-        x=0
-        for i in range(1, len(middles), 2):
-            x += (((((float(middles[i][0]-middles[i-1][0])/len(middles))**2 + float(middles[i][1]-middles[i-1][1])/len(middles))**2)**(1/2)) - avg)**2
-
-        x /= (len(middles)-1)
-
-        self.stats["var"] = round(x,2)
-        self.stats["dev"] = round((x)**(1/2), 2)
-
     def getLength(self):
         return len(self) + self.exportOffset

@@ -84,20 +56,20 @@ class Layer:
         return self.length

     def spaceOverlaps(self, layer2):
-        '''Checks if there is an overlap in the bounds of current layer with given layer'''
+        """Checks if there is an overlap in the bounds of current layer with given layer"""
         overlap = False
         maxLen = min(len(layer2.bounds), len(self.bounds))
         bounds = self.bounds[:maxLen]
         for b1s, b2s in zip(bounds[::10], layer2.bounds[:maxLen:10]):
             for b1 in b1s:
                 for b2 in b2s:
-                    if self.contoursOverlay((b1[0], b1[1]+b1[3]), (b1[0]+b1[2], b1[1]), (b2[0], b2[1]+b2[3]), (b2[0]+b2[2], b2[1])):
+                    if self.contoursOverlay((b1[0], b1[1] + b1[3]), (b1[0] + b1[2], b1[1]), (b2[0], b2[1] + b2[3]), (b2[0] + b2[2], b2[1])):
                         overlap = True
                         break
         return overlap

     def timeOverlaps(self, layer2):
-        '''Checks for overlap in time between current and given layer'''
+        """Checks for overlap in time between current and given layer"""
         s1 = self.exportOffset
         e1 = self.lastFrame - self.startFrame + self.exportOffset
         s2 = layer2.exportOffset

@@ -111,9 +83,8 @@ class Layer:
             return False

     def contoursOverlay(self, l1, r1, l2, r2):
-        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
+        if l1[0] >= r2[0] or l2[0] >= r1[0]:
             return False
-        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
+        if l1[1] <= r2[1] or l2[1] <= r1[1]:
             return False
         return True
-

@@ -7,6 +7,7 @@ from multiprocessing.pool import ThreadPool
 import numpy as np
 import os

+
 class LayerFactory:
     def __init__(self, config, data=None):
         self.data = {}

@@ -24,7 +25,7 @@ class LayerFactory:
         self.extractLayers(data)

     def extractLayers(self, data, maskArr):
-        '''Bundle given contours together into Layer Objects'''
+        """Bundle given contours together into Layer Objects"""

         frameNumber = min(data)
         contours = data[frameNumber]

@@ -40,15 +41,15 @@ class LayerFactory:
         for frameNumber in sorted(data.keys()):
             contours = data[frameNumber]
             masks = maskArr[frameNumber]
-            masks = [np.unpackbits(mask, axis=0)
-                     for mask, contours in zip(masks, contours)]
+            masks = [np.unpackbits(mask, axis=0) for mask, contours in zip(masks, contours)]
             if frameNumber % 100 == 0:
                 print(
-                    f" {int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers", end='\r')
+                    f" {int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers",
+                    end="\r",
+                )

-            tmp = [[frameNumber, contour, mask]
-                   for contour, mask in zip(contours, masks)]
-            #pool.map(self.getLayers, tmp)
+            tmp = [[frameNumber, contour, mask] for contour, mask in zip(contours, masks)]
+            # pool.map(self.getLayers, tmp)
             for x in tmp:
                 self.getLayers(x)

@@ -68,22 +69,20 @@ class LayerFactory:
                 continue

             lastXframes = min(40, len(layer))
-            lastBounds = [bound for bounds in layer.bounds[-lastXframes:]
-                          for bound in bounds]
+            lastBounds = [bound for bounds in layer.bounds[-lastXframes:] for bound in bounds]

             for j, bounds in enumerate(sorted(lastBounds, reverse=True)):
                 if bounds is None:
                     break
                 (x2, y2, w2, h2) = bounds
-                if self.contoursOverlay((x-tol, y+h+tol), (x+w+tol, y-tol), (x2, y2+h2), (x2+w2, y2)):
+                if self.contoursOverlay((x - tol, y + h + tol), (x + w + tol, y - tol), (x2, y2 + h2), (x2 + w2, y2)):
                     layer.add(frameNumber, (x, y, w, h), mask)
                     foundLayerIDs.add(i)
                     break

         foundLayerIDs = sorted(list(foundLayerIDs))
         if len(foundLayerIDs) == 0:
-            self.layers.append(
-                Layer(frameNumber, (x, y, w, h), mask, self.config))
+            self.layers.append(Layer(frameNumber, (x, y, w, h), mask, self.config))
         if len(foundLayerIDs) > 1:
             self.mergeLayers(foundLayerIDs)

@@ -118,8 +117,8 @@ class LayerFactory:
             for lc2, l2 in enumerate(pL):
                 if lc2 == lc:
                     continue
-                for cnt in l.bounds[x-l.startFrame]:
-                    for cnt2 in l2.bounds[x-l2.startFrame]:
+                for cnt in l.bounds[x - l.startFrame]:
+                    for cnt2 in l2.bounds[x - l2.startFrame]:
                         if self.contoursOverlay(cnt, cnt2):
                             merge.add(indexes[lc])
                             merge.add(indexes[lc2])

@@ -152,9 +151,9 @@ class LayerFactory:
         return maxFrame

     def contoursOverlay(self, l1, r1, l2, r2):
-        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
+        if l1[0] >= r2[0] or l2[0] >= r1[0]:
             return False
-        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
+        if l1[1] <= r2[1] or l2[1] <= r1[1]:
             return False
         return True

@@ -20,7 +20,7 @@ class LayerManager:
         self.resizeWidth = config["resizeWidth"]
         self.footagePath = config["inputPath"]
         self.config = config
-        #self.classifier = Classifier()
+        # self.classifier = Classifier()
         self.tags = []
         print("LayerManager constructed")

@@ -67,13 +67,12 @@ class LayerManager:
         self.layers = layers

     def tagLayers(self):
-        '''Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier'''
+        """Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier"""
         print("Tagging Layers")
         exporter = Exporter(self.config)
         start = time.time()
         for i, layer in enumerate(self.layers):
-            print(
-                f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
+            print(f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
             start = time.time()
             if len(layer.bounds[0]) == 0:
                 continue

@@ -94,7 +93,7 @@ class LayerManager:
                 y = int(y * factor)
                 w = int(w * factor)
                 h = int(h * factor)
-                data.append(np.copy(frame[y:y+h, x:x+w]))
+                data.append(np.copy(frame[y : y + h, x : x + w]))
             layer.data.append(data)
             tags = self.classifier.tagLayer(layer.data)
             print(tags)

@@ -109,8 +108,7 @@ class LayerManager:
         lenL = len(self.layers)
         for i in range(1, len(self.layers)):
             layer = self.layers[i]
-            print(
-                f"\r {i}/{lenL}", end='\r')
+            print(f"\r {i}/{lenL}", end="\r")
             overlap = True
             tries = 1
             while overlap:

@@ -123,5 +121,5 @@ class LayerManager:
                     self.layers[i].exportOffset += 20 * tries
                     tries += 1

-            #if self.layers[i].exportOffset >= 300000:
+            # if self.layers[i].exportOffset >= 300000:
             #    break

@@ -4,6 +4,7 @@ import cv2
 import threading
 import os

+
 class VideoReader:
     listOfFrames = None
     w = None

@@ -26,7 +27,7 @@ class VideoReader:
         self.calcStartTime()
         if setOfFrames is not None:
             self.listOfFrames = sorted(setOfFrames)

     def __enter__(self):
         self.fillBuffer()
         return self

@@ -47,14 +48,13 @@ class VideoReader:
         self.listOfFrames = listOfFrames

         if self.listOfFrames is not None:
-            self.thread = threading.Thread(
-                target=self.readFramesByList, args=())
+            self.thread = threading.Thread(target=self.readFramesByList, args=())
         else:
             self.thread = threading.Thread(target=self.readFrames, args=())
         self.thread.start()

     def readFrames(self):
-        '''Reads video from start to finish'''
+        """Reads video from start to finish"""
         while self.lastFrame < self.endFrame:
             res, frame = self.vc.read()
             if res:

@@ -64,7 +64,7 @@ class VideoReader:
         self.stopped = True

     def readFramesByList(self):
-        '''Reads all frames from a list of frame numbers'''
+        """Reads all frames from a list of frame numbers"""
         self.vc.set(1, self.listOfFrames[0])
         self.lastFrame = self.listOfFrames[0]
         self.endFrame = self.listOfFrames[-1]

@@ -120,7 +120,7 @@ class VideoReader:
         return self.starttime

     def getWH(self):
-        '''get width and height'''
+        """get width and height"""
        if self.w is None or self.h is None:
             res, image = self.vc.read()
             self.w = image.shape[1]

@@ -1,6 +1,5 @@
-
 import math
 from PIL import Image, ImageDraw
 import random
 import imageio
 import glob

@@ -16,10 +15,12 @@ length = 1  # in minutes
 numberOfEvents = 4

 dirname = os.path.dirname(__file__)
-outputPath = os.path.join(dirname, 'out.mp4')
+outputPath = os.path.join(dirname, "out.mp4")

+
 def getRandomColorString():
-    return '#{:06x}'.format(random.randint(0, 256**3))
+    return "#{:06x}".format(random.randint(0, 256 ** 3))

+
 def genVideo():
     writer = imageio.get_writer(outputPath, fps=fps)

@@ -27,29 +28,27 @@ def genVideo():
     writer.append_data(np.zeros(shape=[1080, 1920, 3], dtype=np.uint8))

     for i in range(numberOfEvents):
         objectWidth = (5 + random.randint(0, 5)) * xmax / 100
         objectHeight = (10 + random.randint(-5, 5)) * ymax / 100

         objectX = random.randint(0, xmax)
         objectY = random.randint(0, ymax)

-        objectSpeedX = random.randint( 1 ,5 )
-        objectSpeedY = random.randint( 1, 5 )
+        objectSpeedX = random.randint(1, 5)
+        objectSpeedY = random.randint(1, 5)
         color = getRandomColorString()

-        for j in range(int(fps*length*60 / numberOfEvents)):
+        for j in range(int(fps * length * 60 / numberOfEvents)):
             objectX -= objectSpeedX
             objectY -= objectSpeedY

-            objectShape = [
-                (objectX, objectY),
-                (objectX + objectWidth, objectY + objectHeight)
-            ]
-            img = Image.new("RGB", (xmax, ymax))
-            img1 = ImageDraw.Draw(img)
-            img1.rectangle(objectShape, fill = color)
+            objectShape = [(objectX, objectY), (objectX + objectWidth, objectY + objectHeight)]
+            img = Image.new("RGB", (xmax, ymax))
+            img1 = ImageDraw.Draw(img)
+            img1.rectangle(objectShape, fill=color)
             writer.append_data(np.array(img))

     writer.close()

+
 genVideo()

@@ -1,34 +1,34 @@
-#python
+# python
 import cv2
 import imageio
 import time

 writer = imageio.get_writer("./x23.mp4", fps=15)

 url = "http://50.227.41.1/mjpg/video.mjpg"
 i = 0
 cap = cv2.VideoCapture(url)
-while True :
+while True:
     try:
         if i < 10:
-            i+=1
+            i += 1
             continue


         result, frame = cap.read()

         if result == False:
             print("Error in cap.read()") # this is for preventing a breaking error
             # break;
             time.sleep(1)
             break
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         writer.append_data(frame)
-        i+=1
+        i += 1

-        if i > 20*60*60*2:
+        if i > 20 * 60 * 60 * 2:
             break
     except Exception as e:
         print("meh")
 cap.release()
 cv2.destroyAllWindows()
 writer.close()

main.py
@@ -22,8 +22,7 @@ def main():

     config["inputPath"] = os.path.join(dirName, fileName)
     config["outputPath"] = os.path.join(outputPath, fileName)
-    config["importPath"] = os.path.join(
-        outputPath, fileName.split(".")[0] + ".txt")
+    config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
     config["w"], config["h"] = VideoReader(config).getWH()

     if not os.path.exists(config["importPath"]):

@@ -38,13 +37,12 @@ def main():
     layerManager = LayerManager(config, layers)
     layerManager.transformLayers()

-    #layerManager.tagLayers()
+    # layerManager.tagLayers()
     layers = layerManager.layers
     if len(layers) == 0:
         exit(1)

-    heatmap = HeatMap(1920, 1088, [contour for layer in layers for contour in layer.bounds], 1920/config["resizeWidth"])
+    heatmap = HeatMap(1920, 1088, [contour for layer in layers for contour in layer.bounds], 1920 / config["resizeWidth"])
     heatmap.showImage()

     exporter = Exporter(config)