added black formatting
This commit is contained in:
parent
7559d37787
commit
07484fc4f4
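The whole commit is mechanical reformatting, so the hunks below follow a handful of Black rules: double quotes, spaces around operators, magic trailing commas, exploded long call sites, and two blank lines around top-level definitions. A minimal before/after sketch (hypothetical fragment, not from this repo; Black defaults to an 88-character line length, though this project appears to use a longer limit):

    # before
    threshold = .5
    name = 'GPU'
    count =1
    count+=1

    # after running `black .`
    threshold = 0.5
    name = "GPU"
    count = 1
    count += 1
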
@@ -16,7 +16,7 @@ class Classifier(ClassifierInterface):
         print("1")
         self.model_path = os.path.join(os.path.dirname(__file__), "./class1.pb")
         self.odapi = self.DetectorAPI(path_to_ckpt=self.model_path)
-        self.threshold = 0.9
+        self.threshold = 0.9
         with open(os.path.join(os.path.dirname(__file__), "coco_map.json")) as file:
             mapping = json.load(file)
         self.classes = dict()
@@ -30,7 +30,7 @@ class Classifier(ClassifierInterface):
         for i in range(len(boxes)):
             if scores[i] > self.threshold:
                 if classes[i] in self.classes:
-                    #print(self.classes[classes[i]])
+                    # print(self.classes[classes[i]])
                     return self.classes[classes[i]]

     def tagLayer(self, data):
@@ -39,8 +39,8 @@ class Classifier(ClassifierInterface):
         for cnt in cnts:
             if cnt.any():
                 cv2.imshow("changes x", cnt)
-                cv2.waitKey(10) & 0XFF
-                cnt= imutils.resize(cnt, width=320)
+                cv2.waitKey(10) & 0xFF
+                cnt = imutils.resize(cnt, width=320)
                 x = self.detect(cnt)

                 res.append(x)
@@ -49,11 +49,11 @@ class Classifier(ClassifierInterface):
         for re in res:
             if re not in di:
                 di[re] = 0
-            di[re]+=1
+            di[re] += 1

         # remove all tags that occour infrequently
         # if a giraff is only seen in 2 out of 100 frames, there probably wasn't a giraff in the layer
-        #
+        #
         di.pop(None, None)
         total = 0
         for value in di.values():
@@ -71,7 +71,7 @@ class Classifier(ClassifierInterface):
     class DetectorAPI:
         def __init__(self, path_to_ckpt):
             self.path_to_ckpt = path_to_ckpt
-            gpus = tf.config.experimental.list_physical_devices('GPU')
+            gpus = tf.config.experimental.list_physical_devices("GPU")
             if gpus:
                 try:
                     for gpu in gpus:
@@ -81,23 +81,23 @@ class Classifier(ClassifierInterface):
             self.detection_graph = tf.Graph()
             with self.detection_graph.as_default():
                 od_graph_def = tf.GraphDef()
-                with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
+                with tf.gfile.GFile(self.path_to_ckpt, "rb") as fid:
                     serialized_graph = fid.read()
                     od_graph_def.ParseFromString(serialized_graph)
-                tf.import_graph_def(od_graph_def, name='')
+                tf.import_graph_def(od_graph_def, name="")

             self.default_graph = self.detection_graph.as_default()
             self.sess = tf.Session(graph=self.detection_graph)

             # Definite input and output Tensors for detection_graph
-            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
+            self.image_tensor = self.detection_graph.get_tensor_by_name("image_tensor:0")
             # Each box represents a part of the image where a particular object was detected.
-            self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
+            self.detection_boxes = self.detection_graph.get_tensor_by_name("detection_boxes:0")
             # Each score represent how level of confidence for each of the objects.
             # Score is shown on the result image, together with the class label.
-            self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
-            self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
-            self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
+            self.detection_scores = self.detection_graph.get_tensor_by_name("detection_scores:0")
+            self.detection_classes = self.detection_graph.get_tensor_by_name("detection_classes:0")
+            self.num_detections = self.detection_graph.get_tensor_by_name("num_detections:0")

         def process_frame(self, image):
             # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]
@@ -106,16 +106,17 @@ class Classifier(ClassifierInterface):

             (boxes, scores, classes, num) = self.sess.run(
                 [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
-                feed_dict={self.image_tensor: image_np_expanded})
+                feed_dict={self.image_tensor: image_np_expanded},
+            )

-            im_height, im_width,_ = image.shape
+            im_height, im_width, _ = image.shape
             boxes_list = [None for i in range(boxes.shape[1])]
             for i in range(boxes.shape[1]):
                 boxes_list[i] = (
                     int(boxes[0, i, 0] * im_height),
                     int(boxes[0, i, 1] * im_width),
                     int(boxes[0, i, 2] * im_height),
-                    int(boxes[0, i, 3] * im_width)
+                    int(boxes[0, i, 3] * im_width),
                 )

             return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

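As an aside, the box arithmetic in process_frame converts the model's normalized [0, 1] coordinates into pixel coordinates; a standalone sketch of the same conversion (the function name is illustrative):

    import numpy as np

    def to_pixel_boxes(boxes, im_height, im_width):
        # boxes has shape (1, N, 4) holding normalized (ymin, xmin, ymax, xmax)
        return [
            (
                int(boxes[0, i, 0] * im_height),
                int(boxes[0, i, 1] * im_width),
                int(boxes[0, i, 2] * im_height),
                int(boxes[0, i, 3] * im_width),
            )
            for i in range(boxes.shape[1])
        ]

    print(to_pixel_boxes(np.array([[[0.1, 0.2, 0.5, 0.8]]]), 720, 1280))  # [(72, 256, 360, 1024)]
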
@@ -1,8 +1,9 @@
 from abc import ABC, abstractmethod

+
 class ClassifierInterface(ABC):
     @abstractmethod
     def tagLayer(self, layers):
-        """takes filled contours of one frame, returns list (len(), same as input)
+        """takes filled contours of one frame, returns list (len(), same as input)
         of lists with tags for corresponfing contours"""
         pass

@@ -9,20 +9,21 @@ from Application.Classifiers.ClassifierInterface import ClassifierInterface

 class Classifier(ClassifierInterface):
     def __init__(self):
-        self.threshold = .5
+        self.threshold = 0.5
         with open(os.path.join(os.path.dirname(__file__), "coco_map.json")) as file:
             mapping = json.load(file)
         self.classes = dict()
         for element in mapping:
-            self.classes[element["id"]-1] = element["display_name"]
+            self.classes[element["id"] - 1] = element["display_name"]

-        self.net = cv2.dnn.readNet(os.path.join(os.path.dirname(
-            __file__), "yolov4.weights"), os.path.join(os.path.dirname(__file__), "yolov4.cfg"))
+        self.net = cv2.dnn.readNet(
+            os.path.join(os.path.dirname(__file__), "yolov4.weights"),
+            os.path.join(os.path.dirname(__file__), "yolov4.cfg"),
+        )
         # self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
         # self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
         self.layer_names = self.net.getLayerNames()
-        self.outputlayers = [self.layer_names[i[0] - 1]
-                             for i in self.net.getUnconnectedOutLayers()]
+        self.outputlayers = [self.layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]

         print("Classifier Initiated")
@@ -40,8 +41,7 @@ class Classifier(ClassifierInterface):
         else:
             img2 = np.zeros(shape=[320, 320, 3], dtype=np.uint8)
             img2[:height, :width] = contour
-        blob = cv2.dnn.blobFromImage(
-            img2, 1/256, (320, 320), (0, 0, 0), True, crop=False)  # reduce 416 to 320
+        blob = cv2.dnn.blobFromImage(img2, 1 / 256, (320, 320), (0, 0, 0), True, crop=False)  # reduce 416 to 320
         self.net.setInput(blob)
         outs = self.net.forward(self.outputlayers)
         for out in outs:
@@ -52,11 +52,8 @@ class Classifier(ClassifierInterface):
                 if confidence > self.threshold:
                     if self.classes[class_id] not in results:
                         cv2.imshow("changes x", img2)
-                        cv2.waitKey(10) & 0XFF
+                        cv2.waitKey(10) & 0xFF
                         results.append(self.classes[class_id])
-                        #print(self.classes[x], score)
+                        # print(self.classes[x], score)

         return results
-
-
-

@@ -1,32 +1,31 @@
-
 class Config:
     c = {
-        "min_area" : 100,
-        "max_area" : 900000,
-        "threashold" : 7,
-        "resizeWidth" : 500,
-        "inputPath" : None,
+        "min_area": 100,
+        "max_area": 900000,
+        "threashold": 7,
+        "resizeWidth": 500,
+        "inputPath": None,
         "outputPath": None,
-        "maxLayerLength": 5000,
-        "minLayerLength": 40,
+        "maxLayerLength": 5000,
+        "minLayerLength": 40,
         "tolerance": 20,
         "maxLength": None,
         "ttolerance": 60,
         "videoBufferLength": 250,
         "LayersPerContour": 220,
-        "avgNum":10
-    }
+        "avgNum": 10,
+    }

     def __init__(self):
-        '''This is basically just a wrapper for a json / python dict'''
+        """This is basically just a wrapper for a json / python dict"""
         print("Current Config:")
         for key, value in self.c.items():
             print(f"{key}:\t\t{value}")

     def __getitem__(self, key):
         if key not in self.c:
             return None
-        return self.c[key]
+        return self.c[key]

     def __setitem__(self, key, value):
         self.c[key] = value

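Since Config is just a thin dict wrapper, usage looks like the following sketch (assuming the Config class above is importable; values illustrative):

    config = Config()                    # prints the current defaults
    config["inputPath"] = "footage.mp4"  # __setitem__
    print(config["min_area"])            # 100, via __getitem__
    print(config["noSuchKey"])           # None instead of a KeyError
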
@@ -11,6 +11,7 @@ import cv2
 import numpy as np
 import os

+
 class ContourExtractor:

     # extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
@@ -51,10 +52,9 @@ class ContourExtractor:
         with ThreadPool(2) as pool:
             while not videoReader.videoEnded():
                 if videoReader.buffer.qsize() == 0:
-                    time.sleep(.5)
+                    time.sleep(0.5)

-                tmpData = [videoReader.pop()
-                           for i in range(0, videoReader.buffer.qsize())]
+                tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
                 pool.map(self.computeMovingAverage, (tmpData,))
                 pool.map(self.async2, (tmpData,))
                 # for data in tmpData:
@@ -75,20 +75,20 @@ class ContourExtractor:
                 time.sleep(0.1)
             firstFrame = self.averages.pop(frameCount, None)

-            if frameCount % (10*self.fps) == 1:
+            if frameCount % (10 * self.fps) == 1:
                 print(
-                    f" \r \033[K {round((frameCount/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s", end='\r')
+                    f" \r \033[K {round((frameCount/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s",
+                    end="\r",
+                )

             gray = self.prepareFrame(frame)
             frameDelta = cv2.absdiff(gray, firstFrame)
-            thresh = cv2.threshold(frameDelta, self.threashold,
-                                   255, cv2.THRESH_BINARY)[1]
+            thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
             # dilate the thresholded image to fill in holes, then find contours
             thresh = cv2.dilate(thresh, None, iterations=10)
-            #cv2.imshow("changes x", thresh)
-            #cv2.waitKey(10) & 0XFF
-            cnts = cv2.findContours(
-                thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+            # cv2.imshow("changes x", thresh)
+            # cv2.waitKey(10) & 0XFF
+            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
             cnts = imutils.grab_contours(cnts)

             contours = []
@@ -101,7 +101,7 @@ class ContourExtractor:
                     contours.append((x, y, w, h))
                     # the mask has to be packed like this, since np doesn't have a bit array,
                     # meaning every bit in the mask would take up 8bits, which migth be too much
-                    masks.append(np.packbits(np.copy(thresh[y:y+h, x:x+w]), axis=0))
+                    masks.append(np.packbits(np.copy(thresh[y : y + h, x : x + w]), axis=0))

             if len(contours) != 0 and contours is not None:
                 # this should be thread safe
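The packed-mask comment above deserves a quick illustration: np.packbits stores eight mask pixels per byte, an 8x saving over one uint8 per pixel. A minimal round-trip sketch (shapes illustrative):

    import numpy as np

    mask = (np.random.rand(64, 48) > 0.5).astype(np.uint8)    # binary mask, one byte per pixel
    packed = np.packbits(mask, axis=0)                        # eight rows of bits per byte along axis 0
    print(mask.nbytes, packed.nbytes)                         # 3072 vs 384 bytes
    restored = np.unpackbits(packed, axis=0)[: mask.shape[0]]  # trim any bit-padding rows
    assert (restored == mask).all()
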
@@ -131,8 +131,7 @@ class ContourExtractor:
         if self.lastFrames is not None:
             frames = self.lastFrames + frames

-        tmp = [[j, frames, averageFrames]
-               for j in range(averageFrames, len(frames))]
+        tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
         with ThreadPool(os.cpu_count()) as pool:
             pool.map(self.averageDaFrames, tmp)
@@ -143,7 +142,7 @@ class ContourExtractor:
         frameNumber, frame = frames[j]
         frame = self.prepareFrame(frame)

-        avg = frame/averageFrames
-        for jj in range(0, averageFrames-1):
-            avg += self.prepareFrame(frames[j-jj][1])/averageFrames
+        avg = frame / averageFrames
+        for jj in range(0, averageFrames - 1):
+            avg += self.prepareFrame(frames[j - jj][1]) / averageFrames
         self.averages[frameNumber] = np.array(np.round(avg), dtype=np.uint8)

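averageDaFrames is essentially a sliding mean over the previous averageFrames grayscale frames; a compact sketch of the idea (synthetic frames, simplified indexing):

    import numpy as np

    def sliding_mean(frames, j, averageFrames):
        # mean of frames[j], frames[j - 1], ..., frames[j - averageFrames + 1]
        window = [frames[j - jj] for jj in range(averageFrames)]
        return np.array(np.round(sum(f / averageFrames for f in window)), dtype=np.uint8)

    frames = [np.full((4, 4), v, dtype=np.float64) for v in (10, 20, 30, 40)]
    print(sliding_mean(frames, 3, 3)[0, 0])  # mean of 40, 30, 20 -> 30
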
@@ -7,7 +7,7 @@ import imutils
 import numpy as np
 import cv2
 import pickle
-import time
+import time


 class Exporter:
@@ -43,8 +43,7 @@ class Exporter:

         start = time.time()
         for i, layer in enumerate(layers):
-            print(
-                f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
+            print(f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end="\r")
             if len(layer.bounds[0]) == 0:
                 continue
             videoReader = VideoReader(self.config)
@@ -58,16 +57,21 @@ class Exporter:
                 if x is None:
                     continue
                 factor = videoReader.w / self.resizeWidth
-                x, y, w, h = (int(x * factor), int(y * factor),
-                              int(w * factor), int(h * factor))
+                x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

-                frame2[y:y+h, x:x+w] = np.copy(frame[y:y+h, x:x+w])
+                frame2[y : y + h, x : x + w] = np.copy(frame[y : y + h, x : x + w])

-                timestr = datetime.fromtimestamp(
-                    int(frameCount/self.fps) + videoReader.getStartTime())
-                cv2.putText(frame2, str(i) + " " + f"{timestr.hour}:{timestr.minute}:{timestr.second}", (int(
-                    x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
-                #cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
+                timestr = datetime.fromtimestamp(int(frameCount / self.fps) + videoReader.getStartTime())
+                cv2.putText(
+                    frame2,
+                    str(i) + " " + f"{timestr.hour}:{timestr.minute}:{timestr.second}",
+                    (int(x + w / 2), int(y + h / 2)),
+                    cv2.FONT_HERSHEY_SIMPLEX,
+                    1,
+                    (255, 255, 255),
+                    2,
+                )
+                # cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
                 writer.append_data(frame2)
             videoReader.vc.release()
             videoReader.thread.join()
@@ -83,7 +87,7 @@ class Exporter:
         maxLength = self.getMaxLengthOfLayers(layers)
         underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
-        #underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
+        # underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
         frames = []
         for i in range(maxLength):
             frames.append(np.copy(underlay))
@@ -91,8 +95,8 @@ class Exporter:

         while not videoReader.videoEnded():
             frameCount, frame = videoReader.pop()
-            if frameCount % (60*self.fps) == 0:
-                print("Minutes processed: ", frameCount/(60*self.fps), end="\r")
+            if frameCount % (60 * self.fps) == 0:
+                print("Minutes processed: ", frameCount / (60 * self.fps), end="\r")
             if frame is None:
                 print("ContourExtractor: frame was None")
                 continue
@@ -103,31 +107,45 @@ class Exporter:
             for i in range(0, len(layer.bounds[frameCount - layer.startFrame])):
                 try:
                     underlay1 = underlay
-                    (x, y, w,
-                     h) = layer.bounds[frameCount - layer.startFrame][i]
+                    (x, y, w, h) = layer.bounds[frameCount - layer.startFrame][i]
                     mask = layer.masks[frameCount - layer.startFrame][i]
                     if x is None:
                         break
                     factor = videoReader.w / self.resizeWidth
-                    x, y, w, h = (int(x * factor), int(y * factor),
-                                  int(w * factor), int(h * factor))
+                    x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

-                    mask = imutils.resize(mask, width=w, height=h+1)
+                    mask = imutils.resize(mask, width=w, height=h + 1)
                     mask = np.resize(mask, (h, w))
                     mask = cv2.erode(mask, None, iterations=10)
                     mask *= 255
                     frame2 = frames[frameCount - layer.startFrame + layer.exportOffset]
-                    xx = np.copy(cv2.bitwise_and(
-                        frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
-                    frame2[y:y+h, x:x+w] = cv2.addWeighted(xx, 1, np.copy(
-                        cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)), 1, 0)
+                    xx = np.copy(
+                        cv2.bitwise_and(
+                            frame2[y : y + h, x : x + w],
+                            frame2[y : y + h, x : x + w],
+                            mask=cv2.bitwise_not(mask),
+                        )
+                    )
+                    frame2[y : y + h, x : x + w] = cv2.addWeighted(
+                        xx,
+                        1,
+                        np.copy(cv2.bitwise_and(frame[y : y + h, x : x + w], frame[y : y + h, x : x + w], mask=mask)),
+                        1,
+                        0,
+                    )
                     frames[frameCount - layer.startFrame + layer.exportOffset] = np.copy(frame2)
-                    #cv2.imshow("changes x", frame2)
-                    #cv2.waitKey(10) & 0XFF
-                    time = datetime.fromtimestamp(
-                        int(frameCount/self.fps) + videoReader.getStartTime())
-                    cv2.putText(frames[frameCount - layer.startFrame + layer.exportOffset], f"{time.hour}:{time.minute}:{time.second}", (int(
-                        x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+                    # cv2.imshow("changes x", frame2)
+                    # cv2.waitKey(10) & 0XFF
+                    time = datetime.fromtimestamp(int(frameCount / self.fps) + videoReader.getStartTime())
+                    cv2.putText(
+                        frames[frameCount - layer.startFrame + layer.exportOffset],
+                        f"{time.hour}:{time.minute}:{time.second}",
+                        (int(x + w / 2), int(y + h / 2)),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        1,
+                        (255, 255, 255),
+                        2,
+                    )
                 except:
                     continue
         videoReader.thread.join()
@@ -153,10 +171,9 @@ class Exporter:
         return maxLength

     def makeListOfFrames(self, layers):
-        '''Returns set of all Frames which are relavant to the Layers'''
+        """Returns set of all Frames which are relavant to the Layers"""
         frameNumbers = set()
         for layer in layers:
-            frameNumbers.update(
-                list(range(layer.startFrame, layer.startFrame + len(layer))))
+            frameNumbers.update(list(range(layer.startFrame, layer.startFrame + len(layer))))

         return sorted(list(frameNumbers))

@@ -1,8 +1,9 @@
 import numpy as np
 from matplotlib import pyplot as plt

+
 class HeatMap:
-    def __init__(self, x, y, contours, resizeFactor = 1):
+    def __init__(self, x, y, contours, resizeFactor=1):
         self.imageBW = np.zeros(shape=[y, x, 3], dtype=np.float64)
         self._resizeFactor = resizeFactor
         self._createImage(contours)
@@ -10,11 +11,16 @@ class HeatMap:
     def _createImage(self, contours):
         for contour in contours:
             for x, y, w, h in contour:
-                x, y, w, h = x*self._resizeFactor, y*self._resizeFactor, w*self._resizeFactor, h*self._resizeFactor
-                self.imageBW[int(y):int(y+h), int(x):int(x+w)] += 1
+                x, y, w, h = (
+                    x * self._resizeFactor,
+                    y * self._resizeFactor,
+                    w * self._resizeFactor,
+                    h * self._resizeFactor,
+                )
+                self.imageBW[int(y) : int(y + h), int(x) : int(x + w)] += 1

-        self.imageBW = np.nan_to_num(self.imageBW/ self.imageBW.sum(axis=1)[:, np.newaxis], 0)
+        self.imageBW = np.nan_to_num(self.imageBW / self.imageBW.sum(axis=1)[:, np.newaxis], 0)

     def showImage(self):
-        plt.imshow(self.imageBW*255)
+        plt.imshow(self.imageBW * 255)
         plt.show()

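The normalization line divides each row of the heat image by its row sum; rows that never saw a contour divide 0 by 0, and nan_to_num maps the resulting NaNs back to 0. (Note that the second positional argument of np.nan_to_num is copy, not the fill value; NaN -> 0.0 is simply the default.) A tiny sketch:

    import numpy as np

    img = np.array([[1.0, 3.0], [0.0, 0.0]])
    norm = np.nan_to_num(img / img.sum(axis=1)[:, np.newaxis])  # warns on the 0/0 row
    print(norm)  # [[0.25 0.75], [0. 0.]]
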
@@ -1,5 +1,6 @@
 import pickle

+
 class Importer:
     def __init__(self, config):
         self.path = config["importPath"]
@@ -8,4 +9,4 @@ class Importer:
         print("Loading previous results")
         with open(self.path, "rb") as file:
             layers, contours, masks = pickle.load(file)
-        return (layers, contours, masks)
+        return (layers, contours, masks)

@@ -2,24 +2,25 @@ import numpy as np
 import cv2
 import imutils

+
 class Layer:
-    #bounds = [[(x,y,w,h), ],]
+    # bounds = [[(x,y,w,h), ],]

     startFrame = None
     lastFrame = None
     length = None

     def __init__(self, startFrame, data, mask, config):
-        '''returns a Layer object
-
-        Layers are collections of contours with a StartFrame,
+        """returns a Layer object
+
+        Layers are collections of contours with a StartFrame,
         which is the number of the frame the first contour of
         this layer was extraced from

         A Contour is a CV2 Contour, which is a y*x*3 rgb numpy array,
-        but we only care about the corners of the contours.
-        So we save the bounds (x,y,w,h) in bounds[] and the actual content in data[]
-        '''
+        but we only care about the corners of the contours.
+        So we save the bounds (x,y,w,h) in bounds[] and the actual content in data[]
+        """
         self.startFrame = startFrame
         self.lastFrame = startFrame
         self.config = config
@@ -31,10 +32,9 @@ class Layer:

         self.bounds.append([data])
         self.masks.append([mask])
-        #print("Layer constructed")

     def add(self, frameNumber, bound, mask):
-        '''Adds a bound to the Layer at the layer index which corresponds to the given framenumber'''
+        """Adds a bound to the Layer at the layer index which corresponds to the given framenumber"""
         index = frameNumber - self.startFrame
         if index < 0:
             return
@@ -48,34 +48,6 @@ class Layer:
         self.bounds[index].append(bound)
         self.masks[index].append(mask)


-    def calcStats(self):
-        '''calculates average distance, variation and deviation of layer movement'''
-        middles = []
-        for i, bounds in enumerate(self.bounds):
-            for j, bound in enumerate(bounds):
-                if None in bound:
-                    continue
-                x = (bound[0] + bound[2]/2)
-                y = (bound[1] + bound[3]/2)
-                middles.append([x,y])
-
-        avg = 0
-        for i in range(1, len(middles), 2):
-            avg += (((float(middles[i][0]-middles[i-1][0])/len(middles))**2 + float(middles[i][1]-middles[i-1][1])/len(middles))**2)**(1/2)
-        self.stats = dict()
-        self.stats["avg"] = round(avg,2)
-
-        x=0
-        for i in range(1, len(middles), 2):
-            x += (((((float(middles[i][0]-middles[i-1][0])/len(middles))**2 + float(middles[i][1]-middles[i-1][1])/len(middles))**2)**(1/2)) - avg)**2
-
-        x /= (len(middles)-1)
-
-        self.stats["var"] = round(x,2)
-        self.stats["dev"] = round((x)**(1/2), 2)
-
-
     def getLength(self):
         return len(self) + self.exportOffset
@@ -84,20 +56,20 @@ class Layer:
         return self.length

     def spaceOverlaps(self, layer2):
-        '''Checks if there is an overlap in the bounds of current layer with given layer'''
+        """Checks if there is an overlap in the bounds of current layer with given layer"""
         overlap = False
         maxLen = min(len(layer2.bounds), len(self.bounds))
         bounds = self.bounds[:maxLen]
         for b1s, b2s in zip(bounds[::10], layer2.bounds[:maxLen:10]):
             for b1 in b1s:
                 for b2 in b2s:
-                    if self.contoursOverlay((b1[0], b1[1]+b1[3]), (b1[0]+b1[2], b1[1]), (b2[0], b2[1]+b2[3]), (b2[0]+b2[2], b2[1])):
+                    if self.contoursOverlay((b1[0], b1[1] + b1[3]), (b1[0] + b1[2], b1[1]), (b2[0], b2[1] + b2[3]), (b2[0] + b2[2], b2[1])):
                         overlap = True
                         break
         return overlap

     def timeOverlaps(self, layer2):
-        '''Checks for overlap in time between current and given layer'''
+        """Checks for overlap in time between current and given layer"""
         s1 = self.exportOffset
         e1 = self.lastFrame - self.startFrame + self.exportOffset
         s2 = layer2.exportOffset
@@ -111,9 +83,8 @@ class Layer:
         return False

     def contoursOverlay(self, l1, r1, l2, r2):
-        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
+        if l1[0] >= r2[0] or l2[0] >= r1[0]:
             return False
-        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
+        if l1[1] <= r2[1] or l2[1] <= r1[1]:
             return False
         return True

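contoursOverlay is the standard axis-aligned rectangle separation test; each box is passed as l = (x, y + h) and r = (x + w, y) in image coordinates. A standalone sketch using the (x, y, w, h) bounds the layers store (values illustrative):

    def contoursOverlay(l1, r1, l2, r2):
        if l1[0] >= r2[0] or l2[0] >= r1[0]:  # separated horizontally
            return False
        if l1[1] <= r2[1] or l2[1] <= r1[1]:  # separated vertically
            return False
        return True

    def boxesOverlap(a, b):
        return contoursOverlay((a[0], a[1] + a[3]), (a[0] + a[2], a[1]),
                               (b[0], b[1] + b[3]), (b[0] + b[2], b[1]))

    print(boxesOverlap((0, 0, 10, 10), (5, 5, 10, 10)))  # True: the boxes intersect
    print(boxesOverlap((0, 0, 10, 10), (20, 0, 5, 5)))   # False: separated on x
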
@@ -7,6 +7,7 @@ from multiprocessing.pool import ThreadPool
 import numpy as np
 import os

+
 class LayerFactory:
     def __init__(self, config, data=None):
         self.data = {}
@@ -24,7 +25,7 @@ class LayerFactory:
         self.extractLayers(data)

     def extractLayers(self, data, maskArr):
-        '''Bundle given contours together into Layer Objects'''
+        """Bundle given contours together into Layer Objects"""

         frameNumber = min(data)
         contours = data[frameNumber]
@@ -40,15 +41,15 @@ class LayerFactory:
         for frameNumber in sorted(data.keys()):
             contours = data[frameNumber]
             masks = maskArr[frameNumber]
-            masks = [np.unpackbits(mask, axis=0)
-                     for mask, contours in zip(masks, contours)]
+            masks = [np.unpackbits(mask, axis=0) for mask, contours in zip(masks, contours)]
             if frameNumber % 100 == 0:
                 print(
-                    f" {int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers", end='\r')
+                    f" {int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers",
+                    end="\r",
+                )

-            tmp = [[frameNumber, contour, mask]
-                   for contour, mask in zip(contours, masks)]
-            #pool.map(self.getLayers, tmp)
+            tmp = [[frameNumber, contour, mask] for contour, mask in zip(contours, masks)]
+            # pool.map(self.getLayers, tmp)
             for x in tmp:
                 self.getLayers(x)
@@ -68,22 +69,20 @@ class LayerFactory:
                 continue

             lastXframes = min(40, len(layer))
-            lastBounds = [bound for bounds in layer.bounds[-lastXframes:]
-                          for bound in bounds]
+            lastBounds = [bound for bounds in layer.bounds[-lastXframes:] for bound in bounds]

             for j, bounds in enumerate(sorted(lastBounds, reverse=True)):
                 if bounds is None:
                     break
                 (x2, y2, w2, h2) = bounds
-                if self.contoursOverlay((x-tol, y+h+tol), (x+w+tol, y-tol), (x2, y2+h2), (x2+w2, y2)):
+                if self.contoursOverlay((x - tol, y + h + tol), (x + w + tol, y - tol), (x2, y2 + h2), (x2 + w2, y2)):
                     layer.add(frameNumber, (x, y, w, h), mask)
                     foundLayerIDs.add(i)
                     break

         foundLayerIDs = sorted(list(foundLayerIDs))
         if len(foundLayerIDs) == 0:
-            self.layers.append(
-                Layer(frameNumber, (x, y, w, h), mask, self.config))
+            self.layers.append(Layer(frameNumber, (x, y, w, h), mask, self.config))
         if len(foundLayerIDs) > 1:
             self.mergeLayers(foundLayerIDs)
@@ -118,8 +117,8 @@ class LayerFactory:
                 for lc2, l2 in enumerate(pL):
                     if lc2 == lc:
                         continue
-                    for cnt in l.bounds[x-l.startFrame]:
-                        for cnt2 in l2.bounds[x-l2.startFrame]:
+                    for cnt in l.bounds[x - l.startFrame]:
+                        for cnt2 in l2.bounds[x - l2.startFrame]:
                             if self.contoursOverlay(cnt, cnt2):
                                 merge.add(indexes[lc])
                                 merge.add(indexes[lc2])
@@ -152,9 +151,9 @@ class LayerFactory:
         return maxFrame

     def contoursOverlay(self, l1, r1, l2, r2):
-        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
+        if l1[0] >= r2[0] or l2[0] >= r1[0]:
             return False
-        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
+        if l1[1] <= r2[1] or l2[1] <= r1[1]:
             return False
         return True

@@ -20,7 +20,7 @@ class LayerManager:
         self.resizeWidth = config["resizeWidth"]
         self.footagePath = config["inputPath"]
         self.config = config
-        #self.classifier = Classifier()
+        # self.classifier = Classifier()
         self.tags = []
         print("LayerManager constructed")
@@ -67,13 +67,12 @@ class LayerManager:
         self.layers = layers

     def tagLayers(self):
-        '''Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier'''
+        """Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier"""
         print("Tagging Layers")
         exporter = Exporter(self.config)
         start = time.time()
         for i, layer in enumerate(self.layers):
-            print(
-                f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
+            print(f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
             start = time.time()
             if len(layer.bounds[0]) == 0:
                 continue
@@ -94,7 +93,7 @@ class LayerManager:
                     y = int(y * factor)
                     w = int(w * factor)
                     h = int(h * factor)
-                    data.append(np.copy(frame[y:y+h, x:x+w]))
+                    data.append(np.copy(frame[y : y + h, x : x + w]))
             layer.data.append(data)
             tags = self.classifier.tagLayer(layer.data)
             print(tags)
@@ -109,8 +108,7 @@ class LayerManager:
         lenL = len(self.layers)
         for i in range(1, len(self.layers)):
             layer = self.layers[i]
-            print(
-                f"\r {i}/{lenL}", end='\r')
+            print(f"\r {i}/{lenL}", end="\r")
             overlap = True
             tries = 1
             while overlap:
@@ -123,5 +121,5 @@ class LayerManager:
                     self.layers[i].exportOffset += 20 * tries
                     tries += 1

-            #if self.layers[i].exportOffset >= 300000:
+            # if self.layers[i].exportOffset >= 300000:
             #    break

@@ -4,6 +4,7 @@ import cv2
 import threading
 import os

+
 class VideoReader:
     listOfFrames = None
     w = None
@@ -26,7 +27,7 @@ class VideoReader:
         self.calcStartTime()
         if setOfFrames is not None:
             self.listOfFrames = sorted(setOfFrames)
-
+
     def __enter__(self):
         self.fillBuffer()
         return self
@@ -47,14 +48,13 @@ class VideoReader:
         self.listOfFrames = listOfFrames

         if self.listOfFrames is not None:
-            self.thread = threading.Thread(
-                target=self.readFramesByList, args=())
+            self.thread = threading.Thread(target=self.readFramesByList, args=())
         else:
             self.thread = threading.Thread(target=self.readFrames, args=())
         self.thread.start()

     def readFrames(self):
-        '''Reads video from start to finish'''
+        """Reads video from start to finish"""
         while self.lastFrame < self.endFrame:
             res, frame = self.vc.read()
             if res:
@@ -64,7 +64,7 @@ class VideoReader:
         self.stopped = True

     def readFramesByList(self):
-        '''Reads all frames from a list of frame numbers'''
+        """Reads all frames from a list of frame numbers"""
         self.vc.set(1, self.listOfFrames[0])
         self.lastFrame = self.listOfFrames[0]
         self.endFrame = self.listOfFrames[-1]
@@ -120,7 +120,7 @@ class VideoReader:
         return self.starttime

     def getWH(self):
-        '''get width and height'''
+        """get width and height"""
         if self.w is None or self.h is None:
             res, image = self.vc.read()
             self.w = image.shape[1]

@@ -1,6 +1,5 @@
-
-import math
-from PIL import Image, ImageDraw
+import math
+from PIL import Image, ImageDraw
 import random
 import imageio
 import glob
@@ -16,10 +15,12 @@ length = 1  # in minutes
 numberOfEvents = 4

 dirname = os.path.dirname(__file__)
-outputPath = os.path.join(dirname, 'out.mp4')
+outputPath = os.path.join(dirname, "out.mp4")


 def getRandomColorString():
-    return '#{:06x}'.format(random.randint(0, 256**3))
+    return "#{:06x}".format(random.randint(0, 256 ** 3))


 def genVideo():
     writer = imageio.get_writer(outputPath, fps=fps)
@@ -27,29 +28,27 @@ def genVideo():
     writer.append_data(np.zeros(shape=[1080, 1920, 3], dtype=np.uint8))

     for i in range(numberOfEvents):
-        objectWidth = (5 + random.randint(0, 5)) * xmax / 100
+        objectWidth = (5 + random.randint(0, 5)) * xmax / 100
         objectHeight = (10 + random.randint(-5, 5)) * ymax / 100

         objectX = random.randint(0, xmax)
         objectY = random.randint(0, ymax)

-        objectSpeedX = random.randint( 1 ,5 )
-        objectSpeedY = random.randint( 1, 5 )
+        objectSpeedX = random.randint(1, 5)
+        objectSpeedY = random.randint(1, 5)
         color = getRandomColorString()

-        for j in range(int(fps*length*60 / numberOfEvents)):
+        for j in range(int(fps * length * 60 / numberOfEvents)):
             objectX -= objectSpeedX
             objectY -= objectSpeedY

-            objectShape = [
-                (objectX, objectY),
-                (objectX + objectWidth, objectY + objectHeight)
-            ]
-            img = Image.new("RGB", (xmax, ymax))
-            img1 = ImageDraw.Draw(img)
-            img1.rectangle(objectShape, fill = color)
+            objectShape = [(objectX, objectY), (objectX + objectWidth, objectY + objectHeight)]
+            img = Image.new("RGB", (xmax, ymax))
+            img1 = ImageDraw.Draw(img)
+            img1.rectangle(objectShape, fill=color)
             writer.append_data(np.array(img))

     writer.close()

-genVideo()
+
+genVideo()

@@ -1,34 +1,34 @@
-#python
+# python
 import cv2
 import imageio
 import time

 writer = imageio.get_writer("./x23.mp4", fps=15)

 url = "http://50.227.41.1/mjpg/video.mjpg"
 i = 0
 cap = cv2.VideoCapture(url)
-while True :
+while True:
     try:
         if i < 10:
-            i+=1
+            i += 1
             continue

         result, frame = cap.read()

-        if result == False:
-            print("Error in cap.read()")  # this is for preventing a breaking error
+        if result == False:
+            print("Error in cap.read()")  # this is for preventing a breaking error
             # break;
-            time.sleep(1)
+            time.sleep(1)
             break
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         writer.append_data(frame)
-        i+=1
+        i += 1

-        if i > 20*60*60*2:
+        if i > 20 * 60 * 60 * 2:
             break
     except Exception as e:
         print("meh")
 cap.release()
 cv2.destroyAllWindows()
-writer.close()
+writer.close()

main.py (8 changes)
|
|
@ -22,8 +22,7 @@ def main():
|
|||
|
||||
config["inputPath"] = os.path.join(dirName, fileName)
|
||||
config["outputPath"] = os.path.join(outputPath, fileName)
|
||||
config["importPath"] = os.path.join(
|
||||
outputPath, fileName.split(".")[0] + ".txt")
|
||||
config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
|
||||
config["w"], config["h"] = VideoReader(config).getWH()
|
||||
|
||||
if not os.path.exists(config["importPath"]):
|
||||
|
|
@@ -38,13 +37,12 @@ def main():
     layerManager = LayerManager(config, layers)
     layerManager.transformLayers()

-
-    #layerManager.tagLayers()
+    # layerManager.tagLayers()
     layers = layerManager.layers
     if len(layers) == 0:
         exit(1)

-    heatmap = HeatMap(1920, 1088, [contour for layer in layers for contour in layer.bounds], 1920/config["resizeWidth"])
+    heatmap = HeatMap(1920, 1088, [contour for layer in layers for contour in layer.bounds], 1920 / config["resizeWidth"])
     heatmap.showImage()

     exporter = Exporter(config)