manager().queue()

This commit is contained in:
Askill 2022-09-11 11:25:36 +02:00
parent 1d7a56c417
commit 99a699b039
9 changed files with 375 additions and 355 deletions

View File

@ -17,68 +17,64 @@ class ContourExtractor:
# extractedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
# dict with frame numbers as keys and the contour bounds of every contour for that frame
def getExtractedContours(self):
return self.extractedContours
def get_extracted_contours(self):
return self.extracted_contours
def getExtractedMasks(self):
return self.extractedMasks
def get_extracted_masks(self):
return self.extracted_masks
def __init__(self, config):
self.frameBuffer = Queue(16)
self.extractedContours = dict()
self.extractedMasks = dict()
self.frame_buffer = Queue(16)
self.extracted_contours = dict()
self.extracted_masks = dict()
self.min_area = config["min_area"]
self.max_area = config["max_area"]
self.threashold = config["threashold"]
self.resizeWidth = config["resizeWidth"]
self.videoPath = config["inputPath"]
self.xDim = 0
self.yDim = 0
self.resize_width = config["resizeWidth"]
self.video_path = config["inputPath"]
self.x_dim = 0
self.y_dim = 0
self.config = config
self.lastFrames = None
self.last_frames = None
self.averages = dict()
print("ContourExtractor initiated")
def extractContours(self):
def extract_contours(self):
self.start = time.time()
with VideoReader(self.config) as videoReader:
self.fps = videoReader.getFPS()
self.length = videoReader.getLength()
self.fps = videoReader.get_fps()
self.length = videoReader.get_length()
with ThreadPool(2) as pool:
with ThreadPool(os.cpu_count()) as pool:
while True:
while not videoReader.videoEnded() and videoReader.buffer.qsize() == 0:
while not videoReader.video_ended() and videoReader.buffer.qsize() == 0:
time.sleep(0.5)
tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
if videoReader.videoEnded():
tmp_data = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
if videoReader.video_ended():
break
pool.map(self.computeMovingAverage, (tmpData,))
pool.map(self.async2, (tmpData,))
pool.map(self.compute_moving_Average, (tmp_data,))
pool.map(self.get_contours, tmp_data)
return self.extractedContours, self.extractedMasks
return self.extracted_contours, self.extracted_masks
def async2(self, tmpData):
with ThreadPool(os.cpu_count()) as pool2:
pool2.map(self.getContours, tmpData)
def getContours(self, data):
frameCount, frame = data
def get_contours(self, data):
frame_count, frame = data
# wait for the reference frame, which is calculated by averaging some previous frames
while frameCount not in self.averages:
while frame_count not in self.averages:
time.sleep(0.1)
firstFrame = self.averages.pop(frameCount, None)
first_frame = self.averages.pop(frame_count, None)
if frameCount % (10 * self.fps) == 1:
if frame_count % (10 * self.fps) == 1:
print(
f" \r \033[K {round((frameCount/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s",
f" \r \033[K {round((frame_count/self.fps)*100/self.length, 2)} % processed in {round(time.time() - self.start, 2)}s",
end="\r",
)
gray = self.prepareFrame(frame)
frameDelta = cv2.absdiff(gray, firstFrame)
thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
gray = self.prepare_frame(frame)
frame_delta = cv2.absdiff(gray, first_frame)
thresh = cv2.threshold(frame_delta, self.threashold, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
thresh = cv2.dilate(thresh, None, iterations=10)
# cv2.imshow("changes x", thresh)
@ -100,44 +96,43 @@ class ContourExtractor:
if len(contours) != 0 and contours is not None:
# this should be thread safe
self.extractedContours[frameCount] = contours
self.extractedMasks[frameCount] = masks
self.extracted_contours[frame_count] = contours
self.extracted_masks[frame_count] = masks
def prepareFrame(self, frame):
frame = imutils.resize(frame, width=self.resizeWidth)
def prepare_frame(self, frame):
frame = imutils.resize(frame, width=self.resize_width)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
return gray
def computeMovingAverage(self, frames):
avg = []
averageFrames = self.config["avgNum"]
def compute_moving_Average(self, frames):
average_frames = self.config["avgNum"]
if frames[0][0] < averageFrames:
if frames[0][0] < average_frames:
frame = frames[0][1]
frame = self.prepareFrame(frame)
frame = self.prepare_frame(frame)
for j in range(0, len(frames)):
frameNumber, _ = frames[j]
self.averages[frameNumber] = frame
frame_number, _ = frames[j]
self.averages[frame_number] = frame
# put last x frames into a buffer
self.lastFrames = frames[-averageFrames:]
self.last_frames = frames[-average_frames:]
return
if self.lastFrames is not None:
frames = self.lastFrames + frames
if self.last_frames is not None:
frames = self.last_frames + frames
tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
tmp = [[j, frames, average_frames] for j in range(average_frames, len(frames))]
with ThreadPool(int(os.cpu_count())) as pool:
pool.map(self.averageDaFrames, tmp)
pool.map(self.average_da_frames, tmp)
self.lastFrames = frames[-averageFrames:]
self.last_frames = frames[-average_frames:]
def averageDaFrames(self, dat):
j, frames, averageFrames = dat
frameNumber, frame = frames[j]
frame = self.prepareFrame(frame)
def average_da_frames(self, dat):
j, frames, average_frames = dat
frame_number, frame = frames[j]
frame = self.prepare_frame(frame)
avg = frame / averageFrames
for jj in range(0, averageFrames - 1):
avg += self.prepareFrame(frames[j - jj][1]) / averageFrames
self.averages[frameNumber] = np.array(np.round(avg), dtype=np.uint8)
avg = frame / average_frames
for jj in range(0, average_frames - 1):
avg += self.prepare_frame(frames[j - jj][1]) / average_frames
self.averages[frame_number] = np.array(np.round(avg), dtype=np.uint8)

View File

@ -11,114 +11,112 @@ from Application.VideoReader import VideoReader
class Exporter:
fps = 30
def __init__(self, config):
self.footagePath = config["inputPath"]
self.outputPath = config["outputPath"]
self.resizeWidth = config["resizeWidth"]
self.footage_path = config["inputPath"]
self.output_path = config["outputPath"]
self.resize_width = config["resizeWidth"]
self.config = config
print("Exporter initiated")
def export(self, layers, contours, masks, raw=True, overlayed=True, blackBackground=False, showProgress=False):
def export(self, layers, contours, masks, raw=True, overlayed=True, black_background=False, show_progress=False):
if raw:
self.exportRawData(layers, contours, masks)
self.export_raw_data(layers, contours, masks)
if overlayed:
self.exportOverlayed(layers, blackBackground, showProgress)
self.export_overlayed(layers, black_background, show_progress)
else:
self.exportLayers(layers)
self.export_layers(layers)
def exportLayers(self, layers):
listOfFrames = self.makeListOfFrames(layers)
with VideoReader(self.config, listOfFrames) as videoReader:
def export_layers(self, layers):
list_of_frames = self.make_list_of_frames(layers)
with VideoReader(self.config, list_of_frames) as video_reader:
underlay = cv2.VideoCapture(self.footagePath).read()[1]
underlay = cv2.VideoCapture(self.footage_path).read()[1]
underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
fps = videoReader.getFPS()
writer = imageio.get_writer(self.outputPath, fps=fps)
fps = video_reader.get_fps()
writer = imageio.get_writer(self.output_path, fps=fps)
start = time.time()
for i, layer in enumerate(layers):
print(f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end="\r")
if len(layer.bounds[0]) == 0:
continue
videoReader = VideoReader(self.config)
listOfFrames = self.makeListOfFrames([layer])
videoReader.fillBuffer(listOfFrames)
while not videoReader.videoEnded():
frameCount, frame = videoReader.pop()
video_reader = VideoReader(self.config)
list_of_frames = self.make_list_of_frames([layer])
video_reader.fill_buffer(list_of_frames)
while not video_reader.video_ended():
frame_count, frame = video_reader.pop()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame2 = np.copy(underlay)
for (x, y, w, h) in layer.bounds[frameCount - layer.startFrame]:
for (x, y, w, h) in layer.bounds[frame_count - layer.startFrame]:
if x is None:
continue
factor = videoReader.w / self.resizeWidth
factor = video_reader.w / self.resize_width
x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
frame2[y : y + h, x : x + w] = np.copy(frame[y : y + h, x : x + w])
self.addTimestamp(frame2, videoReader, frameCount, layer, x, y, w, h)
self.add_timestamp(frame2, video_reader, frame_count, x, y, w, h)
writer.append_data(frame2)
writer.close()
def exportOverlayed(self, layers, blackBackground=False, showProgress=False):
def export_overlayed(self, layers, black_background=False, show_progress=False):
listOfFrames = self.makeListOfFrames(layers)
maxLength = self.getMaxLengthOfLayers(layers)
list_of_frames = self.make_list_of_frames(layers)
max_length = self.get_max_length_of_layers(layers)
if blackBackground:
underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
else:
underlay = cv2.VideoCapture(self.footagePath).read()[1]
underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
with VideoReader(self.config, list_of_frames) as videoReader:
if black_background:
underlay = np.zeros(shape=[videoReader.h, videoReader.w, 3], dtype=np.uint8)
else:
underlay = cv2.VideoCapture(self.footage_path).read()[1]
underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
frames = []
for i in range(maxLength):
frames.append(np.copy(underlay))
with VideoReader(self.config, listOfFrames) as videoReader:
while not videoReader.videoEnded():
frameCount, frame = videoReader.pop()
if frameCount % (60 * self.fps) == 0:
print("Minutes processed: ", frameCount / (60 * self.fps), end="\r")
frames = []
for i in range(max_length):
frames.append(np.copy(underlay))
fps = videoReader.fps
while not videoReader.video_ended():
frame_count, frame = videoReader.pop()
if frame_count % (60 * fps) == 0:
print("Minutes processed: ", frame_count / (60 * fps), end="\r")
if frame is None:
print("ContourExtractor: frame was None")
continue
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
for layer in layers:
if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
for i in range(0, len(layer.bounds[frameCount - layer.startFrame])):
if layer.startFrame <= frame_count and layer.startFrame + len(layer.bounds) > frame_count:
for i in range(0, len(layer.bounds[frame_count - layer.startFrame])):
try:
x, y, w, h = layer.bounds[frameCount - layer.startFrame][i]
x, y, w, h = layer.bounds[frame_count - layer.startFrame][i]
if None in (x, y, w, h):
break
factor = videoReader.w / self.resizeWidth
factor = videoReader.w / self.resize_width
x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
mask = self.getMask(i, frameCount, layer, w, h)
background = frames[frameCount - layer.startFrame + layer.exportOffset]
self.addMaskedContent(frame, x, y, w, h, mask, background)
frames[frameCount - layer.startFrame + layer.exportOffset] = np.copy(background)
mask = self.get_mask(i, frame_count, layer, w, h)
background = frames[frame_count - layer.startFrame + layer.exportOffset]
self.add_masked_content(frame, x, y, w, h, mask, background)
frames[frame_count - layer.startFrame + layer.exportOffset] = np.copy(background)
if showProgress:
if show_progress:
cv2.imshow("changes x", background)
cv2.waitKey(10) & 0xFF
self.addTimestamp(frames[frameCount - layer.startFrame + layer.exportOffset], videoReader, frameCount, layer, x, y, w, h)
self.add_timestamp(frames[frame_count - layer.startFrame + layer.exportOffset], videoReader, frame_count, x, y, w, h)
except:
continue
writer = imageio.get_writer(self.outputPath, fps=videoReader.getFPS())
writer = imageio.get_writer(self.output_path, fps=videoReader.get_fps())
for frame in frames:
writer.append_data(frame)
writer.close()
def addMaskedContent(self, frame, x, y, w, h, mask, background):
maskedFrame = np.copy(
def add_masked_content(self, frame, x, y, w, h, mask, background):
masked_frame = np.copy(
cv2.bitwise_and(
background[y : y + h, x : x + w],
background[y : y + h, x : x + w],
@ -126,15 +124,15 @@ class Exporter:
)
)
background[y : y + h, x : x + w] = cv2.addWeighted(
maskedFrame,
masked_frame,
1,
np.copy(cv2.bitwise_and(frame[y : y + h, x : x + w], frame[y : y + h, x : x + w], mask=mask)),
1,
0,
)
def addTimestamp(self, frame, videoReader, frameCount, layer, x, y, w, h):
time = datetime.fromtimestamp(int(frameCount / self.fps) + videoReader.getStartTime())
def add_timestamp(self, frame, video_reader, frame_count, x, y, w, h):
time = datetime.fromtimestamp(int(frame_count / video_reader.fps) + video_reader.get_start_time())
cv2.putText(
frame,
f"{time.hour}:{time.minute}:{time.second}",
@ -145,29 +143,33 @@ class Exporter:
2,
)
def getMask(self, i, frameCount, layer, w, h):
mask = layer.masks[frameCount - layer.startFrame][i]
def get_mask(self, i, frame_count, layer, w, h):
mask = layer.masks[frame_count - layer.startFrame][i]
mask = imutils.resize(mask, width=w, height=h + 1)
mask = np.resize(mask, (h, w))
mask = cv2.erode(mask, None, iterations=10)
mask *= 255
return mask
def exportRawData(self, layers, contours, masks):
with open(self.config["importPath"], "wb+") as file:
pickle.dump((layers, contours, masks), file)
def export_raw_data(self, layers, contours, masks):
with open(self.config["importPath"] + "_layers", "wb+") as file:
pickle.dump(layers, file)
with open(self.config["importPath"] + "_contours", "wb+") as file:
pickle.dump(contours, file)
with open(self.config["importPath"] + "_masks", "wb+") as file:
pickle.dump(masks, file)
def getMaxLengthOfLayers(self, layers):
maxLength = 0
def get_max_length_of_layers(self, layers):
max_length = 0
for layer in layers:
if layer.getLength() > maxLength:
maxLength = layer.getLength()
return maxLength
if layer.getLength() > max_length:
max_length = layer.getLength()
return max_length
def makeListOfFrames(self, layers):
"""Returns set of all Frames which are relavant to the Layers"""
frameNumbers = set()
def make_list_of_frames(self, layers):
"""Returns set of all Frames which are relevant to the Layers"""
frame_numbers = set()
for layer in layers:
frameNumbers.update(list(range(layer.startFrame, layer.startFrame + len(layer))))
frame_numbers.update(list(range(layer.startFrame, layer.startFrame + len(layer))))
return sorted(list(frameNumbers))
return sorted(list(frame_numbers))

View File

@ -1,26 +1,30 @@
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
class HeatMap:
def __init__(self, x, y, contours, resizeFactor=1):
self.imageBW = np.zeros(shape=[y, x, 3], dtype=np.float64)
self._resizeFactor = resizeFactor
self._createImage(contours)
def __init__(self, x, y, contours, resize_factor=1):
self.image_bw = np.zeros(shape=[y, x, 3], dtype=np.float64)
self._resize_factor = resize_factor
self._create_image(contours)
def _createImage(self, contours):
def _create_image(self, contours):
for contour in contours:
for x, y, w, h in contour:
x, y, w, h = (
x * self._resizeFactor,
y * self._resizeFactor,
w * self._resizeFactor,
h * self._resizeFactor,
x * self._resize_factor,
y * self._resize_factor,
w * self._resize_factor,
h * self._resize_factor,
)
self.imageBW[int(y) : int(y + h), int(x) : int(x + w)] += 1
self.image_bw[int(y) : int(y + h), int(x) : int(x + w)] += 1
self.imageBW = np.nan_to_num(self.imageBW / self.imageBW.sum(axis=1)[:, np.newaxis], 0)
self.image_bw = np.nan_to_num(self.image_bw / self.image_bw.sum(axis=1)[:, np.newaxis], 0)
def showImage(self):
plt.imshow(self.imageBW * 255)
def show_image(self):
plt.imshow(self.image_bw * 255)
plt.show()
def save__image(self, path):
im = Image.fromarray(self.image_bw * 255)
im.save(path)

View File

@ -1,12 +1,24 @@
import pickle
import os.path
class Importer:
def __init__(self, config):
self.path = config["importPath"]
def importRawData(self):
def import_raw_data(self):
print("Loading previous results")
with open(self.path, "rb") as file:
layers, contours, masks = pickle.load(file)
return (layers, contours, masks)
layers = self.load_if_present(self.path + "_layers")
contours = self.load_if_present(self.path + "_contours")
masks = self.load_if_present(self.path + "_masks")
return layers, contours, masks
def load_if_present(self, path):
var = None
if os.path.isfile(path):
with open(path, "rb") as file:
var = pickle.load(file)
else:
print(path, "file not found")
return var

View File

@ -6,11 +6,11 @@ import numpy as np
class Layer:
# bounds = [[(x,y,w,h), ],]
startFrame = None
lastFrame = None
start_frame = None
last_frame = None
length = None
def __init__(self, startFrame, data, mask, config):
def __init__(self, start_frame, data, mask, config):
"""returns a Layer object
Layers are collections of contours with a StartFrame,
@ -21,57 +21,57 @@ class Layer:
but we only care about the corners of the contours.
So we save the bounds (x,y,w,h) in bounds[] and the actual content in data[]
"""
self.startFrame = startFrame
self.lastFrame = startFrame
self.start_frame = start_frame
self.last_frame = start_frame
self.config = config
self.data = []
self.bounds = []
self.masks = []
self.stats = dict()
self.exportOffset = 0
self.export_offset = 0
self.bounds.append([data])
self.masks.append([mask])
def add(self, frameNumber, bound, mask):
def add(self, frame_number, bound, mask):
"""Adds a bound to the Layer at the layer index which corresponds to the given framenumber"""
index = frameNumber - self.startFrame
index = frame_number - self.start_frame
if index < 0:
return
if frameNumber > self.lastFrame:
for i in range(frameNumber - self.lastFrame):
if frame_number > self.last_frame:
for i in range(frame_number - self.last_frame):
self.bounds.append([])
self.masks.append([])
self.lastFrame = frameNumber
self.last_frame = frame_number
if bound not in self.bounds[index]:
self.bounds[index].append(bound)
self.masks[index].append(mask)
def getLength(self):
return len(self) + self.exportOffset
def get_length(self):
return len(self) + self.export_offset
def __len__(self):
self.length = len(self.bounds)
return self.length
def spaceOverlaps(self, layer2):
def space_overlaps(self, layer2):
"""Checks if there is an overlap in the bounds of current layer with given layer"""
overlap = False
maxLen = min(len(layer2.bounds), len(self.bounds))
bounds = self.bounds[:maxLen]
for b1s, b2s in zip(bounds[::10], layer2.bounds[:maxLen:10]):
max_len = min(len(layer2.bounds), len(self.bounds))
bounds = self.bounds[:max_len]
for b1s, b2s in zip(bounds[::10], layer2.bounds[:max_len:10]):
for b1 in b1s:
for b2 in b2s:
if self.contoursOverlay((b1[0], b1[1] + b1[3]), (b1[0] + b1[2], b1[1]), (b2[0], b2[1] + b2[3]), (b2[0] + b2[2], b2[1])):
if self.contours_overlay((b1[0], b1[1] + b1[3]), (b1[0] + b1[2], b1[1]), (b2[0], b2[1] + b2[3]), (b2[0] + b2[2], b2[1])):
overlap = True
break
return overlap
def timeOverlaps(self, layer2):
def time_overlaps(self, layer2):
"""Checks for overlap in time between current and given layer"""
s1 = self.exportOffset
e1 = self.lastFrame - self.startFrame + self.exportOffset
s1 = self.export_offset
e1 = self.last_frame - self.start_frame + self.export_offset
s2 = layer2.exportOffset
e2 = layer2.lastFrame - layer2.startFrame + layer2.exportOffset
@ -82,7 +82,7 @@ class Layer:
else:
return False
def contoursOverlay(self, l1, r1, l2, r2):
def contours_overlay(self, l1, r1, l2, r2):
if l1[0] >= r2[0] or l2[0] >= r1[0]:
return False
if l1[1] <= r2[1] or l2[1] <= r1[1]:

View File

@ -15,153 +15,153 @@ class LayerFactory:
self.layers = []
self.tolerance = config["tolerance"]
self.ttolerance = config["ttolerance"]
self.minLayerLength = config["minLayerLength"]
self.maxLayerLength = config["maxLayerLength"]
self.resizeWidth = config["resizeWidth"]
self.footagePath = config["inputPath"]
self.min_layer_length = config["minLayerLength"]
self.max_layer_length = config["maxLayerLength"]
self.resize_width = config["resizeWidth"]
self.footage_path = config["inputPath"]
self.config = config
print("LayerFactory constructed")
self.data = data
if data is not None:
self.extractLayers(data)
self.extract_layers(data)
def extractLayers(self, data, maskArr):
def extract_layers(self, data, mask_arr):
"""Bundle given contours together into Layer Objects"""
frameNumber = min(data)
contours = data[frameNumber]
masks = maskArr[frameNumber]
frame_number = min(data)
contours = data[frame_number]
masks = mask_arr[frame_number]
for contour, mask in zip(contours, masks):
mask = np.unpackbits(mask, axis=0)
self.layers.append(Layer(frameNumber, contour, mask, self.config))
self.layers.append(Layer(frame_number, contour, mask, self.config))
self.oldLayerIDs = []
self.old_layer_i_ds = []
with ThreadPool(os.cpu_count()) as pool:
for frameNumber in sorted(data.keys()):
contours = data[frameNumber]
masks = maskArr[frameNumber]
for frame_number in sorted(data.keys()):
contours = data[frame_number]
masks = mask_arr[frame_number]
masks = [np.unpackbits(mask, axis=0) for mask, contours in zip(masks, contours)]
if frameNumber % 100 == 0:
if frame_number % 100 == 0:
print(
f" {int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers",
f" {int(round(frame_number/max(data.keys()), 2)*100)}% done with Layer extraction {len(self.layers)} Layers",
end="\r",
)
tmp = [[frameNumber, contour, mask] for contour, mask in zip(contours, masks)]
tmp = [[frame_number, contour, mask] for contour, mask in zip(contours, masks)]
# pool.map(self.getLayers, tmp)
for x in tmp:
self.getLayers(x)
self.get_layers(x)
# self.joinLayers()
return self.layers
def getLayers(self, data):
frameNumber = data[0]
def get_layers(self, data):
frame_number = data[0]
bounds = data[1]
mask = data[2]
(x, y, w, h) = bounds
tol = self.tolerance
foundLayerIDs = set()
found_layer_i_ds = set()
for i, layer in enumerate(self.layers):
if frameNumber - layer.lastFrame > self.ttolerance:
if frame_number - layer.last_frame > self.ttolerance:
continue
lastXframes = min(40, len(layer))
lastBounds = [bound for bounds in layer.bounds[-lastXframes:] for bound in bounds]
last_xframes = min(40, len(layer))
last_bounds = [bound for bounds in layer.bounds[-last_xframes:] for bound in bounds]
for j, bounds in enumerate(sorted(lastBounds, reverse=True)):
for j, bounds in enumerate(sorted(last_bounds, reverse=True)):
if bounds is None:
break
(x2, y2, w2, h2) = bounds
if self.contoursOverlay((x - tol, y + h + tol), (x + w + tol, y - tol), (x2, y2 + h2), (x2 + w2, y2)):
layer.add(frameNumber, (x, y, w, h), mask)
foundLayerIDs.add(i)
if self.contours_overlay((x - tol, y + h + tol), (x + w + tol, y - tol), (x2, y2 + h2), (x2 + w2, y2)):
layer.add(frame_number, (x, y, w, h), mask)
found_layer_i_ds.add(i)
break
foundLayerIDs = sorted(list(foundLayerIDs))
if len(foundLayerIDs) == 0:
self.layers.append(Layer(frameNumber, (x, y, w, h), mask, self.config))
if len(foundLayerIDs) > 1:
self.mergeLayers(foundLayerIDs)
found_layer_i_ds = sorted(list(found_layer_i_ds))
if len(found_layer_i_ds) == 0:
self.layers.append(Layer(frame_number, (x, y, w, h), mask, self.config))
if len(found_layer_i_ds) > 1:
self.merge_layers(found_layer_i_ds)
def mergeLayers(self, foundLayerIDs):
layers = self.getLayersByID(foundLayerIDs)
mergedLayers = layers[0]
def merge_layers(self, found_layer_i_ds):
layers = self.get_layers_by_id(found_layer_i_ds)
merged_layers = layers[0]
for layer in layers[1:]:
for i, (contours, masks) in enumerate(zip(layer.bounds, layer.masks)):
for contour, mask in zip(contours, masks):
mergedLayers.add(layer.startFrame + i, contour, mask)
merged_layers.add(layer.startFrame + i, contour, mask)
for i, id in enumerate(foundLayerIDs):
for i, id in enumerate(found_layer_i_ds):
del self.layers[id - i]
self.layers.append(mergedLayers)
self.layers.append(merged_layers)
def joinLayers(self):
def join_layers(self):
self.layers.sort(key=lambda c: c.startFrame)
minFrame = self.getMinStart(self.layers)
maxFrame = self.getMaxEnd(self.layers)
min_frame = self.get_min_start(self.layers)
max_frame = self.get_max_end(self.layers)
for i in range(minFrame, maxFrame):
pL, indexes = self.getPossibleLayers(i)
if len(pL) <= 1:
for i in range(min_frame, max_frame):
p_l, indexes = self.get_possible_layers(i)
if len(p_l) <= 1:
continue
merge = set()
innerMax = self.getMaxEnd(pL)
for x in range(self.getMinStart(pL), innerMax):
for lc, l in enumerate(pL):
inner_max = self.get_max_end(p_l)
for x in range(self.get_min_start(p_l), inner_max):
for lc, l in enumerate(p_l):
if l.startFrame < x or l.lastFrame > x:
continue
for lc2, l2 in enumerate(pL):
for lc2, l2 in enumerate(p_l):
if lc2 == lc:
continue
for cnt in l.bounds[x - l.startFrame]:
for cnt2 in l2.bounds[x - l2.startFrame]:
if self.contoursOverlay(cnt, cnt2):
if self.contours_overlay(cnt, cnt2):
merge.add(indexes[lc])
merge.add(indexes[lc2])
merge = list(merge)
if len(merge) > 1:
self.mergeLayers(merge)
i = innerMax
self.merge_layers(merge)
i = inner_max
def getPossibleLayers(self, t):
def get_possible_layers(self, t):
ret = []
ii = []
for i, layer in enumerate(self.layers):
if layer.startFrame <= t and layer.lastFrame <= t:
if layer.start_frame <= t and layer.last_frame <= t:
ret.append(layer)
ii.append(i)
return (ret, ii)
def getMinStart(self, layers):
minFrame = layers[0].startFrame
def get_min_start(self, layers):
min_frame = layers[0].startFrame
for l in layers:
if l.startFrame < minFrame:
minFrame = l.startFrame
return minFrame
if l.startFrame < min_frame:
min_frame = l.startFrame
return min_frame
def getMaxEnd(self, layers):
maxFrame = layers[0].lastFrame
def get_max_end(self, layers):
max_frame = layers[0].lastFrame
for l in layers:
if l.lastFrame < maxFrame:
maxFrame = l.lastFrame
return maxFrame
if l.lastFrame < max_frame:
max_frame = l.lastFrame
return max_frame
def contoursOverlay(self, l1, r1, l2, r2):
def contours_overlay(self, l1, r1, l2, r2):
if l1[0] >= r2[0] or l2[0] >= r1[0]:
return False
if l1[1] <= r2[1] or l2[1] <= r1[1]:
return False
return True
def getLayersByID(self, foundLayerIDs):
def get_layers_by_id(self, found_layer_i_ds):
layers = []
for layerID in foundLayerIDs:
layers.append(self.layers[layerID])
for layer_id in found_layer_i_ds:
layers.append(self.layers[layer_id])
layers.sort(key=lambda c: c.startFrame)
return layers

View File

@ -17,53 +17,53 @@ class LayerManager:
self.layers = layers
self.tolerance = config["tolerance"]
self.ttolerance = config["ttolerance"]
self.minLayerLength = config["minLayerLength"]
self.maxLayerLength = config["maxLayerLength"]
self.resizeWidth = config["resizeWidth"]
self.footagePath = config["inputPath"]
self.min_layer_length = config["minLayerLength"]
self.max_layer_length = config["maxLayerLength"]
self.resize_width = config["resizeWidth"]
self.footage_path = config["inputPath"]
self.config = config
# self.classifier = Classifier()
self.tags = []
print("LayerManager constructed")
def cleanLayers(self):
def clean_layers(self):
print("'Cleaning' Layers")
print("Before deleting short layers ", len(self.layers))
self.freeMin()
self.free_min()
print("Before deleting long layers ", len(self.layers))
self.freeMax()
self.sortLayers()
self.free_max()
self.sort_layers()
print("Before deleting sparse layers ", len(self.layers))
self.deleteSparse()
self.delete_sparse()
print("after deleting sparse layers ", len(self.layers))
#self.calcTimeOffset()
def deleteSparse(self):
toDelete = []
def delete_sparse(self):
to_delete = []
for i, l in enumerate(self.layers):
empty = l.bounds.count([])
if empty / len(l) > 0.5:
toDelete.append(i)
to_delete.append(i)
for i, id in enumerate(toDelete):
for i, id in enumerate(to_delete):
del self.layers[id - i]
def freeMin(self):
def free_min(self):
self.data.clear()
layers = []
for l in self.layers:
if len(l) > self.minLayerLength:
if len(l) > self.min_layer_length:
layers.append(l)
self.layers = layers
def freeMax(self):
def free_max(self):
layers = []
for l in self.layers:
if len(l) < self.maxLayerLength:
if len(l) < self.max_layer_length:
layers.append(l)
self.layers = layers
def tagLayers(self):
def tag_layers(self):
"""Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier"""
print("Tagging Layers")
exporter = Exporter(self.config)
@ -73,19 +73,19 @@ class LayerManager:
start = time.time()
if len(layer.bounds[0]) == 0:
continue
listOfFrames = exporter.makeListOfFrames([layer])
list_of_frames = exporter.make_list_of_frames([layer])
videoReader = VideoReader(self.config, listOfFrames)
videoReader.fillBuffer()
video_reader = VideoReader(self.config, list_of_frames)
video_reader.fill_buffer()
while not videoReader.videoEnded():
frameCount, frame = videoReader.pop()
while not video_reader.video_ended():
frame_count, frame = video_reader.pop()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
data = []
for (x, y, w, h) in layer.bounds[frameCount - layer.startFrame]:
for (x, y, w, h) in layer.bounds[frame_count - layer.startFrame]:
if x is None:
break
factor = videoReader.w / self.resizeWidth
factor = video_reader.w / self.resize_width
x = int(x * factor)
y = int(y * factor)
w = int(w * factor)
@ -96,16 +96,16 @@ class LayerManager:
print(tags)
self.tags.append(tags)
videoReader.thread.join()
video_reader.thread.join()
def sortLayers(self):
def sort_layers(self):
self.layers.sort(key=lambda c: c.startFrame)
def calcTimeOffset(self):
lenL = len(self.layers)
def calc_time_offset(self):
len_l = len(self.layers)
for i in range(1, len(self.layers)):
layer = self.layers[i]
print(f"\r {i}/{lenL}", end="\r")
print(f"\r {i}/{len_l}", end="\r")
overlap = True
tries = 1
while overlap:

View File

@ -7,33 +7,33 @@ import cv2
class VideoReader:
listOfFrames = None
list_of_frames = None
w = None
h = None
def __init__(self, config, setOfFrames=None, multiprocess=False):
videoPath = config["inputPath"]
if videoPath is None:
raise Exception("ERROR: Video reader needs a videoPath!")
self.videoPath = videoPath
self.lastFrame = 0
def __init__(self, config, set_of_frames=None, multiprocess=False):
video_path = config["inputPath"]
if video_path is None:
raise Exception("ERROR: Video reader needs a video_path!")
self.video_path = video_path
self.last_frame = 0
# buffer data struct:
# buffer = Queue([(frameNumber, frame), ])
self.multiprocess = multiprocess
if multiprocess:
self.buffer = multiprocessing.Queue(config["videoBufferLength"])
self.buffer = multiprocessing.Manager().Queue(config["videoBufferLength"])
else:
self.buffer = queue.Queue(config["videoBufferLength"])
self.stopped = False
self.getWH()
self.calcFPS()
self.calcLength()
self.calcStartTime()
if setOfFrames is not None:
self.listOfFrames = sorted(setOfFrames)
self.get_wh()
self.calc_fps()
self.calc_length()
self.calc_start_time()
if set_of_frames is not None:
self.list_of_frames = sorted(set_of_frames)
def __enter__(self):
self.fillBuffer()
self.fill_buffer()
return self
def __exit__(self, type, value, traceback):
@ -43,97 +43,97 @@ class VideoReader:
self.thread.join()
def pop(self):
frameNumber, frame = self.buffer.get(block=True)
frame_number, frame = self.buffer.get(block=True)
if frame is None:
self.stopped = True
return frameNumber, frame
return frame_number, frame
def fillBuffer(self, listOfFrames=None):
self.endFrame = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
if listOfFrames is not None:
self.listOfFrames = listOfFrames
def fill_buffer(self, list_of_frames=None):
self.end_frame = int(cv2.VideoCapture(self.video_path).get(cv2.CAP_PROP_FRAME_COUNT))
if list_of_frames is not None:
self.list_of_frames = list_of_frames
if self.multiprocess:
if self.listOfFrames is not None:
self.thread = multiprocessing.Process(target=self.readFramesByList, args=())
if self.list_of_frames is not None:
self.thread = multiprocessing.Process(target=self.read_frames_by_list, args=())
else:
self.thread = multiprocessing.Process(target=self.readFrames, args=())
self.thread = multiprocessing.Process(target=self.read_frames, args=())
else:
if self.listOfFrames is not None:
self.thread = threading.Thread(target=self.readFramesByList, args=())
if self.list_of_frames is not None:
self.thread = threading.Thread(target=self.read_frames_by_list, args=())
else:
self.thread = threading.Thread(target=self.readFrames, args=())
self.thread = threading.Thread(target=self.read_frames, args=())
self.thread.start()
def readFrames(self):
def read_frames(self):
"""Reads video from start to finish"""
self.vc = cv2.VideoCapture(self.videoPath)
while self.lastFrame < self.endFrame:
self.vc = cv2.VideoCapture(self.video_path)
while self.last_frame < self.end_frame:
res, frame = self.vc.read()
if res:
self.buffer.put((self.lastFrame, frame))
self.lastFrame += 1
self.buffer.put((self.lastFrame, None))
self.buffer.put((self.last_frame, frame))
self.last_frame += 1
self.buffer.put((self.last_frame, None))
def readFramesByList(self):
def read_frames_by_list(self):
"""Reads all frames from a list of frame numbers"""
self.vc = cv2.VideoCapture(self.videoPath)
self.vc.set(1, self.listOfFrames[0])
self.lastFrame = self.listOfFrames[0]
self.endFrame = self.listOfFrames[-1]
self.vc = cv2.VideoCapture(self.video_path)
self.vc.set(1, self.list_of_frames[0])
self.last_frame = self.list_of_frames[0]
self.end_frame = self.list_of_frames[-1]
while self.lastFrame < self.endFrame:
if self.lastFrame in self.listOfFrames:
while self.last_frame < self.end_frame:
if self.last_frame in self.list_of_frames:
res, frame = self.vc.read()
if res:
self.buffer.put((self.lastFrame, frame))
self.buffer.put((self.last_frame, frame))
else:
print("Couldn't read Frame")
# since the list is sorted the first element is always the lowest relevant framenumber
# [0,1,2,3,32,33,34,35,67,68,69]
self.listOfFrames.pop(0)
self.lastFrame += 1
self.list_of_frames.pop(0)
self.last_frame += 1
else:
# if current Frame number is not in list of Frames, we can skip a few frames
self.vc.set(1, self.listOfFrames[0])
self.lastFrame = self.listOfFrames[0]
self.buffer.put((self.lastFrame, None))
self.vc.set(1, self.list_of_frames[0])
self.last_frame = self.list_of_frames[0]
self.buffer.put((self.last_frame, None))
def videoEnded(self):
def video_ended(self):
if self.stopped and self.buffer.empty():
return True
else:
return False
def calcFPS(self):
self.fps = cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FPS)
def calc_fps(self):
self.fps = cv2.VideoCapture(self.video_path).get(cv2.CAP_PROP_FPS)
def getFPS(self):
def get_fps(self):
if self.fps is None:
self.calcFPS()
self.calc_fps()
return self.fps
def calcLength(self):
fc = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
self.length = fc / self.getFPS()
def calc_length(self):
fc = int(cv2.VideoCapture(self.video_path).get(cv2.CAP_PROP_FRAME_COUNT))
self.length = fc / self.get_fps()
def getLength(self):
def get_length(self):
if self.length is None:
self.calcLength()
self.calc_length()
return self.length
def calcStartTime(self):
starttime = os.stat(self.videoPath).st_mtime
length = self.getLength()
def calc_start_time(self):
starttime = os.stat(self.video_path).st_mtime
length = self.get_length()
starttime = starttime - length
self.starttime = starttime
def getStartTime(self):
def get_start_time(self):
return self.starttime
def getWH(self):
def get_wh(self):
"""get width and height"""
vc = cv2.VideoCapture(self.videoPath)
vc = cv2.VideoCapture(self.video_path)
if self.w is None or self.h is None:
res, image = vc.read()
self.w = image.shape[1]

53
main.py
View File

@ -1,5 +1,6 @@
import os
import time
import argparse
from Application.Config import Config
from Application.ContourExctractor import ContourExtractor
@ -12,45 +13,51 @@ from Application.VideoReader import VideoReader
def main(config):
startTotal = time.time()
start_total = time.time()
if not os.path.exists(config["importPath"]):
contours, masks = ContourExtractor(config).extractContours()
layerFactory = LayerFactory(config)
layers = layerFactory.extractLayers(contours, masks)
contours, masks = ContourExtractor(config).extract_contours()
layers = LayerFactory(config).extract_layers(contours, masks)
else:
layers, contours, masks = Importer(config).importRawData()
layerFactory = LayerFactory(config)
layers = layerFactory.extractLayers(contours, masks)
layers, contours, masks = Importer(config).import_raw_data()
layers = LayerFactory(config).extract_layers(contours, masks)
layerManager = LayerManager(config, layers)
layerManager.cleanLayers()
layer_manager = LayerManager(config, layers)
layer_manager.clean_layers()
# layerManager.tagLayers()
if len(layerManager.layers) == 0:
if len(layer_manager.layers) == 0:
exit(1)
heatmap = HeatMap(
config["w"], config["h"], [contour for layer in layerManager.layers for contour in layer.bounds], 1920 / config["resizeWidth"]
config["w"], config["h"], [contour for layer in layer_manager.layers for contour in layer.bounds], 1920 / config["resizeWidth"]
)
heatmap.showImage()
heatmap.save__image(config["outputPath"].split(".")[0] + "_heatmap.png")
print(f"Exporting {len(contours)} Contours and {len(layerManager.layers)} Layers")
Exporter(config).export(layerManager.layers, contours, masks, raw=True, overlayed=True)
print("Total time: ", time.time() - startTotal)
print(f"Exporting {len(contours)} Contours and {len(layer_manager.layers)} Layers")
Exporter(config).export(layer_manager.layers, contours, masks, raw=True, overlayed=True)
print("Total time: ", time.time() - start_total)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract movement from static camera recording')
parser.add_argument('input', metavar='input_file', type=str,
help='input video to extract movement from')
parser.add_argument('output', metavar='output_dir', type=str, nargs="?", default="output",
help='output directory to save results and cached files into')
args = parser.parse_args()
config = Config()
inputPath = os.path.join(os.path.dirname(__file__), "input/x23-1.mp4")
outputPath = os.path.join(os.path.dirname(__file__), "output")
fileName = inputPath.split("/")[-1]
input_path = os.path.join(os.path.dirname(__file__), args.input)
output_path = os.path.join(os.path.dirname(__file__), args.output)
config["inputPath"] = inputPath
config["outputPath"] = os.path.join(outputPath, fileName)
config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
config["w"], config["h"] = VideoReader(config).getWH()
file_name = input_path.split("/")[-1]
config["inputPath"] = input_path
config["outputPath"] = os.path.join(output_path, file_name)
config["importPath"] = os.path.join(output_path, file_name.split(".")[0] + ".txt")
config["w"], config["h"] = VideoReader(config).get_wh()
main(config)