more refactor
This commit is contained in:
parent 432c837145
commit ae8f13b1b1

@@ -48,7 +48,7 @@ class Exporter:
             frame_count, frame = video_reader.pop()
             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             frame2 = np.copy(underlay)
-            for (x, y, w, h) in layer.bounds[frame_count - layer.startFrame]:
+            for (x, y, w, h) in layer.bounds[frame_count - layer.start_frame]:
                 if x is None:
                     continue
                 factor = video_reader.w / self.resize_width

@@ -82,30 +82,30 @@ class Exporter:
             if frame_count % (60 * fps) == 0:
                 print("Minutes processed: ", frame_count / (60 * fps), end="\r")
             if frame is None:
-                print("ContourExtractor: frame was None")
+                print("Exporter: frame was None")
                 continue

             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             for layer in layers:
-                if layer.startFrame <= frame_count and layer.startFrame + len(layer.bounds) > frame_count:
-                    for i in range(0, len(layer.bounds[frame_count - layer.startFrame])):
+                if layer.start_frame <= frame_count and layer.start_frame + len(layer.bounds) > frame_count:
+                    for i in range(0, len(layer.bounds[frame_count - layer.start_frame])):
                         try:
-                            x, y, w, h = layer.bounds[frame_count - layer.startFrame][i]
+                            x, y, w, h = layer.bounds[frame_count - layer.start_frame][i]
                             if None in (x, y, w, h):
                                 break
                             factor = videoReader.w / self.resize_width
                             x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

                             mask = self.get_mask(i, frame_count, layer, w, h)
-                            background = frames[frame_count - layer.startFrame + layer.exportOffset]
+                            background = frames[frame_count - layer.start_frame + layer.export_offset]
                             self.add_masked_content(frame, x, y, w, h, mask, background)
-                            frames[frame_count - layer.startFrame + layer.exportOffset] = np.copy(background)
+                            frames[frame_count - layer.start_frame + layer.export_offset] = np.copy(background)

                             if show_progress:
                                 cv2.imshow("changes x", background)
                                 cv2.waitKey(10) & 0xFF

-                            self.add_timestamp(frames[frame_count - layer.startFrame + layer.exportOffset], videoReader, frame_count, x, y, w, h)
+                            self.add_timestamp(frames[frame_count - layer.start_frame + layer.export_offset], videoReader, frame_count, x, y, w, h)
                         except:
                             continue

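A minimal standalone sketch of the coordinate rescaling used in the hunk above: boxes found on a downscaled frame are mapped back to full resolution with a single width ratio. The function name and the literal widths below are illustrative, not part of the project.

# Illustrative only: mirrors `factor = videoReader.w / self.resize_width` followed by
# `x, y, w, h = (int(x * factor), ...)` in the diff above.
def scale_box(box, full_width, resize_width):
    factor = full_width / resize_width
    x, y, w, h = box
    return (int(x * factor), int(y * factor), int(w * factor), int(h * factor))

print(scale_box((10, 20, 30, 40), full_width=1920, resize_width=640))  # (30, 60, 90, 120)
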
@@ -144,7 +144,7 @@ class Exporter:
         )

     def get_mask(self, i, frame_count, layer, w, h):
-        mask = layer.masks[frame_count - layer.startFrame][i]
+        mask = layer.masks[frame_count - layer.start_frame][i]
         mask = imutils.resize(mask, width=w, height=h + 1)
         mask = np.resize(mask, (h, w))
         mask = cv2.erode(mask, None, iterations=10)

@@ -152,24 +152,24 @@ class Exporter:
         return mask

     def export_raw_data(self, layers, contours, masks):
-        with open(self.config["importPath"] + "_layers", "wb+") as file:
+        with open(self.config["cachePath"].split(".")[0] + "_layers.txt", "wb+") as file:
             pickle.dump(layers, file)
-        with open(self.config["importPath"] + "_contours", "wb+") as file:
+        with open(self.config["cachePath"].split(".")[0] + "_contours.txt", "wb+") as file:
             pickle.dump(contours, file)
-        with open(self.config["importPath"] + "_masks", "wb+") as file:
+        with open(self.config["cachePath"].split(".")[0] + "_masks.txt", "wb+") as file:
             pickle.dump(masks, file)

     def get_max_length_of_layers(self, layers):
         max_length = 0
         for layer in layers:
-            if layer.getLength() > max_length:
-                max_length = layer.getLength()
+            if layer.get_length() > max_length:
+                max_length = layer.get_length()
         return max_length

     def make_list_of_frames(self, layers):
         """Returns set of all Frames which are relevant to the Layers"""
         frame_numbers = set()
         for layer in layers:
-            frame_numbers.update(list(range(layer.startFrame, layer.startFrame + len(layer))))
+            frame_numbers.update(list(range(layer.start_frame, layer.start_frame + len(layer))))

         return sorted(list(frame_numbers))

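A minimal sketch of the cache round trip implied by the refactored export_raw_data above and Importer.import_raw_data below, assuming config["cachePath"] is an extension-free path prefix; the path and payload values are placeholders.

import pickle

cache_path = "video"                 # placeholder for config["cachePath"]
layers = ["layer0", "layer1"]        # placeholder payload

# Write the layer cache the way export_raw_data names it ...
with open(cache_path + "_layers.txt", "wb+") as file:
    pickle.dump(layers, file)

# ... and read it back the way import_raw_data does.
with open(cache_path + "_layers.txt", "rb") as file:
    assert pickle.load(file) == layers
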
@@ -1,6 +1,5 @@
 import numpy as np
 from matplotlib import pyplot as plt
-from PIL import Image

 class HeatMap:
     def __init__(self, x, y, contours, resize_factor=1):

@@ -25,6 +24,5 @@ class HeatMap:
         plt.imshow(self.image_bw * 255)
         plt.show()

-    def save__image(self, path):
-        im = Image.fromarray(self.image_bw * 255)
-        im.save(path)
+    def save_image(self, path):
+        plt.imsave(path, (255 * self.image_bw).astype(np.uint8))

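For context, a standalone sketch of the plt.imsave call the new save_image relies on. Note that matplotlib applies its default colormap to 2-D arrays unless cmap is given; the array and file name below are synthetic.

import numpy as np
from matplotlib import pyplot as plt

# Illustrative only: save a 0/1 mask scaled to 0..255 as an 8-bit image.
image_bw = np.zeros((64, 64), dtype=np.uint8)
image_bw[16:48, 16:48] = 1                     # synthetic white square

plt.imsave("heatmap_demo.png", (255 * image_bw).astype(np.uint8), cmap="gray")
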
@@ -3,14 +3,14 @@ import os.path

 class Importer:
     def __init__(self, config):
-        self.path = config["importPath"]
+        self.path = config["cachePath"]

     def import_raw_data(self):
         print("Loading previous results")

-        layers = self.load_if_present(self.path + "_layers")
-        contours = self.load_if_present(self.path + "_contours")
-        masks = self.load_if_present(self.path + "_masks")
+        layers = self.load_if_present(self.path + "_layers.txt")
+        contours = self.load_if_present(self.path + "_contours.txt")
+        masks = self.load_if_present(self.path + "_masks.txt")

         return layers, contours, masks

@@ -13,7 +13,7 @@ class Layer:
     def __init__(self, start_frame, data, mask, config):
         """returns a Layer object

-        Layers are collections of contours with a StartFrame,
+        Layers are collections of contours with a start_frame,
         which is the number of the frame the first contour of
         this layer was extraced from

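A minimal sketch of the indexing convention the docstring describes; the LayerStub class below is a hypothetical stand-in, not the project's Layer: bounds[n] holds the boxes of absolute frame start_frame + n.

# Illustrative only: per-frame lookup relative to a layer's start_frame.
class LayerStub:
    def __init__(self, start_frame, bounds):
        self.start_frame = start_frame   # absolute frame number of bounds[0]
        self.bounds = bounds             # one list of (x, y, w, h) boxes per frame

layer = LayerStub(start_frame=100, bounds=[[(1, 2, 3, 4)], [(5, 6, 7, 8)]])
frame_count = 101
print(layer.bounds[frame_count - layer.start_frame])   # boxes of frame 101: [(5, 6, 7, 8)]
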
@@ -72,8 +72,8 @@ class Layer:
         """Checks for overlap in time between current and given layer"""
         s1 = self.export_offset
         e1 = self.last_frame - self.start_frame + self.export_offset
-        s2 = layer2.exportOffset
-        e2 = layer2.lastFrame - layer2.startFrame + layer2.exportOffset
+        s2 = layer2.export_offset
+        e2 = layer2.last_frame - layer2.start_frame + layer2.export_offset

         if s2 >= s1 and s2 <= e1:
             return True

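A standalone sketch of the export-timeline overlap test above, written over plain (start_frame, last_frame, export_offset) tuples rather than Layer objects. The visible hunk only shows the check on the second layer's start; the symmetric check on its end is an assumption about the lines that follow.

# Illustrative only: two layers collide in the exported video if the second layer's
# shifted interval touches the first layer's shifted interval [s1, e1].
def overlaps(layer1, layer2):
    start1, last1, off1 = layer1
    start2, last2, off2 = layer2
    s1, e1 = off1, last1 - start1 + off1
    s2, e2 = off2, last2 - start2 + off2
    return s1 <= s2 <= e1 or s1 <= e2 <= e1

print(overlaps((0, 50, 0), (10, 60, 30)))   # True: second interval starts inside the first
print(overlaps((0, 50, 0), (10, 60, 100)))  # False: offset pushes it past the first layer
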
@@ -93,7 +93,7 @@ class LayerFactory:
         for layer in layers[1:]:
             for i, (contours, masks) in enumerate(zip(layer.bounds, layer.masks)):
                 for contour, mask in zip(contours, masks):
-                    merged_layers.add(layer.startFrame + i, contour, mask)
+                    merged_layers.add(layer.start_frame + i, contour, mask)

         for i, id in enumerate(found_layer_i_ds):
             del self.layers[id - i]

@@ -101,7 +101,7 @@ class LayerFactory:
         self.layers.append(merged_layers)

     def join_layers(self):
-        self.layers.sort(key=lambda c: c.startFrame)
+        self.layers.sort(key=lambda c: c.start_frame)
         min_frame = self.get_min_start(self.layers)
         max_frame = self.get_max_end(self.layers)

@@ -113,13 +113,13 @@ class LayerFactory:
         inner_max = self.get_max_end(p_l)
         for x in range(self.get_min_start(p_l), inner_max):
             for lc, l in enumerate(p_l):
-                if l.startFrame < x or l.lastFrame > x:
+                if l.start_frame < x or l.last_frame > x:
                     continue
                 for lc2, l2 in enumerate(p_l):
                     if lc2 == lc:
                         continue
-                    for cnt in l.bounds[x - l.startFrame]:
-                        for cnt2 in l2.bounds[x - l2.startFrame]:
+                    for cnt in l.bounds[x - l.start_frame]:
+                        for cnt2 in l2.bounds[x - l2.start_frame]:
                             if self.contours_overlay(cnt, cnt2):
                                 merge.add(indexes[lc])
                                 merge.add(indexes[lc2])

@@ -138,17 +138,17 @@ class LayerFactory:
         return (ret, ii)

     def get_min_start(self, layers):
-        min_frame = layers[0].startFrame
+        min_frame = layers[0].start_frame
         for l in layers:
-            if l.startFrame < min_frame:
-                min_frame = l.startFrame
+            if l.start_frame < min_frame:
+                min_frame = l.start_frame
         return min_frame

     def get_max_end(self, layers):
-        max_frame = layers[0].lastFrame
+        max_frame = layers[0].last_frame
         for l in layers:
-            if l.lastFrame < max_frame:
-                max_frame = l.lastFrame
+            if l.last_frame < max_frame:
+                max_frame = l.last_frame
         return max_frame

     def contours_overlay(self, l1, r1, l2, r2):

@@ -163,5 +163,5 @@ class LayerFactory:
         for layer_id in found_layer_i_ds:
             layers.append(self.layers[layer_id])

-        layers.sort(key=lambda c: c.startFrame)
+        layers.sort(key=lambda c: c.start_frame)
         return layers

@@ -82,7 +82,7 @@ class LayerManager:
             frame_count, frame = video_reader.pop()
             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             data = []
-            for (x, y, w, h) in layer.bounds[frame_count - layer.startFrame]:
+            for (x, y, w, h) in layer.bounds[frame_count - layer.start_frame]:
                 if x is None:
                     break
                 factor = video_reader.w / self.resize_width

@@ -99,7 +99,7 @@ class LayerManager:
         video_reader.thread.join()

     def sort_layers(self):
-        self.layers.sort(key=lambda c: c.startFrame)
+        self.layers.sort(key=lambda c: c.start_frame)

     def calc_time_offset(self):
         len_l = len(self.layers)

@@ -115,8 +115,8 @@ class LayerManager:
                     overlap = True
                     break
             if overlap:
-                self.layers[i].exportOffset += 20 * tries
+                self.layers[i].export_offset += 20 * tries
                 tries += 1

-            # if self.layers[i].exportOffset >= 300000:
+            # if self.layers[i].export_offset >= 300000:
             #     break

main.py

@@ -15,11 +15,11 @@ from Application.VideoReader import VideoReader
 def main(config):
     start_total = time.time()

-    if not os.path.exists(config["importPath"]):
-        contours, masks = ContourExtractor(config).extract_contours()
+    if os.path.exists(config["cachePath"] + "_layers.txt"):
+        layers, contours, masks = Importer(config).import_raw_data()
         layers = LayerFactory(config).extract_layers(contours, masks)
     else:
-        layers, contours, masks = Importer(config).import_raw_data()
+        contours, masks = ContourExtractor(config).extract_contours()
+        layers = LayerFactory(config).extract_layers(contours, masks)

     layer_manager = LayerManager(config, layers)

@@ -32,7 +32,8 @@ def main(config):
     heatmap = HeatMap(
         config["w"], config["h"], [contour for layer in layer_manager.layers for contour in layer.bounds], 1920 / config["resizeWidth"]
     )
-    heatmap.save__image(config["outputPath"].split(".")[0] + "_heatmap.png")
+    heatmap.show_image()
+    #heatmap.save_image(config["outputPath"].split(".")[0] + "_heatmap.png") # not working yet

     print(f"Exporting {len(contours)} Contours and {len(layer_manager.layers)} Layers")
     Exporter(config).export(layer_manager.layers, contours, masks, raw=True, overlayed=True)

@@ -57,7 +58,7 @@ if __name__ == "__main__":

     config["inputPath"] = input_path
     config["outputPath"] = os.path.join(output_path, file_name)
-    config["importPath"] = os.path.join(output_path, file_name.split(".")[0] + ".txt")
+    config["cachePath"] = os.path.join(output_path, file_name.split(".")[0])
     config["w"], config["h"] = VideoReader(config).get_wh()

     main(config)
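
A small sketch of how the new cachePath relates to the cache file names used elsewhere in this diff; output_path and file_name are placeholder values.

import os.path

output_path = "out"
file_name = "video.mp4"

# cachePath is the output location minus the file extension; Exporter/Importer
# append suffixes such as "_layers.txt" to it.
cache_path = os.path.join(output_path, file_name.split(".")[0])
print(cache_path)                   # out/video   (separator is OS-specific)
print(cache_path + "_layers.txt")   # out/video_layers.txt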