diff --git a/Config.py b/Config.py
index c066923..3e3f902 100644
--- a/Config.py
+++ b/Config.py
@@ -2,25 +2,23 @@ class Config:
     c = {
         "min_area" : 500,
-        "max_area" : 28000,
-        "threashold" : 13,
-        "xDim" : 0,
-        "yDim" : 0,
+        "max_area" : 9000,
+        "threashold" : 10,
         "resizeWidth" : 512,
-        "inputPath" : "",
-        "outputPath": "",
-        "maxLayerLength": 1000,
-        "minLayerLength": 0,
-        "fps": 30,
-        "tolerance": 5,
+        "inputPath" : None,
+        "outputPath": None,
+        "maxLayerLength": 900,
+        "minLayerLength": 30,
+        "tolerance": 10,
         "maxLength": None,
-        ""
-    }
-    __init__(self):
-        print(Current Config:)
+        "ttolerance": 10,
+        "videoBufferLength": 16}
+
+    def __init__(self):
+        print("Current Config:", self.c)
 
     def __getitem__(self, key):
         return self.c[key]
 
     def __setitem__(self, key, value):
-        return self.c[key] = value
+        self.c[key] = value
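
Review note: the reworked Config is a thin wrapper around the class-level dict c, so callers read and write settings with item access. A minimal usage sketch based on this hunk; the paths are placeholders, not values taken from the repo:

    from Config import Config

    config = Config()                          # __init__ prints "Current Config:" plus the defaults
    config["inputPath"] = "footage/3.mp4"      # placeholder; must be set before a VideoReader is built
    config["outputPath"] = "output/short.mp4"  # placeholder
    print(config["min_area"], config["threashold"])  # defaults from the dict: 500 and 10

Because c is declared at class level, every Config instance shares the same underlying dict; that looks intentional here, but it is worth keeping in mind if more than one Config is ever constructed.
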
diff --git a/ContourExctractor.py b/ContourExctractor.py
index 71adcc2..f6c8546 100644
--- a/ContourExctractor.py
+++ b/ContourExctractor.py
@@ -17,6 +17,7 @@
 from VideoReader import VideoReader
 from queue import Queue
 import threading
 from multiprocessing.pool import ThreadPool
+from Config import Config
 
 class ContourExtractor:
@@ -26,28 +27,31 @@ class ContourExtractor:
 
     def getextractedContours(self):
         return self.extractedContours
 
-    def __init__(self):
+    def __init__(self, config):
         self.frameBuffer = Queue(16)
         self.extractedContours = dict()
-        self.min_area = 30
-        self.max_area = 1000
-        self.threashold = 13
+        self.min_area = config["min_area"]
+        self.max_area = config["max_area"]
+        self.threashold = config["threashold"]
+        self.resizeWidth = config["resizeWidth"]
+        self.videoPath = config["inputPath"]
         self.xDim = 0
         self.yDim = 0
+        self.config = config
         print("ContourExtractor initiated")
 
-    def extractContours(self, videoPath, resizeWidth):
+    def extractContours(self):
         extractedContours = dict()
-        videoReader = VideoReader(videoPath)
+        videoReader = VideoReader(self.config)
         self.xDim = videoReader.w
         self.yDim = videoReader.h
-        self.resizeWidth = resizeWidth
+
         videoReader.fillBuffer()
 
         frameCount, frame = videoReader.pop()
         #init compare image
-        frame = imutils.resize(frame, width=resizeWidth)
+        frame = imutils.resize(frame, width=self.resizeWidth)
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
         #gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
         gray = cv2.GaussianBlur(gray, (5, 5), 0)
@@ -63,14 +67,13 @@ class ContourExtractor:
 
             start = time.time()
             if videoReader.buffer.qsize() == 0:
-                time.sleep(1)
+                time.sleep(.5)
 
             tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
             frameCount = tmpData[-1][0]
             pool.map(self.getContours, tmpData)
 
         videoReader.thread.join()
-
         return self.extractedContours
 
     def getContours(self, data):
@@ -82,7 +85,7 @@ class ContourExtractor:
         frameDelta = cv2.absdiff(gray, firstFrame)
         thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
         # dilate the thresholded image to fill in holes, then find contours
-        thresh = cv2.dilate(thresh, None, iterations=3)
+        thresh = cv2.dilate(thresh, None, iterations=4)
         cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         cnts = imutils.grab_contours(cnts)
@@ -99,12 +102,6 @@ class ContourExtractor:
         # this should be thread safe
         self.extractedContours[frameCount] = contours
-
-
-
-
-
-
 
     def displayContours(self):
         values = self.extractedContours.values()
         for xx in values:
diff --git a/Exporter.py b/Exporter.py
index 9ce9807..92f684a 100644
--- a/Exporter.py
+++ b/Exporter.py
@@ -9,29 +9,33 @@
 from VideoReader import VideoReader
 
 class Exporter:
     fps = 30
 
-    def __init__(self):
+    def __init__(self, config):
+        self.footagePath = config["inputPath"]
+        self.outputPath = config["outputPath"]
+        self.resizeWidth = config["resizeWidth"]
+        self.config = config
         print("Exporter initiated")
 
-    def export(self, frames, outputPath):
+    def export(self, frames):
         fps = self.fps
-        writer = imageio.get_writer(outputPath, fps=fps)
+        writer = imageio.get_writer(self.outputPath, fps=fps)
         for frame in frames:
             writer.append_data(np.array(frame))
         writer.close()
 
-    def exportLayers(self, layers, footagePath, outputPath, resizeWidth):
+    def exportLayers(self, layers):
         listOfFrames = self.makeListOfFrames(layers)
-        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader = VideoReader(self.config, listOfFrames)
         videoReader.fillBuffer()
         maxLength = self.getMaxLengthOfLayers(layers)
-        underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
         frames = [underlay]*maxLength
         exportFrame = 0
-        fps = self.fps
-        writer = imageio.get_writer(outputPath, fps=fps)
+        self.fps = videoReader.getFPS()
+        writer = imageio.get_writer(self.outputPath, fps=self.fps)
         while not videoReader.videoEnded():
             frameCount, frame = videoReader.pop()
             if frameCount % (60*self.fps) == 0:
@@ -45,12 +49,11 @@ class Exporter:
             for layer in layers:
                 if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                     (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
-                    factor = videoReader.w / resizeWidth
+                    factor = videoReader.w / self.resizeWidth
                     x = int(x * factor)
                     y = int(y * factor)
                     w = int(w * factor)
                     h = int(h * factor)
-                    # if exportFrame as index instead of frameCount - layer.startFrame then we have layer after layer
                     frame2 = underlay
                     frame2[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
                     writer.append_data(frame2)
@@ -59,12 +62,13 @@ class Exporter:
 
         videoReader.thread.join()
 
-    def exportOverlayed(self, layers, footagePath, outputPath, resizeWidth):
+    def exportOverlayed(self, layers):
+
         listOfFrames = self.makeListOfFrames(layers)
-        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader = VideoReader(self.config, listOfFrames)
         videoReader.fillBuffer()
         maxLength = self.getMaxLengthOfLayers(layers)
-        underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
         frames = [underlay]*maxLength
         exportFrame = 0
@@ -81,7 +85,7 @@ class Exporter:
             for layer in layers:
                 if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                     (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
-                    factor = videoReader.w / resizeWidth
+                    factor = videoReader.w / self.resizeWidth
                     x = int(x * factor)
                     y = int(y * factor)
                     w = int(w * factor)
@@ -94,9 +98,9 @@ class Exporter:
 
         videoReader.thread.join()
 
-
+        self.fps = videoReader.getFPS()
         fps = self.fps
-        writer = imageio.get_writer(outputPath, fps=fps)
+        writer = imageio.get_writer(self.outputPath, fps=fps)
 
         for frame in frames:
             writer.append_data(frame)
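
Review note: with ContourExtractor and Exporter both constructed from the shared Config, the end-to-end call flow collapses to the sketch below; it mirrors what demo() in main.py becomes at the end of this diff. The paths are placeholders, and the snippet assumes the repo modules are on the import path:

    from Config import Config
    from ContourExctractor import ContourExtractor
    from LayerFactory import LayerFactory
    from Exporter import Exporter

    config = Config()
    config["inputPath"] = "footage/3.mp4"      # placeholder input video
    config["outputPath"] = "output/short.mp4"  # placeholder output file

    contours = ContourExtractor(config).extractContours()  # {frameNumber: [(x, y, w, h), ...]}
    layerFactory = LayerFactory(config, contours)
    layerFactory.freeData()    # keeps layers between minLayerLength and maxLayerLength, drops static ones
    layerFactory.sortLayers()  # now sorted by startFrame instead of lastFrame
    Exporter(config).exportOverlayed(layerFactory.layers)
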
diff --git a/LayerFactory.py b/LayerFactory.py
index d19ee38..0e8c718 100644
--- a/LayerFactory.py
+++ b/LayerFactory.py
@@ -1,15 +1,23 @@
 from Layer import Layer
+from Config import Config
 
 class LayerFactory:
-    data = {}
-    layers = []
-    tolerance = 5
-    def __init__(self, data=None):
+    def __init__(self, config, data=None):
+        self.data = {}
+        self.layers = []
+        self.tolerance = config["tolerance"]
+        self.ttolerance = config["ttolerance"]
+        self.minLayerLength = config["minLayerLength"]
+        self.maxLayerLength = config["maxLayerLength"]
+        self.resizeWidth = config["resizeWidth"]
+        self.footagePath = config["inputPath"]
         print("LayerFactory constructed")
         self.data = data
         if data is not None:
             self.extractLayers(data)
+
+
 
     def removeStaticLayers(self):
         '''Removes Layers with little to no movement'''
         layers = []
@@ -24,11 +32,11 @@ class LayerFactory:
 
         self.layers = layers
 
-    def freeData(self, maxLayerLength, minLayerLength):
+    def freeData(self):
         self.data.clear()
         layers = []
         for l in self.layers:
-            if l.getLength() < maxLayerLength and l.getLength() > minLayerLength:
+            if l.getLength() < self.maxLayerLength and l.getLength() > self.minLayerLength:
                 layers.append(l)
         self.layers = layers
         self.removeStaticLayers()
@@ -51,14 +59,15 @@ class LayerFactory:
 
         oldLayerIDs = []
 
        # inserts all the fucking contours as layers?
-        for frameNumber, contours in data.items():
+        for frameNumber in sorted(data.keys()):
+            contours = data[frameNumber]
             if frameNumber%5000 == 0:
                 print(f"{int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction")
 
             for (x,y,w,h) in contours:
                 foundLayer = False
                 for i in set(range(0, len(self.layers))).difference(set(oldLayerIDs)):
-                    if frameNumber - self.layers[i].lastFrame > 10:
+                    if frameNumber - self.layers[i].lastFrame > self.ttolerance:
                         oldLayerIDs.append(i)
                         continue
@@ -67,7 +76,6 @@ class LayerFactory:
                         self.layers[i].add(frameNumber, (x,y,w,h))
                         foundLayer = True
                         break
-
                 if not foundLayer:
                     self.layers.append(Layer(frameNumber, (x,y,w,h)))
@@ -78,15 +86,13 @@ class LayerFactory:
         # If one rectangle is above other
         if(l1[1] <= r2[1] or l2[1] <= r1[1]):
             return False
-
         return True
 
-    def fillLayers(self, footagePath, resizeWidth):
+    def fillLayers(self):
         for i in range(len(self.layers)):
             if i % 20 == 0:
                 print(f"filled {int(round(i/len(self.layers),2)*100)}% of all Layers")
-            self.layers[i].fill(footagePath, resizeWidth)
+            self.layers[i].fill(self.footagePath, self.resizeWidth)
 
     def sortLayers(self):
-        # straight bubble
-        self.layers.sort(key = lambda c:c.lastFrame)
+        self.layers.sort(key = lambda c:c.startFrame)
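
Review note: LayerFactory keeps the existing tolerance setting and adds a separate temporal ttolerance, which bounds how many frames a layer may sit idle before new contours stop extending it (previously a hard-coded 10). The helper below is hypothetical, written only to illustrate that rule:

    def can_extend(layer_last_frame, frame_number, ttolerance=10):
        # Mirrors the check in LayerFactory.extractLayers: a layer whose lastFrame
        # is more than `ttolerance` frames behind the current frame is retired
        # (appended to oldLayerIDs) and no longer considered for new contours.
        return frame_number - layer_last_frame <= ttolerance

    print(can_extend(100, 105))  # True  -> the contour may still join this layer
    print(can_extend(100, 120))  # False -> the layer is treated as finished

Iterating over sorted(data.keys()) instead of data.items() supports this rule: ContourExtractor fills extractedContours from a thread pool, so the dict's insertion order is not guaranteed to be chronological, and the frame-gap check only makes sense when frames arrive in order.
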
diff --git a/VideoReader.py b/VideoReader.py
index fa7b97e..fb37343 100644
--- a/VideoReader.py
+++ b/VideoReader.py
@@ -4,6 +4,7 @@
 import cv2
 from time import sleep
 from queue import Queue
 import threading
+from Config import Config
 
 class VideoReader:
@@ -11,14 +12,15 @@ class VideoReader:
 
     #buffer = [(frameNumber, frame)]
     listOfFrames = None
 
-    def __init__(self, videoPath, setOfFrames = None):
+    def __init__(self, config, setOfFrames = None):
+        videoPath = config["inputPath"]
         if videoPath is None:
             print("Video reader needs a videoPath!")
             return None
         self.videoPath = videoPath
         self.lastFrame = 0
-        self.buffer = Queue(16)
+        self.buffer = Queue(config["videoBufferLength"])
         self.vc = cv2.VideoCapture(videoPath)
         self.stopped = False
         res, image = self.vc.read()
@@ -83,10 +85,11 @@ class VideoReader:
         self.stopped = True
 
     def videoEnded(self):
-        if self.stopped:
-            return True
-        else:
-            return False
+        return self.stopped
+
+
+    def getFPS(self):
+        return self.vc.get(cv2.CAP_PROP_FPS)
diff --git a/main.py b/main.py
index a26ebcd..52cf811 100644
--- a/main.py
+++ b/main.py
@@ -5,29 +5,26 @@
 from Exporter import Exporter
 from LayerFactory import LayerFactory
 from Analyzer import Analyzer
 from VideoReader import VideoReader
+from Config import Config
 import cv2
 
 #TODO
 # finden von relevanten Stellen anhand von zu findenen metriken für vergleichsbilder
 
 def demo():
     print("startup")
-    resizeWidth = 256
-    maxLayerLength = 20*30
-    minLayerLength = 30
     start = time.time()
+    config = Config()
 
-    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
-    #analyzer = Analyzer(footagePath)
-    #print("Time consumed reading video: ", time.time() - start)
+    config["inputPath"] = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
+    config["outputPath"] = os.path.join(os.path.dirname(__file__), "./output/short.mp4")
 
-    contours = ContourExtractor().extractContours(footagePath, resizeWidth)
+    contours = ContourExtractor(config).extractContours()
     print("Time consumed extracting: ", time.time() - start)
 
-    layerFactory = LayerFactory(contours)
-    layerFactory.freeData(maxLayerLength, minLayerLength)
-    print("sort Layers")
+    layerFactory = LayerFactory(config, contours)
+    layerFactory.freeData()
     layerFactory.sortLayers()
-    Exporter().exportOverlayed(layerFactory.layers,footagePath, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
+    Exporter(config).exportOverlayed(layerFactory.layers)
     print("Total time: ", time.time() - start)
 
 def init():