From df1c3cea05753ae8fb2c784f8f705cb36df28dd1 Mon Sep 17 00:00:00 2001
From: Askill
Date: Mon, 15 Aug 2022 12:20:28 +0200
Subject: [PATCH] video reader has multiprocessing

---
 Application/ContourExctractor.py |  9 ++++----
 Application/Exporter.py          |  1 -
 Application/VideoReader.py       | 34 +++++++++++++++++-------------
 main.py                          | 36 +++++++++++++++----------------
 requirements.txt                 |  4 +++-
 5 files changed, 45 insertions(+), 39 deletions(-)

diff --git a/Application/ContourExctractor.py b/Application/ContourExctractor.py
index 08d37c7..f146a54 100644
--- a/Application/ContourExctractor.py
+++ b/Application/ContourExctractor.py
@@ -13,7 +13,6 @@ import os
 
 
 class ContourExtractor:
-
     # extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
     # dict with frame numbers as keys and the contour bounds of every contour for that frame
 
@@ -50,11 +49,13 @@ class ContourExtractor:
         self.start = time.time()
         # start a bunch of frames and let them read from the video reader buffer until the video reader reaches EOF
         with ThreadPool(2) as pool:
-            while not videoReader.videoEnded():
-                if videoReader.buffer.qsize() == 0:
+            while True:
+                while not videoReader.videoEnded() and videoReader.buffer.qsize() == 0:
                     time.sleep(0.5)
                 tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
+                if videoReader.videoEnded():
+                    break
 
                 pool.map(self.computeMovingAverage, (tmpData,))
                 pool.map(self.async2, (tmpData,))
                 # for data in tmpData:
@@ -132,7 +133,7 @@ class ContourExtractor:
         frames = self.lastFrames + frames
         tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
 
-        with ThreadPool(os.cpu_count()) as pool:
+        with ThreadPool(int(os.cpu_count())) as pool:
             pool.map(self.averageDaFrames, tmp)
 
         self.lastFrames = frames[-averageFrames:]
diff --git a/Application/Exporter.py b/Application/Exporter.py
index 2368171..9ce8f8d 100644
--- a/Application/Exporter.py
+++ b/Application/Exporter.py
@@ -149,7 +149,6 @@ class Exporter:
             except:
                 continue
         videoReader.thread.join()
-        videoReader.vc.release()
 
         self.fps = videoReader.getFPS()
         fps = self.fps
diff --git a/Application/VideoReader.py b/Application/VideoReader.py
index e637fdf..8e251be 100644
--- a/Application/VideoReader.py
+++ b/Application/VideoReader.py
@@ -1,4 +1,4 @@
-from queue import Queue
+import multiprocessing
 
 import cv2
 import threading
@@ -18,8 +18,8 @@ class VideoReader:
         self.lastFrame = 0
         # buffer data struct:
         # buffer = Queue([(frameNumber, frame), ])
-        self.buffer = Queue(config["videoBufferLength"])
-        self.vc = cv2.VideoCapture(videoPath)
+        self.buffer = multiprocessing.Queue(config["videoBufferLength"])
+        #self.vc = cv2.VideoCapture(videoPath)
         self.stopped = False
         self.getWH()
         self.calcFPS()
@@ -40,31 +40,35 @@ class VideoReader:
         self.vc.release()
 
     def pop(self):
-        return self.buffer.get(block=True)
+        frameNumber, frame = self.buffer.get(block=True)
+        if frame is None:
+            self.stopped = True
+        return frameNumber, frame
 
     def fillBuffer(self, listOfFrames=None):
-        self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
+        self.endFrame = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
         if listOfFrames is not None:
             self.listOfFrames = listOfFrames
 
         if self.listOfFrames is not None:
-            self.thread = threading.Thread(target=self.readFramesByList, args=())
+            self.thread = multiprocessing.Process(target=self.readFramesByList, args=())
         else:
-            self.thread = threading.Thread(target=self.readFrames, args=())
+            self.thread = multiprocessing.Process(target=self.readFrames, args=())
         self.thread.start()
 
     def readFrames(self):
         """Reads video from start to finish"""
+        self.vc = cv2.VideoCapture(self.videoPath)
         while self.lastFrame < self.endFrame:
             res, frame = self.vc.read()
             if res:
                 self.buffer.put((self.lastFrame, frame))
             self.lastFrame += 1
-
-        self.stopped = True
+        self.buffer.put((self.lastFrame, None))
 
     def readFramesByList(self):
         """Reads all frames from a list of frame numbers"""
+        self.vc = cv2.VideoCapture(self.videoPath)
         self.vc.set(1, self.listOfFrames[0])
         self.lastFrame = self.listOfFrames[0]
         self.endFrame = self.listOfFrames[-1]
@@ -75,7 +79,7 @@ class VideoReader:
             if res:
                 self.buffer.put((self.lastFrame, frame))
             else:
-                print("READING FRAMES IS FALSE")
+                print("Couldn't read Frame")
             # since the list is sorted the first element is always the lowest relevant framenumber
             # [0,1,2,3,32,33,34,35,67,68,69]
             self.listOfFrames.pop(0)
@@ -84,8 +88,8 @@ class VideoReader:
                 # if current Frame number is not in list of Frames, we can skip a few frames
                 self.vc.set(1, self.listOfFrames[0])
                 self.lastFrame = self.listOfFrames[0]
+        self.buffer.put((self.lastFrame, None))
 
-        self.stopped = True
 
     def videoEnded(self):
         if self.stopped and self.buffer.empty():
@@ -94,7 +98,7 @@ class VideoReader:
         return False
 
     def calcFPS(self):
-        self.fps = self.vc.get(cv2.CAP_PROP_FPS)
+        self.fps = cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FPS)
 
     def getFPS(self):
         if self.fps is None:
@@ -102,7 +106,7 @@ class VideoReader:
         return self.fps
 
     def calcLength(self):
-        fc = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
+        fc = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
         self.length = fc / self.getFPS()
 
     def getLength(self):
@@ -121,8 +125,10 @@ class VideoReader:
 
     def getWH(self):
         """get width and height"""
+        vc = cv2.VideoCapture(self.videoPath)
         if self.w is None or self.h is None:
-            res, image = self.vc.read()
+            res, image = vc.read()
             self.w = image.shape[1]
             self.h = image.shape[0]
+        return (self.w, self.h)
 
diff --git a/main.py b/main.py
index 0eeae65..5aa9fca 100644
--- a/main.py
+++ b/main.py
@@ -1,7 +1,6 @@
 import os
 import time
 
-from Application.Classifiers import *
 from Application.Config import Config
 from Application.ContourExctractor import ContourExtractor
 from Application.Exporter import Exporter
@@ -12,18 +11,8 @@ from Application.LayerManager import LayerManager
 from Application.VideoReader import VideoReader
 
 
-def main():
+def main(config):
     startTotal = time.time()
-    config = Config()
-
-    fileName = "./x23-1.mp4"
-    outputPath = os.path.join(os.path.dirname(__file__), "output")
-    dirName = os.path.join(os.path.dirname(__file__), "generate test footage")
-
-    config["inputPath"] = os.path.join(dirName, fileName)
-    config["outputPath"] = os.path.join(outputPath, fileName)
-    config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
-    config["w"], config["h"] = VideoReader(config).getWH()
 
     if not os.path.exists(config["importPath"]):
         contours, masks = ContourExtractor(config).extractContours()
@@ -38,18 +27,27 @@ def main():
     layerManager.cleanLayers()
     # layerManager.tagLayers()
 
-    layers = layerManager.layers
-    if len(layers) == 0:
+    if len(layerManager.layers) == 0:
         exit(1)
 
-    heatmap = HeatMap(config["w"], config["h"], [contour for layer in layers for contour in layer.bounds], 1920 / config["resizeWidth"])
+    heatmap = HeatMap(config["w"], config["h"], [contour for layer in layerManager.layers for contour in layer.bounds], 1920 / config["resizeWidth"])
     heatmap.showImage()
 
-    exporter = Exporter(config)
-    print(f"Exporting {len(contours)} Contours and {len(layers)} Layers")
-    exporter.export(layers, contours, masks, raw=True, overlayed=True)
+    print(f"Exporting {len(contours)} Contours and {len(layerManager.layers)} Layers")
+    Exporter(config).export(layerManager.layers, contours, masks, raw=True, overlayed=True)
     print("Total time: ", time.time() - startTotal)
 
 
 if __name__ == "__main__":
-    main()
+    config = Config()
+
+    fileName = "x23-1.mp4"
+    outputPath = os.path.join(os.path.dirname(__file__), "output")
+    inputDirPath = os.path.join(os.path.dirname(__file__), "generate test footage")
+
+    config["inputPath"] = os.path.join(inputDirPath, fileName)
+    config["outputPath"] = os.path.join(outputPath, fileName)
+    config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
+    config["w"], config["h"] = VideoReader(config).getWH()
+
+    main(config)
diff --git a/requirements.txt b/requirements.txt
index 78a7dc5..6df5dc0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,6 @@ opencv-python
 numpy
 imutils
 imageio
-tensorflow
\ No newline at end of file
+tensorflow
+matplotlib
+imageio-ffmpeg
\ No newline at end of file
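
Note (illustration, not part of the diff above): the VideoReader changes follow a sentinel-based producer/consumer pattern. A child process opens its own cv2.VideoCapture, pushes (frameNumber, frame) tuples onto a bounded multiprocessing.Queue, and marks end-of-stream by queueing a final (frameNumber, None) tuple, which pop() turns into the stopped flag that videoEnded() checks. A minimal, self-contained sketch of that pattern; "example.mp4" and the queue size of 64 are placeholder values.

import multiprocessing

import cv2


def read_frames(video_path, buffer):
    # Producer: runs in a child process; the capture is opened here because a
    # cv2.VideoCapture handle cannot be shared across the process boundary.
    vc = cv2.VideoCapture(video_path)
    frame_number = 0
    while True:
        res, frame = vc.read()
        if not res:
            break
        buffer.put((frame_number, frame))
        frame_number += 1
    buffer.put((frame_number, None))  # sentinel: no more frames will follow
    vc.release()


if __name__ == "__main__":
    buffer = multiprocessing.Queue(64)  # bounded, like config["videoBufferLength"]
    reader = multiprocessing.Process(target=read_frames, args=("example.mp4", buffer))
    reader.start()

    stopped = False
    while not stopped:
        frame_number, frame = buffer.get(block=True)
        if frame is None:  # sentinel seen -> producer has finished
            stopped = True
            continue
        # consume the frame here, e.g. hand it to the contour extraction stage

    reader.join()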
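
A second note, an assumption rather than something stated in the patch: one likely reason main.py now builds the Config and paths inside the if __name__ == "__main__": block and passes them to main(config) is that multiprocessing defaults to the spawn start method on Windows and macOS, where each child process re-imports the main module. Setup kept under the guard runs only once, in the parent. A small sketch of that guard, with a placeholder work() function and path:

import multiprocessing
import os


def work(path):
    # Executed in the child process.
    print("child sees:", path)


if __name__ == "__main__":
    # Skipped when a spawned child re-imports this module; runs only in the parent.
    input_path = os.path.join(os.path.dirname(__file__), "example.mp4")  # placeholder
    worker = multiprocessing.Process(target=work, args=(input_path,))
    worker.start()
    worker.join()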