diff --git a/.gitignore b/.gitignore
index e98f1b9..ea407d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@
 generate test footage/3.MP4
 short.mp4
 __pycache__/
+
+*.mp4
diff --git a/ContourExctractor.py b/ContourExctractor.py
index 6393c1f..71adcc2 100644
--- a/ContourExctractor.py
+++ b/ContourExctractor.py
@@ -9,67 +9,75 @@
 import traceback
 import _thread
 import imageio
 import numpy as np
-import time
 from threading import Thread
 from multiprocessing import Queue, Process, Pool
 from multiprocessing.pool import ThreadPool
 import concurrent.futures
 from VideoReader import VideoReader
+from queue import Queue
+import threading
+from multiprocessing.pool import ThreadPool
 
 
 class ContourExtractor:
     #X = {frame_number: [(contour, (x,y,w,h)), ...], }
-    extractedContours = dict()
-    min_area = 100
-    max_area = 1000
-    threashold = 13
-    xDim = 0
-    yDim = 0
+
     def getextractedContours(self):
         return self.extractedContours
 
     def __init__(self):
+        self.frameBuffer = Queue(16)
+        self.extractedContours = dict()
+        self.min_area = 30
+        self.max_area = 1000
+        self.threashold = 13
+        self.xDim = 0
+        self.yDim = 0
+
         print("ContourExtractor initiated")
 
     def extractContours(self, videoPath, resizeWidth):
-        firstFrame = None
         extractedContours = dict()
         videoReader = VideoReader(videoPath)
         self.xDim = videoReader.w
         self.yDim = videoReader.h
+        self.resizeWidth = resizeWidth
         videoReader.fillBuffer()
+        frameCount, frame = videoReader.pop()
+
+        #init compare image
+        frame = imutils.resize(frame, width=resizeWidth)
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        #gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
+        gray = cv2.GaussianBlur(gray, (5, 5), 0)
+        self.firstFrame = gray
 
-        while not videoReader.videoEnded():
-            frameCount, frame = videoReader.pop()
-            if frameCount % (60*30) == 0:
-                print("Minutes processed: ", frameCount/(60*30))
-
-            if frame is None:
-                print("ContourExtractor: frame was None")
-                continue
+        threads = 16
+        start = time.time()
+        with ThreadPool(threads) as pool:
+            while not videoReader.videoEnded():
+                #FrameCount, frame = videoReader.pop()
+                if frameCount % (60*30) == 0:
+                    print(f"Minutes processed: {frameCount/(60*30)} in {round((time.time() - start), 2)} each")
+                    start = time.time()
 
-            # resize the frame, convert it to grayscale, and blur it
-            frame = imutils.resize(frame, width=resizeWidth)
-            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                if videoReader.buffer.qsize() == 0:
+                    time.sleep(1)
 
-            # if the first frame is None, initialize it
-            if firstFrame is None:
-                #gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
-                gray = cv2.GaussianBlur(gray, (5, 5), 0)
-                firstFrame = gray
-                continue
-            x = self.getContours(gray, firstFrame)
-            if x is not None:
-                extractedContours[frameCount] = x
+                tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
+                frameCount = tmpData[-1][0]
+                pool.map(self.getContours, tmpData)
 
-        print("done")
         videoReader.thread.join()
-        self.extractedContours = extractedContours
-        return extractedContours
+
+        return self.extractedContours
 
-    def getContours(self, gray, firstFrame):
-
+    def getContours(self, data):
+        frameCount, frame = data
+        firstFrame = self.firstFrame
+        frame = imutils.resize(frame, width=self.resizeWidth)
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
         gray = cv2.GaussianBlur(gray, (5, 5), 0)
         frameDelta = cv2.absdiff(gray, firstFrame)
         thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
@@ -84,11 +92,17 @@ class ContourExtractor:
             if ca < self.min_area or ca > self.max_area:
                 continue
             (x, y, w, h) = cv2.boundingRect(c)
-            #print((x, y, w, h))
-            contours.append((x, y, w, h))
+            contours.append((x, y, w, h))
+
 
         if len(contours) != 0 and contours is not None:
-            return contours
+            # this should be thread safe
+            self.extractedContours[frameCount] = contours
+
+
+
+
+
 
 
     def displayContours(self):
diff --git a/Exporter.py b/Exporter.py
index 96199f8..9ce9807 100644
--- a/Exporter.py
+++ b/Exporter.py
@@ -19,36 +19,47 @@ class Exporter:
         writer.append_data(np.array(frame))
         writer.close()
 
-    def exportLayers(self, layers, outputPath, resizeWidth):
+    def exportLayers(self, layers, footagePath, outputPath, resizeWidth):
+
+        listOfFrames = self.makeListOfFrames(layers)
+        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader.fillBuffer()
+
         maxLength = self.getMaxLengthOfLayers(layers)
         underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
+        frames = [underlay]*maxLength
+        exportFrame = 0
+
         fps = self.fps
         writer = imageio.get_writer(outputPath, fps=fps)
-        i = 0
-        for layer in layers:
-            data = layer.data
-            contours = layer.bounds
-            if len(data) < 10:
+        while not videoReader.videoEnded():
+            frameCount, frame = videoReader.pop()
+            if frameCount % (60*self.fps) == 0:
+                print("Minutes processed: ", frameCount/(60*self.fps))
+            if frame is None:
+                print("ContourExtractor: frame was None")
                 continue
-            for frame, contour in zip(data, contours):
-                (x, y, w, h) = contour
-                frame = frame
-                frame1 = underlay
-                frame1 = imutils.resize(frame1, width=resizeWidth)
-                frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
-                frame1[y:y+frame.shape[0], x:x+frame.shape[1]] = frame
-                cv2.putText(frame1, str(i), (30, 30),
-                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
-                writer.append_data(np.array(frame1))
-                #cv2.imshow("changes overlayed", frame)
-                #cv2.waitKey(10) & 0XFF
-            i += 1
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+
+            for layer in layers:
+                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
+                    (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
+                    factor = videoReader.w / resizeWidth
+                    x = int(x * factor)
+                    y = int(y * factor)
+                    w = int(w * factor)
+                    h = int(h * factor)
+                    # if exportFrame as index instead of frameCount - layer.startFrame then we have layer after layer
+                    frame2 = underlay
+                    frame2[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
+                    writer.append_data(frame2)
+
+
+        videoReader.thread.join()
 
-        writer.close()
-        # cv2.destroyAllWindows()
 
     def exportOverlayed(self, layers, footagePath, outputPath, resizeWidth):
-
-        listOfFrames = self.makeListOfFrames(layers)
         videoReader = VideoReader(footagePath, listOfFrames)
         videoReader.fillBuffer()
diff --git a/README.md b/README.md
index 2ebf069..aeffde5 100644
--- a/README.md
+++ b/README.md
@@ -3,3 +3,6 @@
 time compression
 Time consumed reading video: 369.0188868045807s 3.06GB 26min 1080p downscaled 500p 30fps
 Time consumed reading video: 240.s 3.06GB 26min 1080p downscaled 500p 30fps when multithreaded
+contour extraction: 10.5 Sec. when only 2 Threads
+ 8 secs when also mapping getContours()
+
diff --git a/VideoReader.py b/VideoReader.py
index 4c42cde..fa7b97e 100644
--- a/VideoReader.py
+++ b/VideoReader.py
@@ -51,13 +51,11 @@ class VideoReader:
 
     def readFrames(self):
         while self.lastFrame < self.endFrame:
-            if not self.buffer.full():
-                res, frame = self.vc.read()
-                if res:
-                    self.buffer.put((self.lastFrame, frame))
-                    self.lastFrame += 1
-            else:
-                sleep(0.5)
+            res, frame = self.vc.read()
+            if res:
+                self.buffer.put((self.lastFrame, frame))
+                self.lastFrame += 1
+
 
 
         self.stopped = True
diff --git a/generate test footage/2.mp4 b/generate test footage/2.mp4
deleted file mode 100644
index b6ec884..0000000
Binary files a/generate test footage/2.mp4 and /dev/null differ
diff --git a/generate test footage/out.mp4 b/generate test footage/out.mp4
deleted file mode 100644
index 518905a..0000000
Binary files a/generate test footage/out.mp4 and /dev/null differ
diff --git a/main.py b/main.py
index c5cea65..a26ebcd 100644
--- a/main.py
+++ b/main.py
@@ -21,14 +21,11 @@ def demo():
     #print("Time consumed reading video: ", time.time() - start)
     contours = ContourExtractor().extractContours(footagePath, resizeWidth)
-    print("Time consumed in working: ", time.time() - start)
+    print("Time consumed extracting: ", time.time() - start)
 
     layerFactory = LayerFactory(contours)
-    print("freeing Data", time.time() - start)
     layerFactory.freeData(maxLayerLength, minLayerLength)
     print("sort Layers")
     layerFactory.sortLayers()
-    #print("fill Layers")
-    #layerFactory.fillLayers(footagePath, resizeWidth)
 
     Exporter().exportOverlayed(layerFactory.layers,footagePath, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
     print("Total time: ", time.time() - start)
diff --git a/short2.mp4 b/short2.mp4
deleted file mode 100644
index 1bdf41d..0000000
Binary files a/short2.mp4 and /dev/null differ
diff --git a/short3.mp4 b/short3.mp4
deleted file mode 100644
index f77ada2..0000000
Binary files a/short3.mp4 and /dev/null differ
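
The core change in this commit is that extractContours no longer handles frames one at a time: it drains whatever the VideoReader thread has buffered, maps getContours over that batch with a ThreadPool, and each worker stores its contour list in a shared dict keyed by frame number. Below is a minimal, self-contained sketch of that batching pattern, using only the standard library; frame_source, process_frame, and results are stand-ins for VideoReader, ContourExtractor.getContours, and extractedContours, not code from this repository. The per-frame dict assignment is effectively atomic under CPython's GIL, which is what the "# this should be thread safe" comment in the diff relies on.

Sketch (Python, standard library only):

    from multiprocessing.pool import ThreadPool
    from queue import Queue, Empty

    # Stand-in for VideoReader: a queue of (frame_number, frame) tuples.
    frame_source = Queue()
    for n in range(100):
        frame_source.put((n, "frame-%d" % n))   # real code would buffer numpy image arrays

    results = {}                                 # shared dict keyed by frame number

    def process_frame(item):
        # Stand-in for getContours: unpack the tuple, do per-frame work, and
        # store the result under its frame number (atomic under the CPython GIL).
        frame_number, frame = item
        results[frame_number] = len(frame)       # placeholder for the list of bounding boxes

    with ThreadPool(16) as pool:
        while True:
            # Drain everything currently buffered into one batch ...
            batch = []
            while True:
                try:
                    batch.append(frame_source.get_nowait())
                except Empty:
                    break
            if not batch:
                break                            # source exhausted
            # ... and let the pool run process_frame over the whole batch.
            pool.map(process_frame, batch)

    print(len(results), "frames processed")

Threads rather than processes fit this workload because the heavy lifting in the real getContours happens inside OpenCV calls, which largely release the GIL, so a ThreadPool gains real concurrency without pickling frames across process boundaries.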