diff --git a/.gitignore b/.gitignore
index 5c18a8d..7dab839 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,5 @@
 generate test footage/images/
 generate test footage/3.MP4
+
+short.mp4
diff --git a/Analyzer.py b/Analyzer.py
index 1e5d111..0d36f40 100644
--- a/Analyzer.py
+++ b/Analyzer.py
@@ -1,3 +1,3 @@
 class Analyzer:
-    def __init__(self):
+    def __init__(self, footage):
         print("Analyzer constructed")
\ No newline at end of file
diff --git a/ContourExctractor.py b/ContourExctractor.py
index f143ef2..0b20bfb 100644
--- a/ContourExctractor.py
+++ b/ContourExctractor.py
@@ -10,6 +10,10 @@ import traceback
 import _thread
 import imageio
 import numpy as np
+from threading import Thread
+from multiprocessing import Queue, Process, Pool
+from multiprocessing.pool import ThreadPool
+import concurrent.futures
 
 
 class ContourExtractor:
@@ -28,9 +32,7 @@ class ContourExtractor:
         print("ContourExtractor initiated")
 
     def extractContours(self, videoPath, resizeWidth):
-        min_area = self.min_area
-        max_area = self.max_area
-        threashold = self.threashold
+
         # initialize the first frame in the video stream
         vs = cv2.VideoCapture(videoPath)
@@ -40,58 +42,73 @@ class ContourExtractor:
         self.yDim = image.shape[0]
         firstFrame = None
         # loop over the frames of the video
-        frameCount = 0
+        frameCount = -1
+        extractedContours = dict()
+
+        results = []
         extractedContours = dict()
-        while res:
-            res, frame = vs.read()
-            # resize the frame, convert it to grayscale, and blur it
-            if frame is None:
-                print("ContourExtractor: frame was None")
-                break
-            frame = imutils.resize(frame, width=resizeWidth)
-
-            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
-            gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
-
-            #gray = cv2.GaussianBlur(gray, (5, 5), 0)
-
-            # if the first frame is None, initialize it
-            if firstFrame is None:
-                firstFrame = gray
-                continue
-
-            frameDelta = cv2.absdiff(gray, firstFrame)
-
-            thresh = cv2.threshold(frameDelta, threashold, 255, cv2.THRESH_BINARY)[1]
-            # dilate the thresholded image to fill in holes, then find contours
-            thresh = cv2.dilate(thresh, None, iterations=3)
-            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-            cnts = imutils.grab_contours(cnts)
-
-            contours = []
-            for c in cnts:
-                ca = cv2.contourArea(c)
-                if ca < min_area or ca > max_area:
-                    continue
-                (x, y, w, h) = cv2.boundingRect(c)
-                #print((x, y, w, h))
-                contours.append((x, y, w, h))
+        imageBuffer = []
+
+        with concurrent.futures.ProcessPoolExecutor() as executor:
+            while res:
+                frameCount += 1
+                if frameCount % (60*30) == 0:
+                    print("Minutes processed: ", frameCount/(60*30))
 
-                #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-            if len(contours) != 0:
-                extractedContours[frameCount] = contours
-            if frameCount % (60*30) == 0:
-                print("Minutes processed: ", frameCount/(60*30))
-            frameCount += 1
+                res, frame = vs.read()
+                # resize the frame, convert it to grayscale, and blur it
+                if frame is None:
+                    print("ContourExtractor: frame was None")
+                    break
 
-            #cv2.imshow( "annotated", thresh )
-            #cv2.waitKey(10) & 0XFF
+                frame = imutils.resize(frame, width=resizeWidth)
+                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
+
+                # if the first frame is None, initialize it
+                if firstFrame is None:
+                    gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
+                    gray = cv2.GaussianBlur(gray, (5, 5), 0)
+                    firstFrame = gray
+                    continue
+
+                results.append(executor.submit(self.getContours, frameCount, gray, firstFrame))
+
+                #contours = self.getContours(frameCount, gray, firstFrame)
+
+            for f in concurrent.futures.as_completed(results):
+                x=f.result()
+                if x is not None:
+                    extractedContours = {**extractedContours, **x}
+
         self.extractedContours = extractedContours
         return extractedContours
-    
+    def getContours(self, frameCount, gray, firstFrame):
+        gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
+        gray = cv2.GaussianBlur(gray, (5, 5), 0)
+        frameDelta = cv2.absdiff(gray, firstFrame)
+        thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
+        # dilate the thresholded image to fill in holes, then find contours
+        thresh = cv2.dilate(thresh, None, iterations=3)
+        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        cnts = imutils.grab_contours(cnts)
+
+        contours = []
+        for c in cnts:
+            ca = cv2.contourArea(c)
+            if ca < self.min_area or ca > self.max_area:
+                continue
+            (x, y, w, h) = cv2.boundingRect(c)
+            #print((x, y, w, h))
+            contours.append((x, y, w, h))
+
+        if len(contours) != 0:
+            return {frameCount: contours}
+        else:
+            return None
+
     def displayContours(self):
         values = self.extractedContours.values()
         for xx in values:
diff --git a/LayerFactory.py b/LayerFactory.py
index 77a1a55..3e7c2c8 100644
--- a/LayerFactory.py
+++ b/LayerFactory.py
@@ -35,7 +35,8 @@ class LayerFactory:
                 layers.append(Layer(frameNumber, contour))
 
         # inserts all the fucking contours as layers?
-        for frameNumber, contours in data.items():
+        for frameNumber in sorted(data):
+            contours = data[frameNumber]
             for (x,y,w,h) in contours:
                 foundLayer = False
                 i = 0
diff --git a/__pycache__/Analyzer.cpython-37.pyc b/__pycache__/Analyzer.cpython-37.pyc
new file mode 100644
index 0000000..405b571
Binary files /dev/null and b/__pycache__/Analyzer.cpython-37.pyc differ
diff --git a/__pycache__/ContourExctractor.cpython-37.pyc b/__pycache__/ContourExctractor.cpython-37.pyc
index 44b2a71..49b7ae7 100644
Binary files a/__pycache__/ContourExctractor.cpython-37.pyc and b/__pycache__/ContourExctractor.cpython-37.pyc differ
diff --git a/__pycache__/LayerFactory.cpython-37.pyc b/__pycache__/LayerFactory.cpython-37.pyc
index 4c526cf..84f0163 100644
Binary files a/__pycache__/LayerFactory.cpython-37.pyc and b/__pycache__/LayerFactory.cpython-37.pyc differ
diff --git a/main.py b/main.py
index ffeeab3..2c2c6dd 100644
--- a/main.py
+++ b/main.py
@@ -3,17 +3,19 @@ import time
 from ContourExctractor import ContourExtractor
 from Exporter import Exporter
 from LayerFactory import LayerFactory
+from Analyzer import Analyzer
 import cv2
 
 #TODO
 # find relevant sections based on still-to-be-determined metrics for comparison images
 
 def demo():
     print("startup")
-    resizeWidth = 512
+    resizeWidth = 1024
     maxLayerLength = 1*60*30
     start = time.time()
-    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/out.mp4")
+    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.MP4")
+    analyzer = Analyzer(footagePath)
     contours = ContourExtractor().extractContours(footagePath, resizeWidth)
     print("Time consumed in working: ", time.time() - start)
     layerFactory = LayerFactory(contours)
@@ -21,7 +23,7 @@ def demo():
     layerFactory.sortLayers()
     layerFactory.fillLayers(footagePath)
     underlay = cv2.VideoCapture(footagePath).read()[1]
-    Exporter().exportLayers(underlay, layerFactory.layers, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
+    Exporter().exportOverlayed(underlay, layerFactory.layers, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
     print("Total time: ", time.time() - start)
 def init():
     print("not needed yet")
diff --git a/short.mp4 b/short.mp4
deleted file mode 100644
index 0fc15fd..0000000
Binary files a/short.mp4 and /dev/null differ
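
Note on the ContourExctractor.py change: per-frame contour detection now lives in getContours, which the decode loop submits to a concurrent.futures.ProcessPoolExecutor; the per-frame result dictionaries are merged as the futures complete, and LayerFactory compensates for the arbitrary completion order by iterating sorted(data). The sketch below is a minimal, self-contained illustration of that fan-out/fan-in pattern; detect_boxes and extract_all are hypothetical stand-ins for the OpenCV work, not code from this repository.

import concurrent.futures


def detect_boxes(frame_count, frame):
    # Stand-in for getContours(): return {frame_count: [bounding boxes]} or None.
    boxes = [(0, 0, 10, 10)] if frame % 2 == 0 else []
    return {frame_count: boxes} if boxes else None


def extract_all(frames):
    extracted = {}
    futures = []
    # Fan out: one future per frame. Submitted callables and their arguments
    # must be picklable, since they are shipped to worker processes.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for frame_count, frame in enumerate(frames):
            futures.append(executor.submit(detect_boxes, frame_count, frame))
        # Fan in: results arrive in completion order, not frame order, so the
        # consumer has to sort by frame number afterwards.
        for future in concurrent.futures.as_completed(futures):
            result = future.result()
            if result is not None:
                extracted.update(result)
    return extracted


if __name__ == "__main__":
    # The __main__ guard is required under the spawn start method (Windows/macOS).
    print(extract_all(range(10)))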