video reader object

This commit is contained in:
Askill 2020-10-08 22:26:29 +02:00
parent 9d9f8a7361
commit 68c730ff6e
7 changed files with 110 additions and 63 deletions

View File

@ -14,6 +14,7 @@ from threading import Thread
from multiprocessing import Queue, Process, Pool from multiprocessing import Queue, Process, Pool
from multiprocessing.pool import ThreadPool from multiprocessing.pool import ThreadPool
import concurrent.futures import concurrent.futures
from VideoReader import VideoReader
class ContourExtractor: class ContourExtractor:
@ -32,61 +33,43 @@ class ContourExtractor:
print("ContourExtractor initiated") print("ContourExtractor initiated")
def extractContours(self, videoPath, resizeWidth): def extractContours(self, videoPath, resizeWidth):
# initialize the first frame in the video stream
vs = cv2.VideoCapture(videoPath)
res, image = vs.read()
self.xDim = image.shape[1]
self.yDim = image.shape[0]
firstFrame = None firstFrame = None
# loop over the frames of the video extractedContours = dict()
frameCount = -1 videoReader = VideoReader(videoPath)
extractedContours = dict() self.xDim = videoReader.w
self.yDim = videoReader.h
results = [] videoReader.fillBuffer()
extractedContours = dict()
imageBuffer = [] while not videoReader.videoEnded():
frameCount, frame = videoReader.pop()
with concurrent.futures.ProcessPoolExecutor() as executor: if frameCount % (60*30) == 0:
while res: print("Minutes processed: ", frameCount/(60*30))
frameCount += 1
if frameCount % (60*30) == 0: if frame is None:
print("Minutes processed: ", frameCount/(60*30)) print("ContourExtractor: frame was None")
continue
res, frame = vs.read() # resize the frame, convert it to grayscale, and blur it
# resize the frame, convert it to grayscale, and blur it frame = imutils.resize(frame, width=resizeWidth)
if frame is None: gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print("ContourExtractor: frame was None")
break
frame = imutils.resize(frame, width=resizeWidth) # if the first frame is None, initialize it
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB) if firstFrame is None:
#gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
firstFrame = gray
continue
x = self.getContours(gray, firstFrame)
if x is not None:
extractedContours[frameCount] = x
# if the first frame is None, initialize it print("done")
if firstFrame is None: videoReader.thread.join()
gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
firstFrame = gray
continue
results.append(executor.submit(self.getContours, frameCount, gray, firstFrame))
#contours = self.getContours(frameCount, gray, firstFrame)
for f in concurrent.futures.as_completed(results):
x=f.result()
if x is not None:
extractedContours = {**extractedContours, **x}
self.extractedContours = extractedContours self.extractedContours = extractedContours
return extractedContours return extractedContours
def getContours(self, frameCount, gray, firstFrame): def getContours(self, gray, firstFrame):
gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
gray = cv2.GaussianBlur(gray, (5, 5), 0) gray = cv2.GaussianBlur(gray, (5, 5), 0)
frameDelta = cv2.absdiff(gray, firstFrame) frameDelta = cv2.absdiff(gray, firstFrame)
thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1] thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
@ -104,10 +87,9 @@ class ContourExtractor:
#print((x, y, w, h)) #print((x, y, w, h))
contours.append((x, y, w, h)) contours.append((x, y, w, h))
if len(contours) != 0: if len(contours) != 0 and contours is not None:
return {frameCount: contours} return contours
else:
return None
def displayContours(self): def displayContours(self):
values = self.extractedContours.values() values = self.extractedContours.values()

View File

@ -53,10 +53,8 @@ class LayerFactory:
# inserts all the fucking contours as layers? # inserts all the fucking contours as layers?
for frameNumber, contours in data.items(): for frameNumber, contours in data.items():
if frameNumber%5000 == 0: if frameNumber%5000 == 0:
print(f"{round(frameNumber/max(data.keys()), 2)}% done with Layer extraction") print(f"{int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction")
for frameNumber in sorted(data):
contours = data[frameNumber]
for (x,y,w,h) in contours: for (x,y,w,h) in contours:
foundLayer = False foundLayer = False
for i in set(range(0, len(self.layers))).difference(set(oldLayerIDs)): for i in set(range(0, len(self.layers))).difference(set(oldLayerIDs)):
@ -85,15 +83,10 @@ class LayerFactory:
def fillLayers(self, footagePath, resizeWidth): def fillLayers(self, footagePath, resizeWidth):
for i in range(len(self.layers)): for i in range(len(self.layers)):
if i % 20 == 0:
print(f"filled {int(round(i/len(self.layers),2)*100)}% of all Layers")
self.layers[i].fill(footagePath, resizeWidth) self.layers[i].fill(footagePath, resizeWidth)
def sortLayers(self): def sortLayers(self):
# straight bubble # straight bubble
self.layers.sort(key = lambda c:c.lastFrame) self.layers.sort(key = lambda c:c.lastFrame)

70
VideoReader.py Normal file
View File

@ -0,0 +1,70 @@
import multiprocessing
import cv2
from time import sleep
from queue import Queue
import threading
class VideoReader:
    """Threaded video frame reader.

    Decodes frames from a video file on a background thread into a bounded
    queue so that decoding overlaps with downstream processing.

    Buffer items are ``(frameNumber, frame)`` tuples.
    """

    def __init__(self, videoPath):
        """Open *videoPath* and probe the first frame for its dimensions.

        Exposes ``w`` and ``h`` (frame width/height in pixels) for callers.

        Raises:
            ValueError: if *videoPath* is None.
        """
        if videoPath is None:
            # Fail fast: the original printed and returned, leaving a
            # half-initialized object that raised AttributeError later.
            raise ValueError("Video reader needs a videoPath!")
        self.videoPath = videoPath
        self.lastFrame = 0          # index of the next frame to enqueue
        self.buffer = Queue(16)     # bounded: reader blocks when consumer lags
        self.vc = cv2.VideoCapture(videoPath)
        self.stopped = False        # set True by readFrames when decoding ends
        self.latestFrame = None     # most recently decoded frame (see get())
        res, image = self.vc.read()
        self.w = image.shape[1]
        self.h = image.shape[0]
        # Rewind so frame 0 is not silently dropped by the probe read above.
        self.vc.set(cv2.CAP_PROP_POS_FRAMES, 0)
        print(f"Video reader startet with buffer length of 16")

    def pop(self):
        """Block until a frame is available, then return (frameNumber, frame)."""
        return self.buffer.get(block=True)

    def get(self):
        """Return the most recently decoded frame (None before any decode).

        The original indexed the Queue (``self.buffer[-1]``), which always
        raised TypeError; the latest frame is now tracked explicitly.
        """
        return self.latestFrame

    def fillBuffer(self):
        """Start the background reader thread over the whole video."""
        if self.buffer.full():
            print("VideoReader::fillBuffer was called when buffer was full.")
        # Use the real frame count; the original clobbered it with a
        # hard-coded 10*60*30 debug limit, truncating longer videos.
        self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
        self.thread = threading.Thread(target=self.readFrames, args=())
        self.thread.start()

    def stop(self):
        """Wait for the reader thread to finish and release the capture."""
        self.thread.join()
        self.vc.release()

    def readFrames(self):
        """Reader-thread loop: decode frames into the buffer until done."""
        while self.lastFrame < self.endFrame:
            if not self.buffer.full():
                res, frame = self.vc.read()
                if not res:
                    # Decode failed / stream ended early: stop instead of
                    # re-reading the same position forever (the original
                    # spun indefinitely when read() kept returning False).
                    break
                self.latestFrame = frame
                self.buffer.put((self.lastFrame, frame))
                self.lastFrame += 1
            else:
                # Consumer is behind; back off instead of busy-spinning.
                sleep(0.5)
        self.stopped = True

    def videoEnded(self):
        """True once decoding stopped AND every buffered frame was consumed.

        Checking only ``stopped`` (as the original did) let consumers exit
        while frames were still queued, silently dropping the tail of the
        video.
        """
        return self.stopped and self.buffer.empty()

Binary file not shown.

View File

@ -4,20 +4,22 @@ from ContourExctractor import ContourExtractor
from Exporter import Exporter from Exporter import Exporter
from LayerFactory import LayerFactory from LayerFactory import LayerFactory
from Analyzer import Analyzer from Analyzer import Analyzer
from VideoReader import VideoReader
import cv2 import cv2
#TODO #TODO
# finden von relevanten Stellen anhand von zu findenen metriken für vergleichsbilder # finden von relevanten Stellen anhand von zu findenen metriken für vergleichsbilder
def demo(): def demo():
print("startup") print("startup")
resizeWidth = 1024 resizeWidth = 512
maxLayerLength = 1*60*30 maxLayerLength = 1*60*30
minLayerLength = 3 minLayerLength = 30
start = time.time() start = time.time()
footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4") footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
#analyzer = Analyzer(footagePath) #analyzer = Analyzer(footagePath)
#print("Time consumed reading video: ", time.time() - start) #print("Time consumed reading video: ", time.time() - start)
contours = ContourExtractor().extractContours(footagePath, resizeWidth) contours = ContourExtractor().extractContours(footagePath, resizeWidth)
print("Time consumed in working: ", time.time() - start) print("Time consumed in working: ", time.time() - start)
layerFactory = LayerFactory(contours) layerFactory = LayerFactory(contours)