video reader has multiprocessing
This commit is contained in: parent 874976331e, commit df1c3cea05
@@ -13,7 +13,6 @@ import os
 
 class ContourExtractor:
     # extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
     # dict with frame numbers as keys and the contour bounds of every contour for that frame
@@ -50,11 +49,13 @@ class ContourExtractor:
         self.start = time.time()
         # start a bunch of frames and let them read from the video reader buffer until the video reader reaches EOF
         with ThreadPool(2) as pool:
-            while not videoReader.videoEnded():
-                if videoReader.buffer.qsize() == 0:
+            while True:
+                while not videoReader.videoEnded() and videoReader.buffer.qsize() == 0:
                     time.sleep(0.5)

                 tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
+                if videoReader.videoEnded():
+                    break
                 pool.map(self.computeMovingAverage, (tmpData,))
                 pool.map(self.async2, (tmpData,))
                 # for data in tmpData:
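The consumer side of the new buffer protocol, pulled out as a minimal sketch. videoReader (with pop(), videoEnded() and buffer.qsize()) and the ThreadPool correspond to the code above; drainBuffer and processBatch are illustrative stand-ins, not names from this commit:

import time


def drainBuffer(videoReader, pool, processBatch):
    # Mirrors the loop above: sleep while the reader is still producing but
    # has nothing queued, drain whatever is currently buffered in one batch,
    # and stop once the reader reports end of video.
    while True:
        while not videoReader.videoEnded() and videoReader.buffer.qsize() == 0:
            time.sleep(0.5)

        tmpData = [videoReader.pop() for _ in range(videoReader.buffer.qsize())]
        if videoReader.videoEnded():
            break
        pool.map(processBatch, (tmpData,))

Note that multiprocessing.Queue.qsize() is only approximate and is unavailable on some platforms (it raises NotImplementedError on macOS), so the batch size taken here is best treated as a hint rather than an exact count.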
@@ -132,7 +133,7 @@ class ContourExtractor:
         frames = self.lastFrames + frames

         tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
-        with ThreadPool(os.cpu_count()) as pool:
+        with ThreadPool(int(os.cpu_count())) as pool:
             pool.map(self.averageDaFrames, tmp)

         self.lastFrames = frames[-averageFrames:]
@@ -149,7 +149,6 @@ class Exporter:
             except:
                 continue
         videoReader.thread.join()
-        videoReader.vc.release()

         self.fps = videoReader.getFPS()
         fps = self.fps
@@ -1,4 +1,4 @@
-from queue import Queue
+import multiprocessing

 import cv2
 import threading
@@ -18,8 +18,8 @@ class VideoReader:
         self.lastFrame = 0
         # buffer data struct:
         # buffer = Queue([(frameNumber, frame), ])
-        self.buffer = Queue(config["videoBufferLength"])
-        self.vc = cv2.VideoCapture(videoPath)
+        self.buffer = multiprocessing.Queue(config["videoBufferLength"])
+        #self.vc = cv2.VideoCapture(videoPath)
         self.stopped = False
         self.getWH()
         self.calcFPS()
@@ -40,31 +40,35 @@ class VideoReader:
         self.vc.release()

     def pop(self):
-        return self.buffer.get(block=True)
+        frameNumber, frame = self.buffer.get(block=True)
+        if frame is None:
+            self.stopped = True
+        return frameNumber, frame

     def fillBuffer(self, listOfFrames=None):
-        self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
+        self.endFrame = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
         if listOfFrames is not None:
             self.listOfFrames = listOfFrames

         if self.listOfFrames is not None:
-            self.thread = threading.Thread(target=self.readFramesByList, args=())
+            self.thread = multiprocessing.Process(target=self.readFramesByList, args=())
         else:
-            self.thread = threading.Thread(target=self.readFrames, args=())
+            self.thread = multiprocessing.Process(target=self.readFrames, args=())
         self.thread.start()

     def readFrames(self):
         """Reads video from start to finish"""
+        self.vc = cv2.VideoCapture(self.videoPath)
         while self.lastFrame < self.endFrame:
             res, frame = self.vc.read()
             if res:
                 self.buffer.put((self.lastFrame, frame))
             self.lastFrame += 1
-        self.stopped = True
+        self.buffer.put((self.lastFrame, None))

     def readFramesByList(self):
         """Reads all frames from a list of frame numbers"""
+        self.vc = cv2.VideoCapture(self.videoPath)
         self.vc.set(1, self.listOfFrames[0])
         self.lastFrame = self.listOfFrames[0]
         self.endFrame = self.listOfFrames[-1]
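Because the reader now runs in a separate process, setting self.stopped inside readFrames would only change the child's copy of the object. The commit therefore signals end-of-stream in-band by pushing a (frameNumber, None) sentinel onto the multiprocessing.Queue, and pop() flips stopped on the parent side when it sees it. Likewise, the cv2.VideoCapture is opened inside the child, since a capture handle generally cannot be pickled or safely shared across processes. A self-contained sketch of that shape, with placeholder path and buffer size:

import multiprocessing

import cv2


def read_frames(video_path, buffer, end_frame):
    # Runs in the child process: the capture is opened here because the
    # handle cannot be sent over from the parent.
    vc = cv2.VideoCapture(video_path)
    frame_number = 0
    while frame_number < end_frame:
        ok, frame = vc.read()
        if ok:
            buffer.put((frame_number, frame))
        frame_number += 1
    # In-band EOF marker: the parent sets its own "stopped" flag when it
    # receives a None frame, since the child's attributes are not shared.
    buffer.put((frame_number, None))
    vc.release()


if __name__ == "__main__":
    video_path = "input.mp4"              # placeholder path
    end_frame = int(cv2.VideoCapture(video_path).get(cv2.CAP_PROP_FRAME_COUNT))
    buffer = multiprocessing.Queue(64)    # placeholder buffer length
    reader = multiprocessing.Process(target=read_frames, args=(video_path, buffer, end_frame))
    reader.start()

    while True:
        frame_number, frame = buffer.get(block=True)
        if frame is None:
            break
        # ... process frame ...
    reader.join()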
@@ -75,7 +79,7 @@ class VideoReader:
             if res:
                 self.buffer.put((self.lastFrame, frame))
             else:
-                print("READING FRAMES IS FALSE")
+                print("Couldn't read Frame")
             # since the list is sorted the first element is always the lowest relevant framenumber
             # [0,1,2,3,32,33,34,35,67,68,69]
             self.listOfFrames.pop(0)
@@ -84,8 +88,8 @@ class VideoReader:
                 # if current Frame number is not in list of Frames, we can skip a few frames
                 self.vc.set(1, self.listOfFrames[0])
                 self.lastFrame = self.listOfFrames[0]
+        self.buffer.put((self.lastFrame, None))

-        self.stopped = True

     def videoEnded(self):
         if self.stopped and self.buffer.empty():
@@ -94,7 +98,7 @@ class VideoReader:
         return False

     def calcFPS(self):
-        self.fps = self.vc.get(cv2.CAP_PROP_FPS)
+        self.fps = cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FPS)

     def getFPS(self):
         if self.fps is None:
@@ -102,7 +106,7 @@ class VideoReader:
         return self.fps

     def calcLength(self):
-        fc = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
+        fc = int(cv2.VideoCapture(self.videoPath).get(cv2.CAP_PROP_FRAME_COUNT))
         self.length = fc / self.getFPS()

     def getLength(self):
@@ -121,8 +125,10 @@ class VideoReader:

     def getWH(self):
         """get width and height"""
+        vc = cv2.VideoCapture(self.videoPath)
         if self.w is None or self.h is None:
-            res, image = self.vc.read()
+            res, image = vc.read()
             self.w = image.shape[1]
             self.h = image.shape[0]

         return (self.w, self.h)
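With the parent-side self.vc commented out in __init__, the metadata helpers (getWH, calcFPS, calcLength, fillBuffer) each open a short-lived cv2.VideoCapture of their own. A sketch of that probing pattern collapsed into one helper; the function name and return tuple are illustrative only, not part of the commit:

import cv2


def probe_video(video_path):
    # Illustrative helper (not from this commit): open one temporary capture,
    # read the properties the VideoReader needs, then release it.
    vc = cv2.VideoCapture(video_path)
    try:
        fps = vc.get(cv2.CAP_PROP_FPS)
        frame_count = int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
        ok, first_frame = vc.read()
        width = first_frame.shape[1] if ok else None
        height = first_frame.shape[0] if ok else None
    finally:
        vc.release()
    return width, height, fps, frame_count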
main.py (36 changed lines)
@@ -1,7 +1,6 @@
 import os
 import time

-from Application.Classifiers import *
 from Application.Config import Config
 from Application.ContourExctractor import ContourExtractor
 from Application.Exporter import Exporter
@@ -12,18 +11,8 @@ from Application.LayerManager import LayerManager
 from Application.VideoReader import VideoReader


-def main():
+def main(config):
     startTotal = time.time()
-    config = Config()
-
-    fileName = "./x23-1.mp4"
-    outputPath = os.path.join(os.path.dirname(__file__), "output")
-    dirName = os.path.join(os.path.dirname(__file__), "generate test footage")
-
-    config["inputPath"] = os.path.join(dirName, fileName)
-    config["outputPath"] = os.path.join(outputPath, fileName)
-    config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
-    config["w"], config["h"] = VideoReader(config).getWH()

     if not os.path.exists(config["importPath"]):
         contours, masks = ContourExtractor(config).extractContours()
@@ -38,18 +27,27 @@ def main():
     layerManager.cleanLayers()

     # layerManager.tagLayers()
-    layers = layerManager.layers
-    if len(layers) == 0:
+    if len(layerManager.layers) == 0:
         exit(1)

-    heatmap = HeatMap(config["w"], config["h"], [contour for layer in layers for contour in layer.bounds], 1920 / config["resizeWidth"])
+    heatmap = HeatMap(config["w"], config["h"], [contour for layer in layerManager.layers for contour in layer.bounds], 1920 / config["resizeWidth"])
     heatmap.showImage()

-    exporter = Exporter(config)
-    print(f"Exporting {len(contours)} Contours and {len(layers)} Layers")
-    exporter.export(layers, contours, masks, raw=True, overlayed=True)
+    print(f"Exporting {len(contours)} Contours and {len(layerManager.layers)} Layers")
+    Exporter(config).export(layerManager.layers, contours, masks, raw=True, overlayed=True)
     print("Total time: ", time.time() - startTotal)


 if __name__ == "__main__":
-    main()
+    config = Config()
+
+    fileName = "x23-1.mp4"
+    outputPath = os.path.join(os.path.dirname(__file__), "output")
+    inputDirPath = os.path.join(os.path.dirname(__file__), "generate test footage")
+
+    config["inputPath"] = os.path.join(inputDirPath, fileName)
+    config["outputPath"] = os.path.join(outputPath, fileName)
+    config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
+    config["w"], config["h"] = VideoReader(config).getWH()
+
+    main(config)
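main() now takes its config from the caller instead of building it internally, so the __main__ block (or any other driver) owns the paths. A minimal driver sketch, assuming Config supports item assignment as shown above; the file and directory names here are placeholders rather than values from the commit:

import os

from Application.Config import Config
from Application.VideoReader import VideoReader
from main import main

config = Config()
fileName = "example.mp4"                                          # placeholder clip name
outputPath = os.path.join(os.path.dirname(__file__), "output")
inputDirPath = os.path.join(os.path.dirname(__file__), "videos")  # placeholder input dir

config["inputPath"] = os.path.join(inputDirPath, fileName)
config["outputPath"] = os.path.join(outputPath, fileName)
config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")
config["w"], config["h"] = VideoReader(config).getWH()

main(config)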
@@ -2,4 +2,6 @@ opencv-python
 numpy
 imutils
 imageio
 tensorflow
+matplotlib
+imageio-ffmpeg