implemented config object

parent c187b8ce9f
commit 04187cf9ac
Config.py (28 changed lines)

@@ -2,25 +2,23 @@
 class Config:
     c = {
         "min_area" : 500,
-        "max_area" : 28000,
+        "max_area" : 9000,
-        "threashold" : 13,
+        "threashold" : 10,
-        "xDim" : 0,
-        "yDim" : 0,
         "resizeWidth" : 512,
-        "inputPath" : "",
+        "inputPath" : None,
-        "outputPath": "",
+        "outputPath": None,
-        "maxLayerLength": 1000,
+        "maxLayerLength": 900,
-        "minLayerLength": 0,
+        "minLayerLength": 30,
-        "fps": 30,
+        "tolerance": 10,
-        "tolerance": 5,
         "maxLength": None,
-        ""
+        "ttolerance": 10,
-    }
+        "videoBufferLength": 16}

-    __init__(self):
-        print(Current Config:)
+    def __init__(self):
+        print("Current Config:", self.c)

     def __getitem__(self, key):
         return self.c[key]

     def __setitem__(self, key, value):
-        return self.c[key] = value
+        self.c[key] = value
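A minimal usage sketch of the new Config object, based only on the keys and methods shown above (file paths are placeholders):

    from Config import Config

    config = Config()                                  # __init__ prints the current defaults
    config["inputPath"] = "input.mp4"                  # placeholder path; __setitem__ writes into the c dict
    config["outputPath"] = "output.mp4"                # placeholder path
    print(config["min_area"], config["threashold"])    # __getitem__ reads from the c dict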
ContourExtractor.py

@@ -17,6 +17,7 @@ from VideoReader import VideoReader
 from queue import Queue
 import threading
 from multiprocessing.pool import ThreadPool
+from Config import Config


 class ContourExtractor:

@@ -26,28 +27,31 @@ class ContourExtractor:
     def getextractedContours(self):
         return self.extractedContours

-    def __init__(self):
+    def __init__(self, config):
         self.frameBuffer = Queue(16)
         self.extractedContours = dict()
-        self.min_area = 30
+        self.min_area = config["min_area"]
-        self.max_area = 1000
+        self.max_area = config["max_area"]
-        self.threashold = 13
+        self.threashold = config["threashold"]
+        self.resizeWidth = config["resizeWidth"]
+        self.videoPath = config["inputPath"]
         self.xDim = 0
         self.yDim = 0
+        self.config = config

         print("ContourExtractor initiated")

-    def extractContours(self, videoPath, resizeWidth):
+    def extractContours(self):
         extractedContours = dict()
-        videoReader = VideoReader(videoPath)
+        videoReader = VideoReader(self.config)
         self.xDim = videoReader.w
         self.yDim = videoReader.h
-        self.resizeWidth = resizeWidth
         videoReader.fillBuffer()
         frameCount, frame = videoReader.pop()

         #init compare image
-        frame = imutils.resize(frame, width=resizeWidth)
+        frame = imutils.resize(frame, width=self.resizeWidth)
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
         #gray = np.asarray(gray[:,:,1]/2 + gray[:,:,2]/2).astype(np.uint8)
         gray = cv2.GaussianBlur(gray, (5, 5), 0)

@@ -63,14 +67,13 @@ class ContourExtractor:
             start = time.time()

             if videoReader.buffer.qsize() == 0:
-                time.sleep(1)
+                time.sleep(.5)

             tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
             frameCount = tmpData[-1][0]
             pool.map(self.getContours, tmpData)

         videoReader.thread.join()

         return self.extractedContours

     def getContours(self, data):

@@ -82,7 +85,7 @@ class ContourExtractor:
         frameDelta = cv2.absdiff(gray, firstFrame)
         thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
         # dilate the thresholded image to fill in holes, then find contours
-        thresh = cv2.dilate(thresh, None, iterations=3)
+        thresh = cv2.dilate(thresh, None, iterations=4)
         cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         cnts = imutils.grab_contours(cnts)

@@ -99,12 +102,6 @@ class ContourExtractor:
         # this should be thread safe
         self.extractedContours[frameCount] = contours
-
-
-
-
-
-

     def displayContours(self):
         values = self.extractedContours.values()
         for xx in values:
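A minimal sketch of driving the extractor through the config object instead of passing videoPath and resizeWidth explicitly (the input path is a placeholder):

    from Config import Config
    from ContourExtractor import ContourExtractor

    config = Config()
    config["inputPath"] = "input.mp4"        # placeholder path
    extractor = ContourExtractor(config)     # min_area, max_area, threashold etc. now come from config
    contours = extractor.extractContours()   # dict mapping frameCount -> detected contours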
Exporter.py (34 changed lines)

@@ -9,29 +9,33 @@ from VideoReader import VideoReader
 class Exporter:
     fps = 30

-    def __init__(self):
+    def __init__(self, config):
+        self.footagePath = config["inputPath"]
+        self.outputPath = config["outputPath"]
+        self.resizeWidth = config["resizeWidth"]
+        self.config = config
         print("Exporter initiated")

-    def export(self, frames, outputPath):
+    def export(self):
         fps = self.fps
         writer = imageio.get_writer(outputPath, fps=fps)
         for frame in frames:
             writer.append_data(np.array(frame))
         writer.close()

-    def exportLayers(self, layers, footagePath, outputPath, resizeWidth):
+    def exportLayers(self, layers):

         listOfFrames = self.makeListOfFrames(layers)
-        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader = VideoReader(self.config, listOfFrames)
         videoReader.fillBuffer()
         maxLength = self.getMaxLengthOfLayers(layers)
-        underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
         frames = [underlay]*maxLength
         exportFrame = 0

-        fps = self.fps
+        self.fps = videoReader.getFPS()
-        writer = imageio.get_writer(outputPath, fps=fps)
+        writer = imageio.get_writer(self.outputPath, fps=self.fps)
         while not videoReader.videoEnded():
             frameCount, frame = videoReader.pop()
             if frameCount % (60*self.fps) == 0:

@@ -45,12 +49,11 @@ class Exporter:
             for layer in layers:
                 if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                     (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
-                    factor = videoReader.w / resizeWidth
+                    factor = videoReader.w / self.resizeWidth
                     x = int(x * factor)
                     y = int(y * factor)
                     w = int(w * factor)
                     h = int(h * factor)
-                    # if exportFrame as index instead of frameCount - layer.startFrame then we have layer after layer
                     frame2 = underlay
                     frame2[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
                     writer.append_data(frame2)

@@ -59,12 +62,13 @@ class Exporter:
         videoReader.thread.join()


-    def exportOverlayed(self, layers, footagePath, outputPath, resizeWidth):
+    def exportOverlayed(self, layers):

         listOfFrames = self.makeListOfFrames(layers)
-        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader = VideoReader(self.config, listOfFrames)
         videoReader.fillBuffer()
         maxLength = self.getMaxLengthOfLayers(layers)
-        underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.VideoCapture(self.footagePath).read()[1]
         underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
         frames = [underlay]*maxLength
         exportFrame = 0

@@ -81,7 +85,7 @@ class Exporter:
             for layer in layers:
                 if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                     (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
-                    factor = videoReader.w / resizeWidth
+                    factor = videoReader.w / self.resizeWidth
                     x = int(x * factor)
                     y = int(y * factor)
                     w = int(w * factor)

@@ -94,9 +98,9 @@ class Exporter:

         videoReader.thread.join()

+        self.fps = videoReader.getFPS()
         fps = self.fps
-        writer = imageio.get_writer(outputPath, fps=fps)
+        writer = imageio.get_writer(self.outputPath, fps=fps)
         for frame in frames:
             writer.append_data(frame)
LayerFactory.py

@@ -1,15 +1,23 @@
 from Layer import Layer
+from Config import Config

 class LayerFactory:
-    data = {}
-    layers = []
-    tolerance = 5
-    def __init__(self, data=None):
+    def __init__(self, config, data=None):
+        self.data = {}
+        self.layers = []
+        self.tolerance = config["tolerance"]
+        self.ttolerance = config["ttolerance"]
+        self.minLayerLength = config["minLayerLength"]
+        self.maxLayerLength = config["maxLayerLength"]
+        self.resizeWidth = config["resizeWidth"]
+        self.footagePath = config["inputPath"]
         print("LayerFactory constructed")
         self.data = data
         if data is not None:
             self.extractLayers(data)


     def removeStaticLayers(self):
         '''Removes Layers with little to no movement'''
         layers = []

@@ -24,11 +32,11 @@ class LayerFactory:
         self.layers = layers


-    def freeData(self, maxLayerLength, minLayerLength):
+    def freeData(self):
         self.data.clear()
         layers = []
         for l in self.layers:
-            if l.getLength() < maxLayerLength and l.getLength() > minLayerLength:
+            if l.getLength() < self.maxLayerLength and l.getLength() > self.minLayerLength:
                 layers.append(l)
         self.layers = layers
         self.removeStaticLayers()

@@ -51,14 +59,15 @@ class LayerFactory:

         oldLayerIDs = []
         # inserts all the fucking contours as layers?
-        for frameNumber, contours in data.items():
+        for frameNumber in sorted(data.keys()):
+            contours = data[frameNumber]
             if frameNumber%5000 == 0:
                 print(f"{int(round(frameNumber/max(data.keys()), 2)*100)}% done with Layer extraction")

             for (x,y,w,h) in contours:
                 foundLayer = False
                 for i in set(range(0, len(self.layers))).difference(set(oldLayerIDs)):
-                    if frameNumber - self.layers[i].lastFrame > 10:
+                    if frameNumber - self.layers[i].lastFrame > self.ttolerance:
                         oldLayerIDs.append(i)
                         continue

@@ -67,7 +76,6 @@ class LayerFactory:
                     self.layers[i].add(frameNumber, (x,y,w,h))
                     foundLayer = True
                     break

                 if not foundLayer:
                     self.layers.append(Layer(frameNumber, (x,y,w,h)))
-

@@ -78,15 +86,13 @@ class LayerFactory:
         # If one rectangle is above other
         if(l1[1] <= r2[1] or l2[1] <= r1[1]):
             return False

         return True

-    def fillLayers(self, footagePath, resizeWidth):
+    def fillLayers(self):
         for i in range(len(self.layers)):
             if i % 20 == 0:
                 print(f"filled {int(round(i/len(self.layers),2)*100)}% of all Layers")
-            self.layers[i].fill(footagePath, resizeWidth)
+            self.layers[i].fill(self.footagePath, self.resizeWidth)

     def sortLayers(self):
-        # straight bubble
+        self.layers.sort(key = lambda c:c.startFrame)
-        self.layers.sort(key = lambda c:c.lastFrame)
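A minimal sketch of the new LayerFactory constructor, using a hypothetical two-frame contour dict just to show the call shape (real data comes from ContourExtractor.extractContours()):

    from Config import Config
    from LayerFactory import LayerFactory

    config = Config()
    contours = {0: [(10, 10, 40, 40)], 1: [(12, 11, 40, 40)]}   # hypothetical frameNumber -> [(x, y, w, h)] data
    layerFactory = LayerFactory(config, contours)   # tolerance, ttolerance, min/maxLayerLength now read from config
    layerFactory.freeData()                         # keeps only layers between minLayerLength and maxLayerLength
    layerFactory.sortLayers()                       # now sorts by startFrame instead of lastFrame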
VideoReader.py

@@ -4,6 +4,7 @@ import cv2
 from time import sleep
 from queue import Queue
 import threading
+from Config import Config


 class VideoReader:

@@ -11,14 +12,15 @@ class VideoReader:
     #buffer = [(frameNumber, frame)]
     listOfFrames = None

-    def __init__(self, videoPath, setOfFrames = None):
+    def __init__(self, config, setOfFrames = None):
+        videoPath = config["inputPath"]
         if videoPath is None:
             print("Video reader needs a videoPath!")
             return None

         self.videoPath = videoPath
         self.lastFrame = 0
-        self.buffer = Queue(16)
+        self.buffer = Queue(config["videoBufferLength"])
         self.vc = cv2.VideoCapture(videoPath)
         self.stopped = False
         res, image = self.vc.read()

@@ -83,10 +85,11 @@ class VideoReader:
         self.stopped = True

     def videoEnded(self):
-        if self.stopped:
-            return True
-        else:
-            return False
+        return self.stopped
+
+    def getFPS(self):
+        return self.vc.get(cv2.CAP_PROP_FPS)
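A minimal sketch of the config-driven reader, with a placeholder input path; the buffer length and video path now come from the Config object:

    from Config import Config
    from VideoReader import VideoReader

    config = Config()
    config["inputPath"] = "input.mp4"      # placeholder path
    reader = VideoReader(config)           # buffer sized by config["videoBufferLength"]
    reader.fillBuffer()
    print(reader.getFPS())                 # new helper wrapping cv2.CAP_PROP_FPS
    while not reader.videoEnded():
        frameCount, frame = reader.pop()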
main.py (19 changed lines)

@@ -5,29 +5,26 @@ from Exporter import Exporter
 from LayerFactory import LayerFactory
 from Analyzer import Analyzer
 from VideoReader import VideoReader
+from Config import Config
 import cv2
 #TODO
 # find relevant sections based on metrics (still to be defined) for comparison images

 def demo():
     print("startup")
-    resizeWidth = 256
-    maxLayerLength = 20*30
-    minLayerLength = 30
     start = time.time()
+    config = Config()

-    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
+    config["inputPath"] = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
-    #analyzer = Analyzer(footagePath)
+    config["outputPath"] = os.path.join(os.path.dirname(__file__), "./output/short.mp4")
-    #print("Time consumed reading video: ", time.time() - start)

-    contours = ContourExtractor().extractContours(footagePath, resizeWidth)
+    contours = ContourExtractor(config).extractContours()
     print("Time consumed extracting: ", time.time() - start)
-    layerFactory = LayerFactory(contours)
+    layerFactory = LayerFactory(config, contours)
-    layerFactory.freeData(maxLayerLength, minLayerLength)
+    layerFactory.freeData()
-    print("sort Layers")
     layerFactory.sortLayers()

-    Exporter().exportOverlayed(layerFactory.layers,footagePath, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
+    Exporter(config).exportOverlayed(layerFactory.layers)
     print("Total time: ", time.time() - start)

 def init():