rewrote exporter

layers are now only filled on export
Askill 2020-10-09 23:59:04 +02:00
parent 68c730ff6e
commit 15bfcc8e92
8 changed files with 110 additions and 115 deletions
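
In short, layer pixel data is no longer pre-filled by LayerFactory.fillLayers; Exporter.exportOverlayed now reads the needed frames back from the original footage while writing the output. A minimal sketch of the updated call, following the new main.py below (footagePath, resizeWidth and layerFactory are assumed to be set up as in demo()):

    exporter = Exporter()
    exporter.exportOverlayed(
        layerFactory.layers,   # layers now carry only bounds and start frames
        footagePath,           # original footage, re-read on demand during export
        os.path.join(os.path.dirname(__file__), "./short.mp4"),
        resizeWidth)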

View File

@@ -20,8 +20,8 @@ class ContourExtractor:
     #X = {frame_number: [(contour, (x,y,w,h)), ...], }
     extractedContours = dict()
-    min_area = 500
-    max_area = 28000
+    min_area = 100
+    max_area = 1000
     threashold = 13
     xDim = 0
     yDim = 0

View File

@@ -1,8 +1,11 @@
 import imageio
 import imutils
 import numpy as np
 from Layer import Layer
 import cv2
+from VideoReader import VideoReader
 
 class Exporter:
     fps = 30
@@ -16,10 +19,11 @@ class Exporter:
         writer.append_data(np.array(frame))
         writer.close()
 
-    def exportLayers(self,underlay, layers, outputPath, resizeWidth):
+    def exportLayers(self, layers, outputPath, resizeWidth):
+        underlay = cv2.VideoCapture(footagePath).read()[1]
         fps = self.fps
         writer = imageio.get_writer(outputPath, fps=fps)
-        i=0
+        i = 0
         for layer in layers:
             data = layer.data
             contours = layer.bounds
@@ -32,35 +36,60 @@ class Exporter:
             frame1 = imutils.resize(frame1, width=resizeWidth)
             frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
             frame1[y:y+frame.shape[0], x:x+frame.shape[1]] = frame
-            cv2.putText(frame1, str(i), (30,30), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255), 2)
+            cv2.putText(frame1, str(i), (30, 30),
+                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
             writer.append_data(np.array(frame1))
             #cv2.imshow("changes overlayed", frame)
             #cv2.waitKey(10) & 0XFF
             i += 1
         writer.close()
-        #cv2.destroyAllWindows()
+        # cv2.destroyAllWindows()
 
-    def exportOverlayed(self, underlay, layers, outputPath, resizeWidth):
+    def exportOverlayed(self, layers, footagePath, outputPath, resizeWidth):
+        listOfFrames = self.makeListOfFrames(layers)
+        videoReader = VideoReader(footagePath, listOfFrames)
+        videoReader.fillBuffer()
+        maxLength = self.getMaxLengthOfLayers(layers)
+        underlay = cv2.VideoCapture(footagePath).read()[1]
+        underlay = cv2.cvtColor(underlay, cv2.COLOR_BGR2RGB)
+        frames = [underlay]*maxLength
+        exportFrame = 0
+        while not videoReader.videoEnded():
+            frameCount, frame = videoReader.pop()
+            if frameCount % (60*self.fps) == 0:
+                print("Minutes processed: ", frameCount/(60*self.fps))
+            if frame is None:
+                print("ContourExtractor: frame was None")
+                continue
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            for layer in layers:
+                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
+                    (x, y, w, h) = layer.bounds[frameCount - layer.startFrame]
+                    factor = videoReader.w / resizeWidth
+                    x = int(x * factor)
+                    y = int(y * factor)
+                    w = int(w * factor)
+                    h = int(h * factor)
+                    # if exportFrame as index instead of frameCount - layer.startFrame then we have layer after layer
+                    frame2 = frames[frameCount - layer.startFrame]
+                    frame2[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
+                    frames[frameCount - layer.startFrame] = np.copy(frame2)
+        videoReader.thread.join()
         fps = self.fps
         writer = imageio.get_writer(outputPath, fps=fps)
-        maxLength = self.getMaxLengthOfLayers(layers)
-        for i in range(maxLength):
-            frame1 = underlay
-            frame1 = imutils.resize(frame1, width=resizeWidth)
-            frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2RGB)
-            for layer in layers:
-                data = layer.data
-                if len(layer.data) > i:
-                    (x, y, w, h) = layer.bounds[i]
-                    frame = layer.data[i]
-                    if frame is not None:
-                        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                        frame1[y:y+h, x:x+w] = frame
-            writer.append_data(np.array(frame1))
-        writer.close()
+        for frame in frames:
+            writer.append_data(frame)
+        writer.close()
 
     def getMaxLengthOfLayers(self, layers):
         maxLength = 0
@@ -69,3 +98,11 @@ class Exporter:
                 maxLength = layer.getLength()
         return maxLength
+
+    def makeListOfFrames(self, layers):
+        '''Returns set of all Frames which are relavant to the Layers'''
+        frameNumbers = set()
+        for layer in layers:
+            frameNumbers.update(
+                list(range(layer.startFrame, layer.startFrame + len(layer.bounds))))
+        return list(frameNumbers)
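
The core of the new exportOverlayed is the compositing loop: each decoded footage frame is pasted, bounding box by bounding box, into the output frame at index frameCount - layer.startFrame, which shifts every layer's motion to the start of the clip. A simplified, self-contained sketch of that step (the rescaling of the stored bounds by videoReader.w / resizeWidth is omitted, and layers are assumed to expose startFrame and bounds as elsewhere in this repository):

    import numpy as np

    def composite(frames, layers, frame, frameCount):
        # Paste every layer that is active at frameCount into its output slot.
        for layer in layers:
            if layer.startFrame <= frameCount < layer.startFrame + len(layer.bounds):
                x, y, w, h = layer.bounds[frameCount - layer.startFrame]
                slot = frameCount - layer.startFrame   # shifts the layer to the start of the clip
                out = frames[slot]
                out[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
                frames[slot] = np.copy(out)
        return frames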

View File

@@ -4,11 +4,14 @@ import cv2
 from time import sleep
 from queue import Queue
 import threading
 
 class VideoReader:
     #buffer = [(frameNumber, frame)]
+    listOfFrames = None
 
-    def __init__(self, videoPath):
+    def __init__(self, videoPath, setOfFrames = None):
         if videoPath is None:
             print("Video reader needs a videoPath!")
             return None
@@ -21,9 +24,8 @@ class VideoReader:
         res, image = self.vc.read()
         self.w = image.shape[1]
         self.h = image.shape[0]
-        print(f"Video reader startet with buffer length of 16")
+        if setOfFrames is not None:
+            self.listOfFrames = sorted(setOfFrames)
 
     def pop(self):
         return self.buffer.get(block=True)
@@ -35,8 +37,12 @@ class VideoReader:
         if self.buffer.full():
             print("VideoReader::fillBuffer was called when buffer was full.")
         self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
-        self.endFrame = 10*60*30
-        self.thread = threading.Thread(target=self.readFrames, args=())
+        #self.endFrame = 10*60*30
+        if self.listOfFrames is not None:
+            self.thread = threading.Thread(target=self.readFramesByList, args=())
+        else:
+            self.thread = threading.Thread(target=self.readFrames, args=())
         self.thread.start()
 
     def stop(self):
@@ -46,10 +52,34 @@ class VideoReader:
     def readFrames(self):
         while self.lastFrame < self.endFrame:
             if not self.buffer.full():
                 res, frame = self.vc.read()
                 if res:
                     self.buffer.put((self.lastFrame, frame))
                     self.lastFrame += 1
+            else:
+                sleep(0.5)
+        self.stopped = True
+
+    def readFramesByList(self):
+        self.vc.set(1, self.listOfFrames[0])
+        self.lastFrame = self.listOfFrames[0]
+        self.endFrame = self.listOfFrames[-1]
+        while self.lastFrame < self.endFrame:
+            if not self.buffer.full():
+                if self.lastFrame in self.listOfFrames:
+                    res, frame = self.vc.read()
+                    if res:
+                        self.buffer.put((self.lastFrame, frame))
+                        # since the list is sorted the first element is always the lowest relevant framenumber
+                        # [0,1,2,3,32,33,34,35,67,68,69]
+                        self.listOfFrames.pop(0)
+                    self.lastFrame += 1
+                else:
+                    # if current Frame number is not in list of Frames, we can skip a few frames
+                    self.vc.set(1, self.listOfFrames[0])
+                    self.lastFrame = self.listOfFrames[0]
             else:
                 sleep(0.5)
         self.stopped = True
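
With the optional setOfFrames argument the reader sorts the requested frame numbers, and readFramesByList seeks past stretches of footage that no layer touches instead of decoding every frame. A rough usage sketch (footagePath and the frame numbers are placeholders; the methods are the ones added here or already present in this file):

    frames_needed = {0, 1, 2, 3, 32, 33, 34, 35}   # e.g. from Exporter.makeListOfFrames(layers)
    reader = VideoReader(footagePath, frames_needed)
    reader.fillBuffer()                             # starts readFramesByList in a background thread
    while not reader.videoEnded():
        frameNumber, frame = reader.pop()           # blocks until the next relevant frame arrives
        if frame is None:
            continue
        print(frameNumber, frame.shape)
    reader.thread.join()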

Binary file not shown.

main.py
View File

@@ -11,8 +11,8 @@ import cv2
 def demo():
     print("startup")
-    resizeWidth = 512
-    maxLayerLength = 1*60*30
+    resizeWidth = 256
+    maxLayerLength = 20*30
     minLayerLength = 30
 
     start = time.time()
@@ -27,10 +27,10 @@ def demo():
     layerFactory.freeData(maxLayerLength, minLayerLength)
     print("sort Layers")
     layerFactory.sortLayers()
-    print("fill Layers")
-    layerFactory.fillLayers(footagePath, resizeWidth)
-    underlay = cv2.VideoCapture(footagePath).read()[1]
-    Exporter().exportOverlayed(underlay, layerFactory.layers, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
+    #print("fill Layers")
+    #layerFactory.fillLayers(footagePath, resizeWidth)
+    Exporter().exportOverlayed(layerFactory.layers, footagePath, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
     print("Total time: ", time.time() - start)
 
 def init():

View File

@@ -1,72 +0,0 @@
-from imutils.video import VideoStream
-import argparse
-import datetime
-import imutils
-import time
-import cv2
-import os
-import traceback
-import _thread
-
-def compare():
-    try:
-        url = os.path.join(os.path.dirname(__file__), "./generate test footage/out.mp4")
-        min_area = 100
-        max_area = 30000
-        threashold = 10
-        # initialize the first frame in the video stream
-        vs = cv2.VideoCapture(url)
-        res = vs.read()[0]
-        firstFrame = None
-        # loop over the frames of the video
-        while res:
-            res, frame = vs.read()
-            # resize the frame, convert it to grayscale, and blur it
-            frame = imutils.resize(frame, width=500)
-            cv2.imshow( "frame", frame )
-            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-            gray = cv2.GaussianBlur(gray, (31, 31), 0)
-            # if the first frame is None, initialize it
-            if firstFrame is None:
-                firstFrame = gray
-                continue
-            frameDelta = cv2.absdiff(gray, firstFrame)
-            thresh = cv2.threshold(frameDelta, threashold, 255, cv2.THRESH_BINARY)[1]
-            # dilate the thresholded image to fill in holes, then find contours
-            thresh = cv2.dilate(thresh, None, iterations=3)
-            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-            cnts = imutils.grab_contours(cnts)
-            # loop over the contours
-            for c in cnts:
-                if cv2.contourArea(c) < min_area or cv2.contourArea(c) > max_area:
-                    continue
-                (x, y, w, h) = cv2.boundingRect(c)
-                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
-                text = "Occupied"
-            cv2.imshow( "annotated", frame )
-            print("1")
-            cv2.waitKey(10) & 0XFF
-    except Exception as e:
-        traceback.print_exc()
-
-compare()