Merge branch 'master' into multiprocessed
commit 9d9f8a7361
@@ -4,3 +4,5 @@ generate test footage/images/
 generate test footage/3.MP4
 short.mp4
+__pycache__/
Analyzer.py (43 changed lines)
@@ -1,3 +1,44 @@
+from imutils.video import VideoStream
+import argparse
+import datetime
+import imutils
+import time
+import cv2
+import os
+import traceback
+import _thread
+import imageio
+import numpy as np
+import matplotlib.pyplot as plt
+
 class Analyzer:
-    def __init__(self, footage):
+    def __init__(self, videoPath):
         print("Analyzer constructed")
+        data = self.readIntoMem(videoPath)
+
+        vs = cv2.VideoCapture(videoPath)
+        threashold = 13
+        res, image = vs.read()
+        firstFrame = None
+        i = 0
+        diff = []
+        while res:
+            res, frame = vs.read()
+            if not res:
+                break
+            frame = imutils.resize(frame, width=500)
+            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
+            if firstFrame is None:
+                firstFrame = gray
+                continue
+            frameDelta = cv2.absdiff(gray, firstFrame)
+            thresh = cv2.threshold(frameDelta, threashold, 255, cv2.THRESH_BINARY)[1]
+            diff.append(np.count_nonzero(thresh))
+            i += 1
+            if i % (60*30) == 0:
+                print("Minutes processed: ", i/(60*30))
+        #print(diff)
+
+        plt.plot(diff)
+        plt.ylabel('some numbers')
+        plt.show()
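The new Analyzer constructor scores activity per frame by converting each frame to LAB, thresholding its absolute difference against the first frame, and counting the changed pixels, then plots that series. A minimal standalone sketch of that metric, assuming only opencv-python and numpy (motion_score is a hypothetical helper name, not part of this commit):

import cv2
import numpy as np

def motion_score(reference_bgr, frame_bgr, threshold=13):
    # Same colour space the Analyzer uses for its comparison.
    ref = cv2.cvtColor(reference_bgr, cv2.COLOR_BGR2LAB)
    cur = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2LAB)
    # Per-pixel absolute difference, then a hard binary threshold.
    delta = cv2.absdiff(cur, ref)
    mask = cv2.threshold(delta, threshold, 255, cv2.THRESH_BINARY)[1]
    # The activity score is the count of pixels that changed noticeably.
    return int(np.count_nonzero(mask))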
@@ -0,0 +1,26 @@
+class Config:
+    c = {
+        "min_area" : 500,
+        "max_area" : 28000,
+        "threashold" : 13,
+        "xDim" : 0,
+        "yDim" : 0,
+        "resizeWidth" : 512,
+        "inputPath" : "",
+        "outputPath": "",
+        "maxLayerLength": 1000,
+        "minLayerLength": 0,
+        "fps": 30,
+        "tolerance": 5,
+        "maxLength": None,
+    }
+
+    def __init__(self):
+        print("Current Config:", self.c)
+
+    def __getitem__(self, key):
+        return self.c[key]
+
+    def __setitem__(self, key, value):
+        self.c[key] = value
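Because c is a class attribute, every Config instance reads and writes the same shared dict through __getitem__ and __setitem__. A short usage sketch (the module name Config is assumed here, since the diff does not show the new file's name):

from Config import Config

cfg = Config()
print(cfg["max_area"])     # 28000, matching ContourExtractor's new default below
cfg["resizeWidth"] = 1024  # goes through __setitem__ into the shared class-level dict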
@@ -5,11 +5,11 @@ import imutils
 import time
 import cv2
 import os
-import numpy as np
 import traceback
 import _thread
 import imageio
 import numpy as np
+import time
 from threading import Thread
 from multiprocessing import Queue, Process, Pool
 from multiprocessing.pool import ThreadPool
@@ -20,7 +20,7 @@ class ContourExtractor:
     #X = {frame_number: [(contour, (x,y,w,h)), ...], }
     extractedContours = dict()
     min_area = 500
-    max_area = 7000
+    max_area = 28000
     threashold = 13
     xDim = 0
     yDim = 0
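extractContours itself is not part of this hunk, but min_area and max_area bound the contour areas that are kept, and raising max_area from 7000 to 28000 lets larger moving objects survive. A hedged sketch of the usual OpenCV pattern such a filter implies (an assumption about the approach, not code from this repository; OpenCV 4 signature for findContours):

import cv2

def filter_contours(binary_mask, min_area=500, max_area=28000):
    contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for c in contours:
        area = cv2.contourArea(c)
        if min_area < area < max_area:
            boxes.append(cv2.boundingRect(c))  # (x, y, w, h), the shape stored per frame
    return boxes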
Layer.py (9 changed lines)
@@ -15,8 +15,6 @@ class Layer:
         self.data = []
         self.bounds = []
         self.bounds.append(data)
-
-
         #print("Layer constructed")

     def add(self, frameNumber, data):
@@ -24,12 +22,13 @@ class Layer:
         self.lastFrame = frameNumber

         self.bounds.append(data)
+        self.getLength()

     def getLength(self):
-        self.length = len(self.data)
+        self.length = len(self.bounds)
         return self.length

-    def fill(self, inputPath):
+    def fill(self, inputPath, resizeWidth):
         '''reads in the contour data, needed for export'''

         cap = cv2.VideoCapture(inputPath)
@@ -40,7 +39,7 @@ class Layer:
             ret, frame = cap.read()

             if ret:
-                frame = imutils.resize(frame, width=512)
+                frame = imutils.resize(frame, width=resizeWidth)
                 (x, y, w, h) = self.bounds[i]
                 self.data[i] = frame[y:y+h, x:x+w]
                 i+=1
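fill re-reads the source video and crops each frame to the stored bounding box, so the frame must be resized with the same resizeWidth that was used during contour extraction or the (x, y, w, h) coordinates will not line up; passing resizeWidth in explicitly is what this change guarantees. The cropping convention, as a tiny self-contained check:

import numpy as np

frame = np.zeros((288, 512, 3), dtype=np.uint8)  # e.g. a frame resized to width 512
x, y, w, h = 100, 50, 40, 30
crop = frame[y:y+h, x:x+w]                       # rows first (y), then columns (x)
assert crop.shape == (30, 40, 3)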
@@ -10,11 +10,28 @@ class LayerFactory:
         if data is not None:
             self.extractLayers(data)

-    def freeData(self, maxLayerLength):
+    def removeStaticLayers(self):
+        '''Removes Layers with little to no movement'''
+        layers = []
+        for i, layer in enumerate(self.layers):
+            checks = 0
+            if abs(self.layers[i].bounds[0][0] - self.layers[i].bounds[-1][0]) < 5:
+                checks += 1
+            if abs(self.layers[i].bounds[0][1] - self.layers[i].bounds[-1][1]) < 5:
+                checks += 1
+            if checks <= 2:
+                layers.append(layer)
+        self.layers = layers
+
+
+    def freeData(self, maxLayerLength, minLayerLength):
         self.data.clear()
-        for i in range(len(self.layers)):
-            if self.layers[i].getLength() > maxLayerLength:
-                del self.layers[i]
+        layers = []
+        for l in self.layers:
+            if l.getLength() < maxLayerLength and l.getLength() > minLayerLength:
+                layers.append(l)
+        self.layers = layers
+        self.removeStaticLayers()


     def extractLayers(self, data = None):
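Note that checks can only ever reach 2, so the test if checks <= 2 is true for every layer and removeStaticLayers as written keeps everything. A sketch of the filter its docstring seems to intend, dropping layers whose bounding box barely moves (is_static is a hypothetical helper, not part of this commit):

def is_static(layer, tolerance=5):
    (x0, y0, _, _) = layer.bounds[0]
    (x1, y1, _, _) = layer.bounds[-1]
    return abs(x0 - x1) < tolerance and abs(y0 - y1) < tolerance

# self.layers = [l for l in self.layers if not is_static(l)]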
@@ -27,56 +44,48 @@ class LayerFactory:
         else:
             self.data = data

-        layers = []
         frameNumber = min(data)
         contours = data[frameNumber]

         for contour in contours:
-            layers.append(Layer(frameNumber, contour))
+            self.layers.append(Layer(frameNumber, contour))

+        oldLayerIDs = []
         # inserts all the fucking contours as layers?
+        for frameNumber, contours in data.items():
+            if frameNumber%5000 == 0:
+                print(f"{round(frameNumber/max(data.keys()), 2)}% done with Layer extraction")
+
         for frameNumber in sorted(data):
             contours = data[frameNumber]
             for (x,y,w,h) in contours:
                 foundLayer = False
-                i = 0
-                for i in range(0, len(layers)):
-                    layer = layers[i]
-
-                    if len(layer.bounds[-1]) != 4:
-                        # should never be called, hints at problem in ContourExtractor
-                        print("LayerFactory: Layer knew no bounds")
-                        continue
-
-                    if frameNumber - layer.lastFrame <= 5:
-                        (x2,y2,w2,h2) = layer.bounds[-1]
-                        if self.contoursOverlay((x-tol,y+h+tol), (x+w+tol,y-tol), (x2,y2+h2), (x2+w2,y2)):
-                            foundLayer = True
-                            layer.add(frameNumber, (x,y,w,h))
-                            break
-
-                    layers[i] = layer
+                for i in set(range(0, len(self.layers))).difference(set(oldLayerIDs)):
+                    if frameNumber - self.layers[i].lastFrame > 10:
+                        oldLayerIDs.append(i)
+                        continue
+
+                    (x2,y2,w2,h2) = self.layers[i].bounds[-1]
+                    if self.contoursOverlay((x-tol,y+h+tol), (x+w+tol,y-tol), (x2,y2+h2), (x2+w2,y2)):
+                        self.layers[i].add(frameNumber, (x,y,w,h))
+                        foundLayer = True
+                        break

                 if not foundLayer:
-                    layers.append(Layer(frameNumber, (x,y,w,h)))
-
-        self.layers = layers
+                    self.layers.append(Layer(frameNumber, (x,y,w,h)))


     def contoursOverlay(self, l1, r1, l2, r2):

         # If one rectangle is on left side of other
         if(l1[0] >= r2[0] or l2[0] >= r1[0]):
             return False

         # If one rectangle is above other
         if(l1[1] <= r2[1] or l2[1] <= r1[1]):
             return False

         return True

-    def fillLayers(self, footagePath):
+    def fillLayers(self, footagePath, resizeWidth):
         for i in range(len(self.layers)):
-            self.layers[i].fill(footagePath)
+            self.layers[i].fill(footagePath, resizeWidth)

     def sortLayers(self):
         # straight bubble
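contoursOverlay is the classic axis-aligned rectangle overlap test. Here l holds the left edge and the larger y value (the bottom edge in image coordinates) and r the right edge and the smaller y value (the top edge), which is why extractLayers passes (x, y+h) and (x+w, y) and pads the new box by the tolerance tol. A standalone worked check of that call shape:

def contoursOverlay(l1, r1, l2, r2):
    if l1[0] >= r2[0] or l2[0] >= r1[0]:  # one box entirely to the left of the other
        return False
    if l1[1] <= r2[1] or l2[1] <= r1[1]:  # one box entirely above the other
        return False
    return True

x, y, w, h, tol = 10, 10, 20, 20, 5       # new contour, grown by the tolerance
x2, y2, w2, h2 = 28, 12, 15, 15           # last box of an existing layer
print(contoursOverlay((x-tol, y+h+tol), (x+w+tol, y-tol), (x2, y2+h2), (x2+w2, y2)))  # True: the boxes touch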
@@ -1 +1,4 @@
 time compression
+
+
+Time consumed reading video: 369.0188868045807s 3.06GB 26min 1080p downscaled 500p 30fps
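For scale, 26 minutes at 30 fps is 46,800 frames, so the 369 s figure works out to roughly 127 frames decoded and downscaled per second:

frames = 26 * 60 * 30              # 46,800 frames in the clip
print(frames / 369.0188868045807)  # about 126.8 frames per second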
main.py (14 changed lines)
@@ -12,19 +12,25 @@ def demo():
     print("startup")
     resizeWidth = 1024
     maxLayerLength = 1*60*30
+    minLayerLength = 3
     start = time.time()

-    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.MP4")
-    analyzer = Analyzer(footagePath)
+    footagePath = os.path.join(os.path.dirname(__file__), "./generate test footage/3.mp4")
+    #analyzer = Analyzer(footagePath)
+    #print("Time consumed reading video: ", time.time() - start)
     contours = ContourExtractor().extractContours(footagePath, resizeWidth)
     print("Time consumed in working: ", time.time() - start)
     layerFactory = LayerFactory(contours)
-    layerFactory.freeData(maxLayerLength)
+    print("freeing Data", time.time() - start)
+    layerFactory.freeData(maxLayerLength, minLayerLength)
+    print("sort Layers")
     layerFactory.sortLayers()
-    layerFactory.fillLayers(footagePath)
+    print("fill Layers")
+    layerFactory.fillLayers(footagePath, resizeWidth)
     underlay = cv2.VideoCapture(footagePath).read()[1]
     Exporter().exportOverlayed(underlay, layerFactory.layers, os.path.join(os.path.dirname(__file__), "./short.mp4"), resizeWidth)
     print("Total time: ", time.time() - start)

 def init():
     print("not needed yet")
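demo() still hard-codes resizeWidth, maxLayerLength and minLayerLength even though the new Config class carries the same keys; a hypothetical way to wire them together (not part of this commit, and it assumes the new class lives in a module named Config):

from Config import Config

cfg = Config()
resizeWidth = cfg["resizeWidth"]
maxLayerLength = cfg["maxLayerLength"]
minLayerLength = cfg["minLayerLength"]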