added requirements.txt

reduced imports
Askill 2020-12-26 14:58:58 +01:00
parent ffaf281385
commit 14bfcecbde
9 changed files with 94 additions and 178 deletions

View File

@@ -1,57 +0,0 @@
-from imutils.video import VideoStream
-import argparse
-import datetime
-import imutils
-import time
-import cv2
-import os
-import traceback
-import _thread
-import imageio
-import numpy as np
-import matplotlib.pyplot as plt
-from Application.VideoReader import VideoReader
-from multiprocessing.pool import ThreadPool
-import imutils
-
-class Analyzer:
-    def __init__(self, config):
-        print("Analyzer constructed")
-        videoReader = VideoReader(config)
-        videoReader.fillBuffer()
-
-        self.config = config
-        self.avg = imutils.resize(np.zeros((videoReader.h,videoReader.w,3),np.float), width=config["resizeWidth"])
-        self.end = videoReader.endFrame
-        self.c = 0
-        start = time.time()
-        fak = 10
-        while not videoReader.videoEnded():
-            self.c, frame = videoReader.pop()
-            if not self.c%fak == 0:
-                continue
-            if videoReader.endFrame - self.c <= fak:
-                break
-            frame = imutils.resize(frame, width=self.config["resizeWidth"])
-            self.avg += frame.astype(np.float)/(self.end/fak)
-            if self.c%(1800*6) == 0:
-                print(f"{self.c/(60*30)} Minutes processed in {round((time.time() - start), 2)} each")
-                start = time.time()
-        #print("done")
-        videoReader.thread.join()
-        self.avg = np.array(np.round(self.avg), dtype=np.uint8)
-        #return self.avg
-        cv2.imshow("changes overlayed", self.avg)
-        cv2.waitKey(10) & 0XFF
-
-    def average(self, frame):
-        frame = imutils.resize(frame[1], width=self.config["resizeWidth"])
-        self.avg += frame.astype(np.float)/(self.end/5)

View File

@@ -1,28 +1,19 @@
-from imutils.video import VideoStream
-import argparse
-import datetime
-import imutils
-import time
-import cv2
-import os
-import traceback
-import _thread
-import imageio
-import numpy as np
+from Application.VideoReader import VideoReader
+from Application.Config import Config
from threading import Thread
from multiprocessing import Queue, Process, Pool
from multiprocessing.pool import ThreadPool
-import concurrent.futures
-from Application.VideoReader import VideoReader
from queue import Queue
-import threading
-from Application.Config import Config
+import imutils
+import time
+import cv2
+import numpy as np

class ContourExtractor:
-    #extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
+    # extracedContours = {frame_number: [(contour, (x,y,w,h)), ...], }
    # dict with frame numbers as keys and the contour bounds of every contour for that frame

    def getExtractedContours(self):
        return self.extractedContours
@@ -40,15 +31,15 @@ class ContourExtractor:
        self.resizeWidth = config["resizeWidth"]
        self.videoPath = config["inputPath"]
        self.xDim = 0
        self.yDim = 0
        self.config = config
        self.lastFrames = None
        self.averages = dict()

        print("ContourExtractor initiated")

    def extractContours(self):
        videoReader = VideoReader(self.config)
        self.fps = videoReader.getFPS()
        self.length = videoReader.getLength()
        videoReader.fillBuffer()
@@ -61,10 +52,11 @@ class ContourExtractor:
            if videoReader.buffer.qsize() == 0:
                time.sleep(.5)
-            tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
+            tmpData = [videoReader.pop()
+                       for i in range(0, videoReader.buffer.qsize())]
            pool.map(self.computeMovingAverage, (tmpData,))
            pool.map(self.async2, (tmpData,))
-            #for data in tmpData:
+            # for data in tmpData:
            #    self.getContours(data)
            frameCount = tmpData[-1][0]
@@ -81,18 +73,21 @@ class ContourExtractor:
        while frameCount not in self.averages:
            time.sleep(0.1)
        firstFrame = self.averages.pop(frameCount, None)

        if frameCount % (10*self.fps) == 1:
-            print(f" \r {round((frameCount/self.fps)/self.length, 4)*100} % processed in {round(time.time() - self.start, 2)}s", end='\r')
+            print(
+                f" \r {round((frameCount/self.fps)/self.length, 4)*100} % processed in {round(time.time() - self.start, 2)}s", end='\r')

        gray = self.prepareFrame(frame)
        frameDelta = cv2.absdiff(gray, firstFrame)
-        thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
+        thresh = cv2.threshold(frameDelta, self.threashold,
+                               255, cv2.THRESH_BINARY)[1]
        # dilate the thresholded image to fill in holes, then find contours
        thresh = cv2.dilate(thresh, None, iterations=10)
        #cv2.imshow("changes x", thresh)
        #cv2.waitKey(10) & 0XFF
-        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        cnts = cv2.findContours(
+            thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        contours = []
@@ -102,18 +97,15 @@ class ContourExtractor:
            (x, y, w, h) = cv2.boundingRect(c)
            if ca < self.min_area or ca > self.max_area:
                continue
            contours.append((x, y, w, h))
            # the mask has to be packed like this, since np doesn't have a bit array,
            # meaning every bit in the mask would take up 8bits, which migth be too much
-            masks.append(np.packbits(np.copy(thresh[y:y+h,x:x+w]), axis=0))
+            masks.append(np.packbits(np.copy(thresh[y:y+h, x:x+w]), axis=0))

        if len(contours) != 0 and contours is not None:
            # this should be thread safe
            self.extractedContours[frameCount] = contours
            self.extractedMasks[frameCount] = masks

    def prepareFrame(self, frame):
        frame = imutils.resize(frame, width=self.resizeWidth)
@@ -129,29 +121,28 @@ class ContourExtractor:
            frame = frames[0][1]
            frame = self.prepareFrame(frame)
            for j in range(0, len(frames)):
                frameNumber, _ = frames[j]
                self.averages[frameNumber] = frame
            # put last x frames into a buffer
            self.lastFrames = frames[-averageFrames:]
            return

        if self.lastFrames is not None:
            frames = self.lastFrames + frames

-        tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
+        tmp = [[j, frames, averageFrames]
+               for j in range(averageFrames, len(frames))]
        with ThreadPool(16) as pool:
            pool.map(self.averageDaFrames, tmp)
        self.lastFrames = frames[-averageFrames:]

    def averageDaFrames(self, dat):
        j, frames, averageFrames = dat
        frameNumber, frame = frames[j]
        frame = self.prepareFrame(frame)
        avg = frame/averageFrames
-        for jj in range(0,averageFrames-1):
+        for jj in range(0, averageFrames-1):
            avg += self.prepareFrame(frames[j-jj][1])/averageFrames
        self.averages[frameNumber] = np.array(np.round(avg), dtype=np.uint8)
-        #self.averages[frameNumber] = self.prepareFrame(frames[j-averageFrames - 1][1])
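
The mask-packing comment in the hunk above ("np doesn't have a bit array") refers to storing each thresholded crop as a packed bit array via np.packbits, so one byte holds eight mask pixels. A minimal round-trip sketch of that idea, as a standalone example with made-up shapes rather than the project's own code:

import numpy as np

h, w = 64, 48                                 # crop size, arbitrary for this sketch
mask = np.random.rand(h, w) > 0.5             # boolean mask, e.g. thresh[y:y+h, x:x+w] > 0

packed = np.packbits(mask, axis=0)            # 8 mask rows per output byte along axis 0
restored = np.unpackbits(packed, axis=0)[:h]  # unpack, then trim any zero-padded rows

assert np.array_equal(restored.astype(bool), mask)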

View File

@@ -1,12 +1,13 @@
+from Application.Layer import Layer
+from Application.VideoReader import VideoReader
+from datetime import datetime
import imageio
import imutils
import numpy as np
-from Application.Layer import Layer
import cv2
-from Application.VideoReader import VideoReader
import pickle
import time
-from datetime import datetime

class Exporter:
    fps = 30
@@ -18,14 +19,13 @@ class Exporter:
        self.config = config
        print("Exporter initiated")

-    def export(self, layers, contours, masks, raw = True, overlayed = True):
+    def export(self, layers, contours, masks, raw=True, overlayed=True):
        if raw:
            self.exportRawData(layers, contours, masks)
        if overlayed:
            self.exportOverlayed(layers)
        else:
            self.exportLayers(layers)

    def exportLayers(self, layers):
@@ -42,7 +42,8 @@ class Exporter:
        start = time.time()
        for i, layer in enumerate(layers):
-            print(f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
+            print(
+                f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
            if len(layer.bounds[0]) == 0:
                continue
            videoReader = VideoReader(self.config)
@@ -56,12 +57,15 @@ class Exporter:
                if x is None:
                    continue
                factor = videoReader.w / self.resizeWidth
-                x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
+                x, y, w, h = (int(x * factor), int(y * factor),
+                              int(w * factor), int(h * factor))
                frame2[y:y+h, x:x+w] = np.copy(frame[y:y+h, x:x+w])
-                time = datetime.fromtimestamp(int(frameCount/self.fps) + videoReader.getStartTime())
-                cv2.putText(frame2, str(i) + " " + f"{time.hour}:{time.minute}:{time.second}", (int(x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255), 2)
+                time = datetime.fromtimestamp(
+                    int(frameCount/self.fps) + videoReader.getStartTime())
+                cv2.putText(frame2, str(i) + " " + f"{time.hour}:{time.minute}:{time.second}", (int(
+                    x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                #cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
                writer.append_data(frame2)
            videoReader.vc.release()
@@ -69,7 +73,6 @@ class Exporter:
        videoReader.vc.release()
        videoReader.thread.join()
        writer.close()

    def exportOverlayed(self, layers):
@@ -84,7 +87,6 @@ class Exporter:
            frames.append(np.copy(underlay))
        exportFrame = 0

        while not videoReader.videoEnded():
            frameCount, frame = videoReader.pop()
            if frameCount % (60*self.fps) == 0:
@@ -98,25 +100,31 @@ class Exporter:
                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                    for i in range(0, len(layer.bounds[frameCount - layer.startFrame])):
                        underlay1 = underlay
-                        (x, y, w, h) = layer.bounds[frameCount - layer.startFrame][i]
+                        (x, y, w,
+                         h) = layer.bounds[frameCount - layer.startFrame][i]
                        mask = layer.masks[frameCount - layer.startFrame][i]
                        if x is None:
                            break
                        factor = videoReader.w / self.resizeWidth
-                        x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
+                        x, y, w, h = (int(x * factor), int(y * factor),
+                                      int(w * factor), int(h * factor))
                        mask = imutils.resize(mask, width=w, height=h+1)
-                        mask = np.resize(mask, (h,w))
+                        mask = np.resize(mask, (h, w))
                        mask = cv2.erode(mask, None, iterations=10)
                        mask *= 255
                        frame2 = frames[frameCount - layer.startFrame]
-                        xx = np.copy(cv2.bitwise_and(frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
-                        frame2[y:y+h, x:x+w] = cv2.addWeighted(xx,1, np.copy(cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)),1,0)
+                        xx = np.copy(cv2.bitwise_and(
+                            frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
+                        frame2[y:y+h, x:x+w] = cv2.addWeighted(xx, 1, np.copy(
+                            cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)), 1, 0)
                        frames[frameCount - layer.startFrame] = np.copy(frame2)
                        #cv2.imshow("changes x", frame2)
                        #cv2.waitKey(10) & 0XFF
-            time = datetime.fromtimestamp(int(frameCount/self.fps) + videoReader.getStartTime())
-            cv2.putText(frames[frameCount - layer.startFrame], f"{time.hour}:{time.minute}:{time.second}", (int(x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255), 2)
+            time = datetime.fromtimestamp(
+                int(frameCount/self.fps) + videoReader.getStartTime())
+            cv2.putText(frames[frameCount - layer.startFrame], f"{time.hour}:{time.minute}:{time.second}", (int(
+                x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

        videoReader.thread.join()
        videoReader.vc.release()
@@ -132,7 +140,7 @@ class Exporter:
    def exportRawData(self, layers, contours, masks):
        with open(self.config["importPath"], "wb+") as file:
            pickle.dump((layers, contours, masks), file)

    def getMaxLengthOfLayers(self, layers):
        maxLength = 0
        for layer in layers:

View File

@@ -39,8 +39,8 @@ class Layer:
            return
        if frameNumber > self.lastFrame:
            for i in range(frameNumber - self.lastFrame):
-                self.bounds.append([bound])
-                self.masks.append([mask])
+                self.bounds.append([])
+                self.masks.append([])
            self.lastFrame = frameNumber

        if bound not in self.bounds[index]:
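
The two changed lines pad the gap between the previous lastFrame and the new frameNumber with empty lists rather than with copies of the incoming bound and mask, so intermediate frames no longer repeat a bound they never contained. A simplified, standalone sketch of the new padding behaviour (hypothetical variable names, not the Layer class itself):

bounds = [[(0, 0, 10, 10)]]           # frame 0 already holds one bound
masks = [[None]]                      # placeholder mask for the sketch
last_frame, frame_number = 0, 3

for _ in range(frame_number - last_frame):
    bounds.append([])                 # frames 1..3 start out empty
    masks.append([])
last_frame = frame_number

bounds[-1].append((5, 5, 10, 10))     # the new bound is recorded only for frame 3
masks[-1].append(None)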

View File

@@ -2,10 +2,9 @@ from Application.Layer import Layer
from Application.Config import Config
from Application.VideoReader import VideoReader
from Application.Exporter import Exporter
from multiprocessing.pool import ThreadPool
-import cv2
import numpy as np
-import copy

class LayerFactory:
@@ -53,7 +52,7 @@ class LayerFactory:
        for x in tmp:
            self.getLayers(x)
-        #self.joinLayers()
+        # self.joinLayers()

        return self.layers

    def getLayers(self, data):
@@ -150,14 +149,11 @@ class LayerFactory:
        for l in layers:
            if l.lastFrame < maxFrame:
                maxFrame = l.lastFrame
        return maxFrame

    def contoursOverlay(self, l1, r1, l2, r2):
-        # If one rectangle is on left side of other
        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
            return False
-        # If one rectangle is above other
        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
            return False
        return True

View File

@@ -48,26 +48,6 @@ class LayerManager:
        for layer in self.layers:
            layer.calcStats()

-    def removeStaticLayers(self):
-        '''Removes Layers with little to no movement'''
-        layers = []
-        for i, layer in enumerate(self.layers):
-            checks = 0
-            for bound in layer.bounds[0]:
-                if bound[0] is None:
-                    continue
-                for bound2 in layer.bounds[-1]:
-                    if bound2[0] is None:
-                        continue
-                    if abs(bound[0] - bound2[0]) < 10:
-                        checks += 1
-                    if abs(bound[1] - bound2[1]) < 10:
-                        checks += 1
-            if checks <= 2:
-                layers.append(layer)
-        self.layers = layers
-
    def freeMin(self):
        self.data.clear()
        layers = []
@@ -76,21 +56,19 @@ class LayerManager:
            layers.append(l)
        self.layers = layers

    def freeMax(self):
        layers = []
        for l in self.layers:
            if len(l) < self.maxLayerLength:
                layers.append(l)
        self.layers = layers

    def tagLayers(self):
        '''Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier'''
        print("Tagging Layers")
        exporter = Exporter(self.config)
        start = time.time()
-        for i, layer in enumerate(self.layers[20:]):
+        for i, layer in enumerate(self.layers):
            print(f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
            start = time.time()
            if len(layer.bounds[0]) == 0:

View File

@@ -1,28 +1,26 @@
-import multiprocessing
-import cv2
-from time import sleep
-from queue import Queue
-import threading
-import pathlib
from Application.Config import Config
-import os
from datetime import datetime
+from queue import Queue
+import cv2
+import threading
+import os

class VideoReader:
    listOfFrames = None
    w = 0
    h = 0

-    def __init__(self, config, setOfFrames = None):
+    def __init__(self, config, setOfFrames=None):
        videoPath = config["inputPath"]
        if videoPath is None:
-            print("ERROR: Video reader needs a videoPath!")
-            return None
+            raise Exception("ERROR: Video reader needs a videoPath!")

        self.videoPath = videoPath
        self.lastFrame = 0
-        #buffer = Queue([(frameNumber, frame), ])
+        # buffer data struct:
+        # buffer = Queue([(frameNumber, frame), ])
        self.buffer = Queue(config["videoBufferLength"])
        self.vc = cv2.VideoCapture(videoPath)
        self.stopped = False
@@ -31,7 +29,7 @@ class VideoReader:
        self.calcLength()
        self.calcStartTime()

        if setOfFrames is not None:
            self.listOfFrames = sorted(setOfFrames)

    def getWH(self):
        '''get width and height'''
@@ -44,14 +42,13 @@ class VideoReader:
        return self.buffer.get(block=True)

    def fillBuffer(self, listOfFrames=None):
-        if self.buffer.full():
-            print("VideoReader::fillBuffer was called when buffer was full.")
        self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
        if listOfFrames is not None:
            self.listOfFrames = listOfFrames
        if self.listOfFrames is not None:
-            self.thread = threading.Thread(target=self.readFramesByList, args=())
+            self.thread = threading.Thread(
+                target=self.readFramesByList, args=())
        else:
            self.thread = threading.Thread(target=self.readFrames, args=())
        self.thread.start()
@@ -69,7 +66,7 @@ class VideoReader:
            self.lastFrame += 1
        self.stopped = True

    def readFramesByList(self):
        '''Reads all frames from a list of frame numbers'''
        self.vc.set(1, self.listOfFrames[0])
@@ -91,9 +88,9 @@ class VideoReader:
                # if current Frame number is not in list of Frames, we can skip a few frames
                self.vc.set(1, self.listOfFrames[0])
                self.lastFrame = self.listOfFrames[0]
        self.stopped = True

    def videoEnded(self):
        if self.stopped and self.buffer.empty():
            return True
@@ -107,7 +104,7 @@ class VideoReader:
        if self.fps is None:
            self.calcFPS()
        return self.fps

    def calcLength(self):
        fc = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
        self.length = fc / self.getFPS()
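
For orientation, this is the consumption pattern the rest of the codebase (e.g. ContourExtractor and Exporter in this commit) uses for the reader's (frameNumber, frame) buffer; a minimal sketch assuming a Config object that supplies inputPath and videoBufferLength:

from Application.Config import Config
from Application.VideoReader import VideoReader

config = Config()                       # assumed to provide "inputPath" and "videoBufferLength"
reader = VideoReader(config)
reader.fillBuffer()                     # starts the background reader thread

while not reader.videoEnded():
    frameNumber, frame = reader.pop()   # blocks until the next (frameNumber, frame) tuple is available
    print(frameNumber, frame.shape)     # placeholder for the real per-frame processing

reader.thread.join()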

View File

@@ -35,12 +35,11 @@ def main():
    layerManager = LayerManager(config, layers)
    layerManager.transformLayers()
    #layerManager.tagLayers()
    layers = layerManager.layers
-    #print([len(l) for l in sorted(layers, key = lambda c:len(c), reverse=True)[:20]])

    if len(layers) == 0:
        exit(1)

    exporter = Exporter(config)
    print(f"Exporting {len(contours)} Contours and {len(layers)} Layers")
    exporter.export(layers, contours, masks, raw=True, overlayed=True)

requirements.txt Normal file
View File

@@ -0,0 +1,4 @@
+opencv-python
+numpy
+imutils
+imageio
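
With the new requirements.txt in place, the project's third-party dependencies can be installed in one step, e.g. with pip install -r requirements.txt.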