added requirements.txt

reduced imports
Askill 2020-12-26 14:58:58 +01:00
parent ffaf281385
commit 14bfcecbde
9 changed files with 94 additions and 178 deletions

View File

@@ -1,57 +0,0 @@
-from imutils.video import VideoStream
-import argparse
-import datetime
-import imutils
-import time
-import cv2
-import os
-import traceback
-import _thread
-import imageio
-import numpy as np
-import matplotlib.pyplot as plt
-from Application.VideoReader import VideoReader
-from multiprocessing.pool import ThreadPool
-import imutils
-
-
-class Analyzer:
-    def __init__(self, config):
-        print("Analyzer constructed")
-        videoReader = VideoReader(config)
-        videoReader.fillBuffer()
-        self.config = config
-        self.avg = imutils.resize(np.zeros((videoReader.h,videoReader.w,3),np.float), width=config["resizeWidth"])
-        self.end = videoReader.endFrame
-        self.c = 0
-
-        start = time.time()
-        fak = 10
-        while not videoReader.videoEnded():
-            self.c, frame = videoReader.pop()
-            if not self.c%fak == 0:
-                continue
-            if videoReader.endFrame - self.c <= fak:
-                break
-            frame = imutils.resize(frame, width=self.config["resizeWidth"])
-            self.avg += frame.astype(np.float)/(self.end/fak)
-            if self.c%(1800*6) == 0:
-                print(f"{self.c/(60*30)} Minutes processed in {round((time.time() - start), 2)} each")
-                start = time.time()
-        #print("done")
-
-        videoReader.thread.join()
-        self.avg = np.array(np.round(self.avg), dtype=np.uint8)
-        #return self.avg
-        cv2.imshow("changes overlayed", self.avg)
-        cv2.waitKey(10) & 0XFF
-
-    def average(self, frame):
-        frame = imutils.resize(frame[1], width=self.config["resizeWidth"])
-        self.avg += frame.astype(np.float)/(self.end/5)
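
The deleted Analyzer built a long-exposure style average by sampling every fak-th frame and accumulating it with weight 1/(end/fak), i.e. one over the number of sampled frames, so the accumulator converges to the mean of the sampled frames. A compact, self-contained equivalent of that accumulation (function and variable names are illustrative, not from this repository):

import numpy as np

def running_mean(frames):
    # frames: list of equally sized arrays; each contributes 1/N to the mean
    frames = list(frames)
    acc = np.zeros_like(frames[0], dtype=np.float64)
    for f in frames:
        acc += f / len(frames)
    return np.round(acc).astype(np.uint8)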

View File

@@ -1,23 +1,14 @@
-from imutils.video import VideoStream
-import argparse
-import datetime
-import imutils
-import time
-import cv2
-import os
-import traceback
-import _thread
-import imageio
-import numpy as np
+from Application.VideoReader import VideoReader
+from Application.Config import Config
 from threading import Thread
 from multiprocessing import Queue, Process, Pool
 from multiprocessing.pool import ThreadPool
-import concurrent.futures
-from Application.VideoReader import VideoReader
 from queue import Queue
-import threading
-from Application.Config import Config
+import imutils
+import time
+import cv2
+import numpy as np


 class ContourExtractor:
@@ -61,7 +52,8 @@ class ContourExtractor:
            if videoReader.buffer.qsize() == 0:
                time.sleep(.5)
-           tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
+           tmpData = [videoReader.pop()
+                      for i in range(0, videoReader.buffer.qsize())]
            pool.map(self.computeMovingAverage, (tmpData,))
            pool.map(self.async2, (tmpData,))
            # for data in tmpData:
@@ -83,16 +75,19 @@ class ContourExtractor:
        firstFrame = self.averages.pop(frameCount, None)
        if frameCount % (10*self.fps) == 1:
-           print(f" \r {round((frameCount/self.fps)/self.length, 4)*100} % processed in {round(time.time() - self.start, 2)}s", end='\r')
+           print(
+               f" \r {round((frameCount/self.fps)/self.length, 4)*100} % processed in {round(time.time() - self.start, 2)}s", end='\r')
        gray = self.prepareFrame(frame)
        frameDelta = cv2.absdiff(gray, firstFrame)
-       thresh = cv2.threshold(frameDelta, self.threashold, 255, cv2.THRESH_BINARY)[1]
+       thresh = cv2.threshold(frameDelta, self.threashold,
+                              255, cv2.THRESH_BINARY)[1]
        # dilate the thresholded image to fill in holes, then find contours
        thresh = cv2.dilate(thresh, None, iterations=10)
        #cv2.imshow("changes x", thresh)
        #cv2.waitKey(10) & 0XFF
-       cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+       cnts = cv2.findContours(
+           thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        contours = []
@@ -102,14 +97,11 @@ class ContourExtractor:
            (x, y, w, h) = cv2.boundingRect(c)
            if ca < self.min_area or ca > self.max_area:
                continue
            contours.append((x, y, w, h))
            # the mask has to be packed like this, since np doesn't have a bit array,
            # meaning every bit in the mask would take up 8bits, which migth be too much
            masks.append(np.packbits(np.copy(thresh[y:y+h, x:x+w]), axis=0))
        if len(contours) != 0 and contours is not None:
            # this should be thread safe
            self.extractedContours[frameCount] = contours
@@ -138,13 +130,13 @@ class ContourExtractor:
        if self.lastFrames is not None:
            frames = self.lastFrames + frames
-       tmp = [[j, frames, averageFrames] for j in range(averageFrames, len(frames))]
+       tmp = [[j, frames, averageFrames]
+              for j in range(averageFrames, len(frames))]
        with ThreadPool(16) as pool:
            pool.map(self.averageDaFrames, tmp)
        self.lastFrames = frames[-averageFrames:]

    def averageDaFrames(self, dat):
        j, frames, averageFrames = dat
        frameNumber, frame = frames[j]
@@ -154,4 +146,3 @@ class ContourExtractor:
        for jj in range(0, averageFrames-1):
            avg += self.prepareFrame(frames[j-jj][1])/averageFrames
        self.averages[frameNumber] = np.array(np.round(avg), dtype=np.uint8)
-       #self.averages[frameNumber] = self.prepareFrame(frames[j-averageFrames - 1][1])
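
The reflowed hunks above implement a standard frame-differencing pipeline (absdiff against a moving-average background, binary threshold, dilation, contour extraction), and the packbits call stores each binary mask at one bit per pixel instead of one byte. A minimal, self-contained sketch of both ideas; the function name, parameters and thresholds here are illustrative, not taken from this repository:

import cv2
import imutils
import numpy as np

def moving_objects(background_gray, frame_gray, threshold=25, min_area=500):
    # difference against the (grayscale, pre-blurred) background estimate
    delta = cv2.absdiff(background_gray, frame_gray)
    mask = cv2.threshold(delta, threshold, 255, cv2.THRESH_BINARY)[1]
    # dilate to close holes before looking for contours
    mask = cv2.dilate(mask, None, iterations=10)
    cnts = imutils.grab_contours(
        cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    boxes, packed_masks = [], []
    for c in cnts:
        if cv2.contourArea(c) < min_area:
            continue
        x, y, w, h = cv2.boundingRect(c)
        boxes.append((x, y, w, h))
        # np.packbits stores the 0/255 mask as one bit per pixel instead of one byte
        packed_masks.append(np.packbits(mask[y:y+h, x:x+w] > 0, axis=0))
    return boxes, packed_masks

# unpacking later: np.unpackbits(packed, axis=0)[:h] * 255 restores a uint8 mask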

View File

@@ -1,12 +1,13 @@
+from Application.Layer import Layer
+from Application.VideoReader import VideoReader
+from datetime import datetime
 import imageio
 import imutils
 import numpy as np
-from Application.Layer import Layer
 import cv2
-from Application.VideoReader import VideoReader
 import pickle
 import time
-from datetime import datetime


 class Exporter:
     fps = 30
@@ -26,7 +27,6 @@ class Exporter:
        else:
            self.exportLayers(layers)

-
    def exportLayers(self, layers):
        listOfFrames = self.makeListOfFrames(layers)
@@ -42,7 +42,8 @@ class Exporter:
        start = time.time()
        for i, layer in enumerate(layers):
-           print(f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
+           print(
+               f"\r {i}/{len(layers)} {round(i/len(layers)*100,2)}% {round((time.time() - start), 2)}s", end='\r')
            if len(layer.bounds[0]) == 0:
                continue
            videoReader = VideoReader(self.config)
@@ -56,12 +57,15 @@ class Exporter:
                if x is None:
                    continue
                factor = videoReader.w / self.resizeWidth
-               x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
+               x, y, w, h = (int(x * factor), int(y * factor),
+                             int(w * factor), int(h * factor))
                frame2[y:y+h, x:x+w] = np.copy(frame[y:y+h, x:x+w])
-               time = datetime.fromtimestamp(int(frameCount/self.fps) + videoReader.getStartTime())
-               cv2.putText(frame2, str(i) + " " + f"{time.hour}:{time.minute}:{time.second}", (int(x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255), 2)
+               time = datetime.fromtimestamp(
+                   int(frameCount/self.fps) + videoReader.getStartTime())
+               cv2.putText(frame2, str(i) + " " + f"{time.hour}:{time.minute}:{time.second}", (int(
+                   x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                #cv2.putText(frame2, str(layer.stats["avg"]) + " " + str(layer.stats["var"]) + " " + str(layer.stats["dev"]), (int(500), int(500)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,0,255), 2)
            writer.append_data(frame2)
            videoReader.vc.release()
@@ -70,7 +74,6 @@ class Exporter:
        videoReader.thread.join()
        writer.close()

-
    def exportOverlayed(self, layers):
        listOfFrames = self.makeListOfFrames(layers)
@@ -84,7 +87,6 @@ class Exporter:
            frames.append(np.copy(underlay))
        exportFrame = 0
-
        while not videoReader.videoEnded():
            frameCount, frame = videoReader.pop()
            if frameCount % (60*self.fps) == 0:
@@ -98,25 +100,31 @@ class Exporter:
                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
                    for i in range(0, len(layer.bounds[frameCount - layer.startFrame])):
                        underlay1 = underlay
-                       (x, y, w, h) = layer.bounds[frameCount - layer.startFrame][i]
+                       (x, y, w,
+                        h) = layer.bounds[frameCount - layer.startFrame][i]
                        mask = layer.masks[frameCount - layer.startFrame][i]
                        if x is None:
                            break
                        factor = videoReader.w / self.resizeWidth
-                       x, y, w, h = (int(x * factor), int(y * factor), int(w * factor), int(h * factor))
+                       x, y, w, h = (int(x * factor), int(y * factor),
+                                     int(w * factor), int(h * factor))
                        mask = imutils.resize(mask, width=w, height=h+1)
                        mask = np.resize(mask, (h, w))
                        mask = cv2.erode(mask, None, iterations=10)
                        mask *= 255
                        frame2 = frames[frameCount - layer.startFrame]
-                       xx = np.copy(cv2.bitwise_and(frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
-                       frame2[y:y+h, x:x+w] = cv2.addWeighted(xx,1, np.copy(cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)),1,0)
+                       xx = np.copy(cv2.bitwise_and(
+                           frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
+                       frame2[y:y+h, x:x+w] = cv2.addWeighted(xx, 1, np.copy(
+                           cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)), 1, 0)
                        frames[frameCount - layer.startFrame] = np.copy(frame2)
                        #cv2.imshow("changes x", frame2)
                        #cv2.waitKey(10) & 0XFF
-                       time = datetime.fromtimestamp(int(frameCount/self.fps) + videoReader.getStartTime())
-                       cv2.putText(frames[frameCount - layer.startFrame], f"{time.hour}:{time.minute}:{time.second}", (int(x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255), 2)
+                       time = datetime.fromtimestamp(
+                           int(frameCount/self.fps) + videoReader.getStartTime())
+                       cv2.putText(frames[frameCount - layer.startFrame], f"{time.hour}:{time.minute}:{time.second}", (int(
+                           x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        videoReader.thread.join()
        videoReader.vc.release()
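
The exportOverlayed hunk above composites each moving object onto the output frame: bitwise_and with the inverted mask keeps the existing background, bitwise_and with the mask cuts the object out of the source frame, and addWeighted merges the two. A minimal sketch of that step; paste_masked and its argument names are illustrative, not from this repository, and all three arrays are assumed to be equally sized uint8 ROIs with a single-channel mask:

import cv2

def paste_masked(canvas_roi, frame_roi, mask):
    # keep canvas pixels where the mask is 0, take frame pixels where it is 255
    keep = cv2.bitwise_and(canvas_roi, canvas_roi, mask=cv2.bitwise_not(mask))
    take = cv2.bitwise_and(frame_roi, frame_roi, mask=mask)
    return cv2.addWeighted(keep, 1, take, 1, 0)

# usage on a bounding box: out[y:y+h, x:x+w] = paste_masked(out[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask)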

View File

@@ -39,8 +39,8 @@ class Layer:
            return
        if frameNumber > self.lastFrame:
            for i in range(frameNumber - self.lastFrame):
-               self.bounds.append([bound])
-               self.masks.append([mask])
+               self.bounds.append([])
+               self.masks.append([])
            self.lastFrame = frameNumber
        if bound not in self.bounds[index]:
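
The Layer change above fixes gap handling: frames between the layer's last recorded frame and a newly added frame now get empty bound and mask lists instead of repeating the incoming bound across every skipped frame. A tiny sketch of the intended behaviour, with a hypothetical class and add() helper that are not this repository's API:

class LayerSketch:
    def __init__(self, start_frame):
        self.startFrame = start_frame
        self.lastFrame = start_frame
        self.bounds, self.masks = [[]], [[]]

    def add(self, frame_number, bound, mask):
        # pad skipped frames with empty lists so indices line up with frame numbers
        for _ in range(frame_number - self.lastFrame):
            self.bounds.append([])
            self.masks.append([])
        self.lastFrame = max(self.lastFrame, frame_number)
        index = frame_number - self.startFrame
        if bound not in self.bounds[index]:
            self.bounds[index].append(bound)
            self.masks[index].append(mask)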

View File

@@ -2,10 +2,9 @@ from Application.Layer import Layer
 from Application.Config import Config
 from Application.VideoReader import VideoReader
 from Application.Exporter import Exporter
 from multiprocessing.pool import ThreadPool
-import cv2
 import numpy as np
-import copy


 class LayerFactory:
@@ -150,14 +149,11 @@ class LayerFactory:
        for l in layers:
            if l.lastFrame < maxFrame:
                maxFrame = l.lastFrame
        return maxFrame

    def contoursOverlay(self, l1, r1, l2, r2):
-       # If one rectangle is on left side of other
        if(l1[0] >= r2[0] or l2[0] >= r1[0]):
            return False
-       # If one rectangle is above other
        if(l1[1] <= r2[1] or l2[1] <= r1[1]):
            return False
        return True
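
contoursOverlay is the classic axis-aligned rectangle overlap test that the two deleted comments described: two rectangles do not overlap if one lies entirely beside the other or entirely above/below it. The same idea, expressed over (x, y, w, h) boxes as they are used elsewhere in this diff rather than the corner-point signature of the repository's method (the function name and signature here are illustrative):

def rects_overlap(ax, ay, aw, ah, bx, by, bw, bh):
    # two axis-aligned rectangles overlap iff both their x- and y-intervals overlap
    x_overlap = ax < bx + bw and bx < ax + aw
    y_overlap = ay < by + bh and by < ay + ah
    return x_overlap and y_overlap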

View File

@@ -48,26 +48,6 @@ class LayerManager:
        for layer in self.layers:
            layer.calcStats()

-   def removeStaticLayers(self):
-       '''Removes Layers with little to no movement'''
-       layers = []
-       for i, layer in enumerate(self.layers):
-           checks = 0
-           for bound in layer.bounds[0]:
-               if bound[0] is None:
-                   continue
-               for bound2 in layer.bounds[-1]:
-                   if bound2[0] is None:
-                       continue
-                   if abs(bound[0] - bound2[0]) < 10:
-                       checks += 1
-                   if abs(bound[1] - bound2[1]) < 10:
-                       checks += 1
-           if checks <= 2:
-               layers.append(layer)
-       self.layers = layers
-
    def freeMin(self):
        self.data.clear()
        layers = []
@@ -76,7 +56,6 @@ class LayerManager:
            layers.append(l)
        self.layers = layers

-
    def freeMax(self):
        layers = []
        for l in self.layers:
@@ -84,13 +63,12 @@ class LayerManager:
            layers.append(l)
        self.layers = layers

-
    def tagLayers(self):
        '''Use classifieres the tag all Layers, by reading the contour content from the original video, then applying the classifier'''
        print("Tagging Layers")
        exporter = Exporter(self.config)
        start = time.time()
-       for i, layer in enumerate(self.layers[20:]):
+       for i, layer in enumerate(self.layers):
            print(f"{round(i/len(self.layers)*100,2)} {round((time.time() - start), 2)}")
            start = time.time()
            if len(layer.bounds[0]) == 0:

View File

@@ -1,13 +1,11 @@
-import multiprocessing
-import cv2
-from time import sleep
-from queue import Queue
-import threading
-import pathlib
 from Application.Config import Config
-import os
 from datetime import datetime
+from queue import Queue
+import cv2
+import threading
+import os


 class VideoReader:
     listOfFrames = None
@@ -17,11 +15,11 @@ class VideoReader:
    def __init__(self, config, setOfFrames=None):
        videoPath = config["inputPath"]
        if videoPath is None:
-           print("ERROR: Video reader needs a videoPath!")
-           return None
+           raise Exception("ERROR: Video reader needs a videoPath!")
        self.videoPath = videoPath
        self.lastFrame = 0
+       # buffer data struct:
        # buffer = Queue([(frameNumber, frame), ])
        self.buffer = Queue(config["videoBufferLength"])
        self.vc = cv2.VideoCapture(videoPath)
@@ -44,14 +42,13 @@ class VideoReader:
        return self.buffer.get(block=True)

    def fillBuffer(self, listOfFrames=None):
-       if self.buffer.full():
-           print("VideoReader::fillBuffer was called when buffer was full.")
        self.endFrame = int(self.vc.get(cv2.CAP_PROP_FRAME_COUNT))
        if listOfFrames is not None:
            self.listOfFrames = listOfFrames
        if self.listOfFrames is not None:
-           self.thread = threading.Thread(target=self.readFramesByList, args=())
+           self.thread = threading.Thread(
+               target=self.readFramesByList, args=())
        else:
            self.thread = threading.Thread(target=self.readFrames, args=())
        self.thread.start()
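
VideoReader decodes frames on a background thread into a bounded Queue, so decoding overlaps with processing and the reader blocks when the consumer falls behind. A minimal sketch of that producer pattern; start_reader, the buffer size and the sentinel convention are illustrative assumptions, not this repository's API:

import threading
from queue import Queue

import cv2

def start_reader(path, buffer_size=128):
    q = Queue(maxsize=buffer_size)  # bounded: put() blocks when the consumer lags
    vc = cv2.VideoCapture(path)

    def read_frames():
        frame_number = 0
        while True:
            ok, frame = vc.read()
            if not ok:
                q.put((frame_number, None))  # sentinel marks the end of the video
                break
            q.put((frame_number, frame), block=True)
            frame_number += 1

    t = threading.Thread(target=read_frames, daemon=True)
    t.start()
    return q, t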

View File

@@ -35,12 +35,11 @@ def main():
    layerManager = LayerManager(config, layers)
    layerManager.transformLayers()
    #layerManager.tagLayers()
    layers = layerManager.layers
-   #print([len(l) for l in sorted(layers, key = lambda c:len(c), reverse=True)[:20]])

    if len(layers) == 0:
        exit(1)

    exporter = Exporter(config)
    print(f"Exporting {len(contours)} Contours and {len(layers)} Layers")
    exporter.export(layers, contours, masks, raw=True, overlayed=True)

requirements.txt (new file)
View File

@@ -0,0 +1,4 @@
+opencv-python
+numpy
+imutils
+imageio
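
The new requirements.txt lists the four runtime dependencies without version constraints; it can typically be installed with pip install -r requirements.txt. A pinned variant would make the setup reproducible. The versions below are an illustrative guess from the commit's time frame, not part of this commit:

# hypothetical pinned variant (versions are assumptions, not from this repository)
opencv-python==4.2.0.32
numpy==1.19.4
imutils==0.5.3
imageio==2.9.0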