added docs
commit 26ae5ab0bf
parent 32491f93f1
@@ -1,7 +1,7 @@
 class Config:
     c = {
-        "min_area" : 100,
+        "min_area" : 500,
         "max_area" : 40000,
         "threashold" : 8,
         "resizeWidth" : 512,
@@ -9,7 +9,7 @@ class Config:
         "outputPath": None,
         "maxLayerLength": 900,
         "minLayerLength": 20,
-        "tolerance": 10,
+        "tolerance": 5,
         "maxLength": None,
         "ttolerance": 60,
         "videoBufferLength": 500,
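Note: the hunks above only show the changed keys. For orientation, here is a minimal sketch (not from the repository) of a dict-backed Config with item access, matching the config["key"] reads and writes used elsewhere in this commit; the dunder methods and the example path are assumptions.

# Illustrative sketch only: a dict-backed Config exposing item access.
# Keys shown are taken from the diff above; __getitem__/__setitem__ are assumed.
class Config:
    c = {
        "min_area": 500,
        "max_area": 40000,
        "resizeWidth": 512,
        "tolerance": 5,
        "videoBufferLength": 500,
    }

    def __getitem__(self, key):
        return self.c[key]

    def __setitem__(self, key, value):
        self.c[key] = value

config = Config()
config["importPath"] = "output/example.txt"   # hypothetical path
print(config["min_area"])                      # -> 500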
@@ -56,16 +56,16 @@ class ContourExtractor:
         threads = self.config["videoBufferLength"]
         self.start = time.time()
         # start a bunch of frames and let them read from the video reader buffer until the video reader reaches EOF
-        with ThreadPool(threads) as pool:
+        with ThreadPool(16) as pool:
             while not videoReader.videoEnded():
                 if videoReader.buffer.qsize() == 0:
                     time.sleep(.5)

                 tmpData = [videoReader.pop() for i in range(0, videoReader.buffer.qsize())]
                 self.computeMovingAverage(tmpData)
-                #pool.map(self.getContours, tmpData)
-                for data in tmpData:
-                    self.getContours(data)
+                pool.map_async(self.getContours, tmpData)
+                #for data in tmpData:
+                #    self.getContours(data)
                 frameCount = tmpData[-1][0]

         videoReader.thread.join()
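Note: the hunk above replaces the serial getContours loop with pool.map_async on a fixed-size ThreadPool. A minimal, self-contained sketch of that drain-and-dispatch pattern (a plain queue and process_frame stand in for videoReader.buffer and getContours), including the wait that map_async makes necessary:

import queue
import time
from multiprocessing.pool import ThreadPool

buffer = queue.Queue()
for i in range(100):                      # stand-in for the reader thread filling the buffer
    buffer.put((i, f"frame-{i}"))

def process_frame(data):                  # stand-in for self.getContours
    frame_count, frame = data
    return frame_count

results = []
with ThreadPool(16) as pool:
    while not buffer.empty():             # the real code checks videoReader.videoEnded()
        if buffer.qsize() == 0:
            time.sleep(0.5)
            continue
        batch = [buffer.get() for _ in range(buffer.qsize())]
        # map_async returns immediately; keep the handle so the work can be awaited
        results.append(pool.map_async(process_frame, batch))
    for r in results:                     # wait before the pool is terminated on exit
        r.wait()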
@@ -78,9 +78,8 @@ class ContourExtractor:
             time.sleep(0.1)
         firstFrame = self.averages.pop(frameCount, None)

-        if frameCount % (60*30) == 0:
-            print(f" \r {frameCount/(60*30)} Minutes processed in {round((time.time() - self.start), 2)} each", end='\r')
-            self.start = time.time()
+        if frameCount % (60*30) == 1:
+            print(f" \r {frameCount/(60*30)} Minutes processed in {round((time.time() - self.start)/(frameCount/(60*30)), 2)} each", end='\r')

         gray = self.prepareFrame(frame)
         frameDelta = cv2.absdiff(gray, firstFrame)
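Note: cv2.absdiff compares the prepared frame against the per-frame moving average popped from self.averages. Below is a hedged sketch of the usual grayscale/blur/absdiff/threshold/contour steps such a pipeline performs; function and parameter names are illustrative, only the absdiff call is taken from the diff.

import cv2
import imutils

def prepare_frame(frame, resize_width=512):
    # resize, convert to grayscale and blur so small noise does not register as motion
    frame = imutils.resize(frame, width=resize_width)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.GaussianBlur(gray, (21, 21), 0)

def diff_contours(frame, average, threshold=8, min_area=500):
    # average: an already prepared frame of the same size (the moving average)
    gray = prepare_frame(frame)
    frame_delta = cv2.absdiff(gray, average)
    thresh = cv2.threshold(frame_delta, threshold, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours = imutils.grab_contours(
        cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
    return [c for c in contours if cv2.contourArea(c) >= min_area]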
@@ -119,11 +119,13 @@ class Exporter:
             y = int(y * factor)
             w = int(w * factor)
             h = int(h * factor)

-            mask = imutils.resize(mask, width=w, height=h+1)
+            mask = np.resize(mask, (h,w))
+
+            mask = cv2.erode(mask, None, iterations=10)
             mask *= 255
             frame2 = frames[frameCount - layer.startFrame]
-            xx = np.copy(cv2.bitwise_and(underlay1[y:y+h, x:x+w], underlay1[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
+            xx = np.copy(cv2.bitwise_and(frame2[y:y+h, x:x+w], frame2[y:y+h, x:x+w], mask=cv2.bitwise_not(mask)))
             frame2[y:y+h, x:x+w] = cv2.addWeighted(xx,1, np.copy(cv2.bitwise_and(frame[y:y+h, x:x+w], frame[y:y+h, x:x+w], mask=mask)),1,0)
             frames[frameCount - layer.startFrame] = np.copy(frame2)
             #cv2.imshow("changes x", frame2)
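Note: the bitwise_and/addWeighted lines above paste the masked object pixels from one frame into a region of another. A minimal sketch of that compositing step with illustrative variable names; the OpenCV calls mirror the diff:

import numpy as np
import cv2

def paste_masked_region(dst, src, mask, x, y):
    # mask: uint8 array (h, w), 255 where src pixels should replace dst pixels
    h, w = mask.shape
    roi = dst[y:y+h, x:x+w]
    background = cv2.bitwise_and(roi, roi, mask=cv2.bitwise_not(mask))
    foreground = cv2.bitwise_and(src[y:y+h, x:x+w], src[y:y+h, x:x+w], mask=mask)
    dst[y:y+h, x:x+w] = cv2.addWeighted(background, 1, foreground, 1, 0)
    return dst

# usage with synthetic data
dst = np.zeros((100, 100, 3), dtype=np.uint8)
src = np.full((100, 100, 3), 255, dtype=np.uint8)
mask = np.zeros((20, 30), dtype=np.uint8)
mask[5:15, 5:25] = 255
paste_masked_region(dst, src, mask, x=10, y=10)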
@@ -145,8 +147,6 @@ class Exporter:
         with open(self.config["importPath"], "wb+") as file:
             pickle.dump((layers, contours, masks), file)

-
-
     def getMaxLengthOfLayers(self, layers):
         maxLength = 0
         for layer in layers:
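Note: the export above pickles the (layers, contours, masks) tuple to config["importPath"]. A sketch of the matching load side, which is presumably what Importer.importRawData does; the function name here is illustrative.

import pickle

def import_raw_data(path):
    with open(path, "rb") as file:
        layers, contours, masks = pickle.load(file)
    return layers, contours, masks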
@@ -2,13 +2,6 @@ import numpy as np
 import cv2
 import imutils

-from kneed import KneeLocator
-from sklearn.datasets import make_blobs
-from sklearn.cluster import KMeans
-from sklearn.metrics import silhouette_score
-from sklearn.preprocessing import StandardScaler
-import matplotlib.pyplot as plt
-
 class Layer:
     #bounds = [[(x,y,w,h), ],]

@@ -7,6 +7,7 @@ from Application.Classifiers.Classifier import Classifier
 import cv2
 import numpy as np
+import time

 class LayerManager:
     def __init__(self, config, layers):
         self.data = {}
@@ -22,8 +23,6 @@ class LayerManager:
         self.tags = []
         print("LayerManager constructed")

-
-
     def transformLayers(self):
         print("'Cleaning' Layers")
         self.freeMin()
Binary file not shown.
@@ -7,29 +7,26 @@ import glob
 import os
 import numpy as np

-def getRandomColorString():
-    return '#{:06x}'.format(random.randint(0, 256**3))
-
 fps = 30
 xmax = 1920
 ymax = 1080
-# in minutes
-length = 1
-
+length = 1 # in minutes
 numberOfEvents = 4

 dirname = os.path.dirname(__file__)

 imageType = ".png"

 imagesPath = os.path.join(dirname, 'images')+"/"
 outputPath = os.path.join(dirname, 'out.mp4')

-def genImages():
-    counter = 0
+def getRandomColorString():
+    return '#{:06x}'.format(random.randint(0, 256**3))
+
+def genVideo():
+    writer = imageio.get_writer(outputPath, fps=fps)
+    writer.append_data(np.zeros(shape=[1080, 1920, 3], dtype=np.uint8))
+    writer.append_data(np.zeros(shape=[1080, 1920, 3], dtype=np.uint8))
     for i in range(numberOfEvents):
         objectWidth = (5 + random.randint(0, 5)) * xmax / 100
         objectHeight = (10 + random.randint(-5, 5)) * ymax / 100
@@ -41,40 +38,15 @@ def genImages():
         color = getRandomColorString()

         for j in range(int(fps*length*60 / numberOfEvents)):
-            counter+=1
             objectX -= objectSpeedX
             objectY -= objectSpeedY

             objectShape = [(objectX, objectY), (objectX + objectWidth, objectY + objectHeight)]


             img = Image.new("RGB", (xmax, ymax))
             # create rectangle image
             img1 = ImageDraw.Draw(img)

             img1.rectangle(objectShape, fill = color)
             #img.save( imagesPath + str(counter).zfill(6) + imageType)
+            writer.append_data(np.array(img))

     writer.close()

-def makeVideo(input, output):
-    fileList = []
-    for file in sorted(os.listdir(input)):
-        complete_path = imagesPath + file
-        fileList.append(complete_path)
-
-    writer = imageio.get_writer(output, fps=fps)
-
-    for im in fileList:
-        writer.append_data(imageio.imread(im))
-    writer.close()
-
-def deleteImages():
-    filelist = glob.glob(os.path.join(imagesPath, "*" + imageType))
-    for f in filelist:
-        os.remove(f)
-
-genImages()
-#makeVideo(imagesPath, outputPath)
-#deleteImages()
+genVideo()
main.py
@@ -10,11 +10,15 @@ from Application.VideoReader import VideoReader
 from Application.LayerManager import LayerManager
 from Application.Classifiers import *

+
+
+
 def main():
-    start = time.time()
+    startTotal = time.time()
+    start = startTotal
     config = Config()

-    fileName = "3.mp4"
+    fileName = "X23-1.mp4"
     outputPath = os.path.join(os.path.dirname(__file__), "output")
     dirName = os.path.join(os.path.dirname(__file__), "generate test footage")

@@ -24,12 +28,20 @@ def main():
     config["importPath"] = os.path.join(outputPath, fileName.split(".")[0] + ".txt")

     config["w"], config["h"] = VideoReader(config).getWH()

+    stats = dict()
+    stats["File Name"] = config["importPath"]
+    stats["threads"] = "16, 16"
+    stats["buffer"] = config["bufferLength"]
     if not os.path.exists(config["importPath"]):
         contours, masks = ContourExtractor(config).extractContours()
-        print("Time consumed extracting: ", time.time() - start)
+        stats["Contour Extractor"] = time.time() - start
         start = time.time()
+
+        print("Time consumed extracting contours: ", stats["Contour Extractor"])
         layerFactory = LayerFactory(config)
         layers = layerFactory.extractLayers(contours, masks)
+        stats["Layer Factory"] = time.time() - start
+        start = time.time()
     else:
         layers, contours, masks = Importer(config).importRawData()
         #layerFactory = LayerFactory(config)
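Note: main() now threads a stats dict through the pipeline by re-reading time.time() after each stage. A compact equivalent of that pattern as a helper, illustrative only and not part of the repository:

import time

stats = {}

def timed(name, func, *args, **kwargs):
    start = time.time()
    result = func(*args, **kwargs)
    stats[name] = time.time() - start
    return result

# e.g. contours, masks = timed("Contour Extractor", ContourExtractor(config).extractContours)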
@@ -37,14 +49,18 @@ def main():

     layerManager = LayerManager(config, layers)
     layerManager.transformLayers()
+    stats["Layer Manager"] = time.time() - start
+    start = time.time()

     #layerManager.tagLayers()
     layers = layerManager.layers
     exporter = Exporter(config)
     print(f"Exporting {len(contours)} Contours and {len(layers)} Layers")
-    exporter.export(layers, contours, masks, raw=True, overlayed=True)
-
-    print("Total time: ", time.time() - start)
+    exporter.export(layers, contours, masks, raw=False, overlayed=True)
+    stats["Exporter"] = time.time() - start
+
+    print("Total time: ", time.time() - startTotal)
+    print(stats)
     exit(0)

 if __name__ == "__main__":
@@ -0,0 +1,22 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import pandas as pd
+[
+{"threads": "32, 16", "Contour Extractor": 106.0405707359314, "Layer Factory": 0.5175004005432129, "Layer Manager": 1.4864997863769531, "Exporter": 34.591999769210815},
+{"threads": "1, 1", "Contour Extractor": 463.66750025749207, "Layer Factory": 0.5290005207061768, "Layer Manager": 1.551999807357788, "Exporter": 34.5339994430542},
+{"threads": "1, 4", "Contour Extractor": 193.51898574829102, "Layer Factory": 0.5249984264373779, "Layer Manager": 1.5125021934509277, "Exporter": 35.06749963760376},
+{"threads": "4, 4", "Contour Extractor": 186.46951842308044, "Layer Factory": 0.5295009613037109, "Layer Manager": 1.545145034790039, "Exporter": 34.260000705718994},
+{"threads": "4, 8", "Contour Extractor": 126.90402793884277, "Layer Factory": 0.5275006294250488, "Layer Manager": 1.536078929901123, "Exporter": 34.61099886894226},
+{"threads": "16, 16", "Contour Extractor": 109.17592716217041, "Layer Factory": 0.4179983139038086, "Layer Manager": 1.5620002746582031, "Exporter": 33.80550146102905},
+
+]
+x = {
+    "threads":["1, 1","1, 4","4, 4","4, 8","16, 16", "32, 16"],
+    "Contour Extraction":[ 463.66750025749207, 193.51898574829102,186.46951842308044,126.90402793884277,109.17592716217041,106.0405707359314],
+}
+
+df = pd.DataFrame.from_dict(x)
+ax = df.plot.bar(x="threads", title="Benötigte Zeit für Konturenextraktion mit unterschiedlicher Anzahlen von Threads", figsize=(10,4))
+ax.set_xlabel("Threads für Durchschnittsbildung | Threads für Differenzberechnung")
+ax.set_ylabel("Zeit in Sekunden")
+plt.show()
@@ -0,0 +1,9 @@
+[
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "32, 16", "Contour Extractor": 106.0405707359314, "Layer Factory": 0.5175004005432129, "Layer Manager": 1.4864997863769531, "Exporter": 34.591999769210815},
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "1, 1", "Contour Extractor": 463.66750025749207, "Layer Factory": 0.5290005207061768, "Layer Manager": 1.551999807357788, "Exporter": 34.5339994430542},
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "1, 4", "Contour Extractor": 193.51898574829102, "Layer Factory": 0.5249984264373779, "Layer Manager": 1.5125021934509277, "Exporter": 35.06749963760376},
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "4, 4", "Contour Extractor": 186.46951842308044, "Layer Factory": 0.5295009613037109, "Layer Manager": 1.545145034790039, "Exporter": 34.260000705718994},
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "4, 8", "Contour Extractor": 126.90402793884277, "Layer Factory": 0.5275006294250488, "Layer Manager": 1.536078929901123, "Exporter": 34.61099886894226},
+{"File Name": "c:/projects/time compression\\output\\X23-1.txt", "threads": "16, 16", "Contour Extractor": 109.17592716217041, "Layer Factory": 0.4179983139038086, "Layer Manager": 1.5620002746582031, "Exporter": 33.80550146102905},
+
+]
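Note: the plotting script above hard-codes the contour-extraction times that the stats file just added also records. Below is a sketch of reading them from that file instead; ast.literal_eval is used because the committed file ends with a trailing comma, which strict JSON parsers reject, and the path is a placeholder.

import ast
import pandas as pd
import matplotlib.pyplot as plt

with open("output/stats.txt") as f:          # placeholder path
    runs = ast.literal_eval(f.read())

df = pd.DataFrame(runs)[["threads", "Contour Extractor"]]
ax = df.plot.bar(x="threads", figsize=(10, 4))
ax.set_ylabel("seconds")
plt.show()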
Binary file not shown (image changed: 364 KiB before, 504 KiB after).