diff --git a/Application/Classifiers/Classifier.py b/Application/Classifiers/Classifier.py
new file mode 100644
index 0000000..bca21d3
--- /dev/null
+++ b/Application/Classifiers/Classifier.py
@@ -0,0 +1,103 @@
+# Code adapted from the TensorFlow Object Detection framework
+# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
+# TensorFlow object detection based detector
+
+import numpy as np
+import tensorflow as tf
+import cv2
+
+from Application.Classifiers.ClassifierInterface import ClassifierInterface
+
+
+class Classifier(ClassifierInterface):
+    def __init__(self):
+        self.model_path = "./class1.pb"
+        self.odapi = DetectorAPI(path_to_ckpt=self.model_path)
+        self.threshold = 0.6
+        print("Classifier initiated")
+
+    def detect(self, stream):
+        cap = cv2.VideoCapture(stream)
+        r, img = cap.read()
+        if img is None:
+            return img, False
+
+        # scale the image down for faster processing
+        scale_percent = 60  # percent of original size
+        width = int(img.shape[1] * scale_percent / 100)
+        height = int(img.shape[0] * scale_percent / 100)
+        dim = (width, height)
+        img = cv2.resize(img, dim)
+
+        # get the results from the net
+        boxes, scores, classes, num = self.odapi.process_frame(img)
+
+        # draw a recognition box around every confident detection, then
+        # return the annotated image together with a found/not-found flag
+        res = False
+        for i in range(len(boxes)):
+            # class 1 represents "human"
+            if classes[i] == 1 and scores[i] > self.threshold:
+                box = boxes[i]
+                cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)
+                res = True
+        return img, res
+
+    def tagLayers(self, layers):
+        print("tagging")
+
+
+# The DetectorAPI can be swapped out as long as its I/O stays the same,
+# so a different neural net can be used if desired.
+class DetectorAPI:
+    def __init__(self, path_to_ckpt):
+        self.path_to_ckpt = path_to_ckpt
+
+        self.detection_graph = tf.Graph()
+        with self.detection_graph.as_default():
+            od_graph_def = tf.GraphDef()
+            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
+                serialized_graph = fid.read()
+                od_graph_def.ParseFromString(serialized_graph)
+                tf.import_graph_def(od_graph_def, name='')
+
+        self.default_graph = self.detection_graph.as_default()
+        self.sess = tf.Session(graph=self.detection_graph)
+
+        # Input and output tensors for detection_graph
+        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
+        # Each box represents a part of the image where a particular object was detected.
+        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
+        # Each score represents the confidence for the corresponding object.
+        # The score is shown on the result image together with the class label.
+        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
+        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
+        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
+
+    def process_frame(self, image):
+        # Expand dimensions since the trained model expects images with shape [1, None, None, 3]
+        image_np_expanded = np.expand_dims(image, axis=0)
+        # Actual detection.
+        (boxes, scores, classes, num) = self.sess.run(
+            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
+            feed_dict={self.image_tensor: image_np_expanded})
+
+        # rescale the normalized box coordinates to pixel coordinates
+        im_height, im_width, _ = image.shape
+        boxes_list = [None for i in range(boxes.shape[1])]
+        for i in range(boxes.shape[1]):
+            boxes_list[i] = (
+                int(boxes[0, i, 0] * im_height),
+                int(boxes[0, i, 1] * im_width),
+                int(boxes[0, i, 2] * im_height),
+                int(boxes[0, i, 3] * im_width)
+            )
+
+        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])
+
+    def close(self):
+        self.sess.close()
+        self.default_graph.close()
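Note on the new Classifier: a minimal usage sketch, assuming a video file path and assuming the hard-coded "./class1.pb" resolves from the working directory (both assumptions, not part of this diff):

    import cv2
    from Application.Classifiers.Classifier import Classifier

    clf = Classifier()                          # loads ./class1.pb into a TF session
    img, found = clf.detect("some_video.mp4")   # reads one frame, resized to 60 %
    if found:                                   # a person scored above threshold 0.6
        cv2.imwrite("detection.png", img)       # boxes are drawn in blue (BGR)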
diff --git a/Application/Classifiers/ClassifierInterface.py b/Application/Classifiers/ClassifierInterface.py
new file mode 100644
index 0000000..1aa11bc
--- /dev/null
+++ b/Application/Classifiers/ClassifierInterface.py
@@ -0,0 +1,6 @@
+
+
+class ClassifierInterface:
+    def tagLayers(self, layers):
+        """Takes layers; returns a list (same length as the input) of lists with tags for the corresponding layers."""
+        pass
\ No newline at end of file
diff --git a/Application/Classifiers/class1.pb b/Application/Classifiers/class1.pb
new file mode 100644
index 0000000..e0f7580
Binary files /dev/null and b/Application/Classifiers/class1.pb differ
diff --git a/Application/Config.py b/Application/Config.py
index 5801fa7..3a4d2bf 100644
--- a/Application/Config.py
+++ b/Application/Config.py
@@ -3,7 +3,7 @@ class Config:
     c = {
         "min_area" : 500,
         "max_area" : 40000,
-        "threashold" : 10,
+        "threashold" : 5,
         "resizeWidth" : 512,
         "inputPath" : None,
         "outputPath": None,
diff --git a/Application/Exporter.py b/Application/Exporter.py
index f80252f..8f936ba 100644
--- a/Application/Exporter.py
+++ b/Application/Exporter.py
@@ -16,12 +16,18 @@ class Exporter:
         self.config = config
         print("Exporter initiated")
 
-    def export(self):
-        fps = self.fps
-        writer = imageio.get_writer(outputPath, fps=fps)
-        for frame in frames:
-            writer.append_data(np.array(frame))
-        writer.close()
+    def export(self, layers, raw = True, layered = False, overlayed = True):
+        if raw:
+            self.exportRawData(layers)
+        if layered and overlayed:
+            print("Layered and overlayed are mutually exclusive; layered was chosen automatically")
+            overlayed = False
+        if layered and not overlayed:
+            self.exportLayers(layers)
+        if overlayed and not layered:
+            self.exportOverlayed(layers)
 
     def exportLayers(self, layers):
@@ -98,7 +104,7 @@ class Exporter:
             h = int(h * factor)
             # with exportFrame as the index instead of frameCount - layer.startFrame, the layers would be written one after another
             frame2 = frames[frameCount - layer.startFrame]
-            frame2[y:y+h, x:x+w] = frame[y:y+h, x:x+w]
+            # blend the existing and the incoming crop 50/50 so overlapping layers stay visible
+            frame2[y:y+h, x:x+w] = frame2[y:y+h, x:x+w]/2 + frame[y:y+h, x:x+w]/2
             frames[frameCount - layer.startFrame] = np.copy(frame2)
             cv2.putText(frames[frameCount - layer.startFrame], str(int(frameCount/self.fps)), (int(x+w/2), int(y+h/2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
diff --git a/Application/Layer.py b/Application/Layer.py
index 26e8acd..3899f36 100644
--- a/Application/Layer.py
+++ b/Application/Layer.py
@@ -90,7 +90,7 @@ class Layer:
             if y > maxm:
                 maxm = y
 
-        if maxm > len(mapped)*(noiseSensitivity):
+        # only request another cluster while k-means can still provide one
+        if maxm > len(mapped)*(noiseSensitivity) and clusterCount + 1 <= len(kmeans.cluster_centers_):
             clusterCount += 1
         else:
             centers = kmeans.cluster_centers_
diff --git a/Application/LayerFactory.py b/Application/LayerFactory.py
index 83ba8d0..b1434c6 100644
--- a/Application/LayerFactory.py
+++ b/Application/LayerFactory.py
@@ -22,44 +22,6 @@ class LayerFactory:
         if data is not None:
             self.extractLayers(data)
 
-    def removeStaticLayers(self):
-        '''Removes Layers with little to no movement'''
-        layers = []
-        for i, layer in enumerate(self.layers):
-            checks = 0
-            for bound in layer.bounds[0]:
-                if bound[0] is None:
-                    continue
-                for bound2 in layer.bounds[-1]:
-                    if bound2[0] is None:
-                        continue
-                    if abs(bound[0] - bound2[0]) < 10:
-                        checks += 1
-                    if abs(bound[1] - bound2[1]) < 10:
-                        checks += 1
-            if checks <= 2:
-                layers.append(layer)
-        self.layers = layers
-
-
-    def freeMin(self):
-        self.data.clear()
-        layers = []
-        for l in self.layers:
-            if l.getLength() > self.minLayerLength:
-                layers.append(l)
-        self.layers = layers
-        self.removeStaticLayers()
-
-    def freeMax(self):
-        layers = []
-        for l in self.layers:
-            if l.getLength() < self.maxLayerLength:
-                layers.append(l)
-        self.layers = layers
-        self.removeStaticLayers()
-
-
     def extractLayers(self, data = None):
         if self.data is None:
             if data is None:
@@ -86,10 +48,6 @@ class LayerFactory:
             #for x in tmp:
                 #self.getLayers(x)
 
-        self.freeMin()
-        self.sortLayers()
-        self.cleanLayers()
-        self.freeMax()
         return self.layers
@@ -128,38 +86,3 @@ class LayerFactory:
         if(l1[1] <= r2[1] or l2[1] <= r1[1]):
             return False
         return True
-
-    def fillLayers(self):
-
-        listOfFrames = Exporter(self.config).makeListOfFrames(self.layers)
-        videoReader = VideoReader(self.config, listOfFrames)
-        videoReader.fillBuffer()
-
-        while not videoReader.videoEnded():
-            frameCount, frame = videoReader.pop()
-            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            for i, layer in enumerate(self.layers):
-                if i % 20 == 0:
-                    print(f"filled {int(round(i/len(self.layers),2)*100)}% of all Layers")
-
-                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
-                    data = []
-                    for (x, y, w, h) in layer.bounds[frameCount - layer.startFrame]:
-                        if x is None:
-                            break
-                        factor = videoReader.w / self.resizeWidth
-                        x = int(x * factor)
-                        y = int(y * factor)
-                        w = int(w * factor)
-                        h = int(h * factor)
-                        data.append(np.copy(frame[y:y+h, x:x+w]))
-                    layer.data.append(data)
-
-        videoReader.thread.join()
-
-    def sortLayers(self):
-        self.layers.sort(key = lambda c:c.startFrame)
-
-    def cleanLayers(self):
-        for layer in self.layers:
-            layer.clusterDelete()
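Note on the Exporter blend in the hunk above: frame2[...]/2 + frame[...]/2 divides the uint8 crops in float and relies on the implicit cast back to uint8 on assignment. A saturation-safe equivalent using OpenCV would be the following sketch (illustration only; frame, frame2, x, y, w, h come from the surrounding loop):

    import cv2
    blended = cv2.addWeighted(frame2[y:y+h, x:x+w], 0.5, frame[y:y+h, x:x+w], 0.5, 0)
    frame2[y:y+h, x:x+w] = blended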
diff --git a/Application/LayerManager.py b/Application/LayerManager.py
new file mode 100644
index 0000000..36ffad7
--- /dev/null
+++ b/Application/LayerManager.py
@@ -0,0 +1,98 @@
+from Application.Layer import Layer
+from Application.Config import Config
+from Application.VideoReader import VideoReader
+from Application.Exporter import Exporter
+from multiprocessing.pool import ThreadPool
+import cv2
+import numpy as np
+
+
+class LayerManager:
+    def __init__(self, config, layers):
+        self.data = {}
+        self.layers = layers
+        self.tolerance = config["tolerance"]
+        self.ttolerance = config["ttolerance"]
+        self.minLayerLength = config["minLayerLength"]
+        self.maxLayerLength = config["maxLayerLength"]
+        self.resizeWidth = config["resizeWidth"]
+        self.footagePath = config["inputPath"]
+        self.config = config
+        print("LayerManager constructed")
+
+    def cleanLayers(self):
+        self.freeMin()
+        self.sortLayers()
+        # note: the per-layer cluster cleanup must not also be named
+        # cleanLayers, or it would shadow this pipeline method and
+        # turn this call into infinite recursion
+        self.clusterCleanLayers()
+        self.freeMax()
+
+    def removeStaticLayers(self):
+        '''Removes layers with little to no movement'''
+        layers = []
+        for i, layer in enumerate(self.layers):
+            checks = 0
+            for bound in layer.bounds[0]:
+                if bound[0] is None:
+                    continue
+                for bound2 in layer.bounds[-1]:
+                    if bound2[0] is None:
+                        continue
+                    if abs(bound[0] - bound2[0]) < 10:
+                        checks += 1
+                    if abs(bound[1] - bound2[1]) < 10:
+                        checks += 1
+            if checks <= 2:
+                layers.append(layer)
+        self.layers = layers
+
+    def freeMin(self):
+        self.data.clear()
+        layers = []
+        for l in self.layers:
+            if l.getLength() > self.minLayerLength:
+                layers.append(l)
+        self.layers = layers
+        self.removeStaticLayers()
+
+    def freeMax(self):
+        layers = []
+        for l in self.layers:
+            if l.getLength() < self.maxLayerLength:
+                layers.append(l)
+        self.layers = layers
+        self.removeStaticLayers()
+
+    def fillLayers(self):
+        listOfFrames = Exporter(self.config).makeListOfFrames(self.layers)
+        videoReader = VideoReader(self.config, listOfFrames)
+        videoReader.fillBuffer()
+
+        while not videoReader.videoEnded():
+            frameCount, frame = videoReader.pop()
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            for i, layer in enumerate(self.layers):
+                if i % 20 == 0:
+                    print(f"filled {int(round(i/len(self.layers),2)*100)}% of all Layers")
+
+                if layer.startFrame <= frameCount and layer.startFrame + len(layer.bounds) > frameCount:
+                    data = []
+                    for (x, y, w, h) in layer.bounds[frameCount - layer.startFrame]:
+                        if x is None:
+                            break
+                        factor = videoReader.w / self.resizeWidth
+                        x = int(x * factor)
+                        y = int(y * factor)
+                        w = int(w * factor)
+                        h = int(h * factor)
+                        data.append(np.copy(frame[y:y+h, x:x+w]))
+                    layer.data.append(data)
+
+        videoReader.thread.join()
+
+    def sortLayers(self):
+        self.layers.sort(key = lambda c: c.startFrame)
+
+    def clusterCleanLayers(self):
+        for layer in self.layers:
+            layer.clusterDelete()
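Note on LayerManager: the cleanup that LayerFactory.extractLayers used to run implicitly must now be triggered by the caller; a sketch of the intended order (names as defined above, config and layers assumed to exist):

    layerManager = LayerManager(config, layers)
    layerManager.cleanLayers()    # freeMin -> sortLayers -> clusterCleanLayers -> freeMax
    layers = layerManager.layers  # freeMin/freeMax also drop near-static layers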
diff --git a/main.py b/main.py
index 67f0645..57e5313 100644
--- a/main.py
+++ b/main.py
@@ -7,6 +7,8 @@ from Application.Analyzer import Analyzer
 from Application.Config import Config
 from Application.Importer import Importer
 from Application.VideoReader import VideoReader
+from Application.LayerManager import LayerManager
+from Application.Classifiers import *
 
 #TODO
 # find relevant sections based on metrics (still to be defined) for comparison images
@@ -15,9 +17,8 @@
 def demo():
     start = time.time()
     config = Config()
-    config["inputPath"] = os.path.join(os.path.dirname(__file__), "generate test footage/3.mp4")
-    #config["importPath"] = os.path.join(os.path.dirname(__file__), "output/short.txt")
+    config["importPath"] = os.path.join(os.path.dirname(__file__), "output/short.txt")
     config["outputPath"] = os.path.join(os.path.dirname(__file__), "output/short.mp4")
 
     vr = VideoReader(config)
@@ -31,13 +32,14 @@ def demo():
         layerFactory = LayerFactory(config)
         layers = layerFactory.extractLayers(contours)
-        #layerFactory.fillLayers()
+        layerManager = LayerManager(config, layers)
+        layerManager.cleanLayers()
+        layers = layerManager.layers
     else:
         layers = Importer(config).importRawData()
 
     exporter = Exporter(config)
-    exporter.exportRawData(layers)
-    exporter.exportLayers(layers)
+    exporter.export(layers)
 
     print("Total time: ", time.time() - start)
diff --git a/output/short.txt b/output/short.txt
index 813e3d1..83597bd 100644
Binary files a/output/short.txt and b/output/short.txt differ
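Note on Exporter.export: the demo calls it with its defaults (raw=True, overlayed=True). A sketch of the other flag combinations, derived from the branching in export above (exporter, config and layers assumed to exist):

    exporter = Exporter(config)
    exporter.export(layers)                            # raw dump + overlayed video (defaults)
    exporter.export(layers, raw=False, layered=True)   # layered wins, overlayed is switched off
    exporter.export(layers, raw=True, layered=False, overlayed=False)   # raw dump only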