# Video-Summary/Application/Classifiers/Classifier.py

# Code adapted from Tensorflow Object Detection Framework
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# Tensorflow Object Detection Detector
import numpy as np
import tensorflow as tf
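# NOTE: the nested DetectorAPI below relies on the TF1-style graph API
# (tf.GraphDef, tf.gfile.GFile, tf.Session); on TensorFlow 2.x these are
# only available under tf.compat.v1.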
import cv2
import os
import json
import imutils
from Application.Classifiers.ClassifierInterface import ClassifierInterface


class Classifier(ClassifierInterface):
    def __init__(self):
        self.model_path = os.path.join(os.path.dirname(__file__), "./class1.pb")
        self.odapi = self.DetectorAPI(path_to_ckpt=self.model_path)
        # minimum score a detection must reach before its class is counted
        self.threshold = 0.9
        with open(os.path.join(os.path.dirname(__file__), "coco_map.json")) as file:
            mapping = json.load(file)
        # map COCO class ids to their human-readable display names
        self.classes = dict()
        for element in mapping:
            self.classes[element["id"]] = element["display_name"]

    def detect(self, img):
        # get the results from the net
        boxes, scores, classes, num = self.odapi.process_frame(img)
        # return the display name of the first confident detection with a known class
        for i in range(len(boxes)):
            if scores[i] > self.threshold:
                if classes[i] in self.classes:
                    return self.classes[classes[i]]
        return None

    def tagLayer(self, data):
        res = []
        for cnts in data:
            for cnt in cnts:
                if cnt.any():
                    cv2.imshow("changes x", cnt)
                    cv2.waitKey(10)
                    cnt = imutils.resize(cnt, width=320)
                    x = self.detect(cnt)
                    res.append(x)
        # count how often each tag was detected across the layer
        di = dict()
        for re in res:
            if re not in di:
                di[re] = 0
            di[re] += 1
        # remove all tags that occur infrequently:
        # if a giraffe is only seen in 2 out of 100 frames, there probably wasn't a giraffe in the layer
        di.pop(None, None)
        if not di:
            return []
        result = []
        for key, value in di.items():
            if value > len(data) / len(di) / 2:
                result.append(key)
        return result

    # The DetectorAPI can be swapped out as long as its I/O stays the same,
    # so a different neural net can be used if desired.
    class DetectorAPI:
        def __init__(self, path_to_ckpt):
            self.path_to_ckpt = path_to_ckpt
            # let TensorFlow grow GPU memory on demand instead of reserving it all up front
            gpus = tf.config.experimental.list_physical_devices("GPU")
            if gpus:
                try:
                    for gpu in gpus:
                        tf.config.experimental.set_memory_growth(gpu, True)
                except RuntimeError as e:
                    print(e)

            # load the frozen inference graph from the .pb checkpoint
            self.detection_graph = tf.Graph()
            with self.detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(self.path_to_ckpt, "rb") as fid:
                    serialized_graph = fid.read()
                    od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(od_graph_def, name="")
            self.default_graph = self.detection_graph.as_default()
            self.sess = tf.Session(graph=self.detection_graph)

            # Define input and output Tensors for detection_graph
            self.image_tensor = self.detection_graph.get_tensor_by_name("image_tensor:0")
            # Each box represents a part of the image where a particular object was detected.
            self.detection_boxes = self.detection_graph.get_tensor_by_name("detection_boxes:0")
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            self.detection_scores = self.detection_graph.get_tensor_by_name("detection_scores:0")
            self.detection_classes = self.detection_graph.get_tensor_by_name("detection_classes:0")
            self.num_detections = self.detection_graph.get_tensor_by_name("num_detections:0")

        def process_frame(self, image):
            # Expand dimensions since the trained model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image, axis=0)
            # Actual detection.
            (boxes, scores, classes, num) = self.sess.run(
                [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: image_np_expanded},
            )

            # convert normalised box coordinates into absolute pixel coordinates
            im_height, im_width, _ = image.shape
            boxes_list = [None for i in range(boxes.shape[1])]
            for i in range(boxes.shape[1]):
                boxes_list[i] = (
                    int(boxes[0, i, 0] * im_height),
                    int(boxes[0, i, 1] * im_width),
                    int(boxes[0, i, 2] * im_height),
                    int(boxes[0, i, 3] * im_width),
                )
            return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

        def close(self):
            self.sess.close()
            self.default_graph.close()
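

# A minimal usage sketch (not part of the original module). It assumes that class1.pb
# and coco_map.json sit next to this file, and that tagLayer() receives a list of
# layers, where each layer is a list of BGR frames (numpy arrays) from the
# video-segmentation step. The all-zero frame below is just a placeholder, so no
# window is opened and no detection is attempted; the call prints an empty tag list.
if __name__ == "__main__":
    classifier = Classifier()
    dummy_frame = np.zeros((240, 320, 3), dtype=np.uint8)  # placeholder frame
    layers = [[dummy_frame]]  # one layer containing a single frame
    print(classifier.tagLayer(layers))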