TensorFlowPeopleTracker.py
# Code adapted from Tensorflow Object Detection Framework
# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
# TensorFlow Object Detection API based people detector
import numpy as np
import tensorflow as tf
import cv2
import time
from imutils.video import FPS
import UtilsIO
import config  # imported but not used in this script
import VizionFTrackers

class DetectorAPI:
    def __init__(self, path_to_ckpt):
        self.path_to_ckpt = path_to_ckpt

        # Load the frozen inference graph (TF 1.x API; under TF 2.x use
        # tf.compat.v1 for GraphDef, gfile and Session).
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')

        self.sess = tf.Session(graph=self.detection_graph)

        # Define input and output tensors for detection_graph.
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represents the confidence level for the corresponding object.
        # The score is shown on the result image together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

    def processFrame(self, image):
        # Expand dimensions since the trained model expects images to have shape [1, None, None, 3].
        image_np_expanded = np.expand_dims(image, axis=0)

        # Actual detection.
        start_time = time.time()
        (boxes, scores, classes, num) = self.sess.run(
            [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
            feed_dict={self.image_tensor: image_np_expanded})
        end_time = time.time()
        print("Elapsed Time:", end_time - start_time)

        # Convert normalized box coordinates to pixel coordinates (top, left, bottom, right).
        im_height, im_width, _ = image.shape
        boxes_list = [None for i in range(boxes.shape[1])]
        for i in range(boxes.shape[1]):
            boxes_list[i] = (int(boxes[0, i, 0] * im_height),
                             int(boxes[0, i, 1] * im_width),
                             int(boxes[0, i, 2] * im_height),
                             int(boxes[0, i, 3] * im_width))

        return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])

    def close(self):
        # Closing the session releases the graph resources.
        self.sess.close()
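
# A minimal standalone usage sketch for DetectorAPI (illustrative only:
# 'person.jpg' is a hypothetical input path, and the checkpoint path comes
# from UtilsIO as in the main block below):
#
#   odapi = DetectorAPI(path_to_ckpt=UtilsIO.TRAINED_MODEL_TENSORFLOW)
#   frame = cv2.imread('person.jpg')
#   boxes, scores, classes, num = odapi.processFrame(frame)
#   people = [boxes[i] for i in range(num)
#             if classes[i] == 1 and scores[i] > 0.7]
#   odapi.close()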

if __name__ == "__main__":
    model_path = UtilsIO.TRAINED_MODEL_TENSORFLOW
    odapi = DetectorAPI(path_to_ckpt=model_path)
    threshold = 0.7
    cap = cv2.VideoCapture(UtilsIO.SAMPLE_FILE_NAME_2)

    totalFrames = 0
    initBB = None
    # Create the tracker once; it is re-created and re-initialized whenever a
    # new ROI is selected (creating it every frame would discard its state).
    tracker = VizionFTrackers.createTracker(VizionFTrackers.MOSSE_TRACKER)
    fps = None

    while True:
        r, img = cap.read()
        if not r:
            # End of the video stream.
            break
        img = cv2.resize(img, (1280, 720))

        # Run the (expensive) detector only on every 10th frame.
        if totalFrames % 10 == 0:
            boxes, scores, classes, num = odapi.processFrame(img)

            # Visualization of the detection results.
            for i in range(len(boxes)):
                # Class 1 represents "person" in the COCO label map.
                if classes[i] == 1 and scores[i] > threshold:
                    box = boxes[i]
                    cv2.rectangle(img, (box[1], box[0]), (box[3], box[2]), (255, 0, 0), 2)

        if initBB is not None:
            # Grab the new bounding box coordinates of the tracked object.
            (success, box) = tracker.update(img)
            print("success", success)

            # Check to see if the tracking was a success.
            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(img, (x, y), (x + w, y + h),
                              (0, 255, 0), 2)

            # Update the FPS counter.
            fps.update()
            fps.stop()
            VizionFTrackers.setFrameInfo(VizionFTrackers.MOSSE_TRACKER, success, fps, img)

        cv2.imshow("preview", img)
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break
        if key == ord("s"):
            # Select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI).
            initBB = cv2.selectROI("preview", img, fromCenter=False,
                                   showCrosshair=False)

            # Re-create the OpenCV object tracker, initialize it with the
            # supplied bounding box, then start the FPS throughput estimator.
            tracker = VizionFTrackers.createTracker(VizionFTrackers.MOSSE_TRACKER)
            tracker.init(img, initBB)
            fps = FPS().start()

        totalFrames += 1