-
Notifications
You must be signed in to change notification settings - Fork 2
/
online_debug.py
162 lines (137 loc) · 7.22 KB
/
online_debug.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
import os
import time
import online
import resize
import utils
import kmeans_tree
from parameters import *
import cv2 as cv
import numpy as np
import parallelize
import db_image
from collections import OrderedDict
# Pickled k-means vocabulary trees to evaluate; main() loads each in turn.
trees = ["sift_all_tree.p"]
# Root folder holding the test images, or sub-folders of test images
# (see _look_for_sub_test_folders). Trailing slash is required: paths
# below are built by plain string concatenation.
test_folder = "testset/"
def _look_for_sub_test_folders(test_folder):
# Check if there are sub test folders in the given test_folder
# If there are sub folders, use these as test_folders
# If there are no sub folders, use the given test_folder in test_folders
try:
test_folders = [test_folder + name_folder + "/" for name_folder in os.listdir(test_folder)]
if len(test_folders) == 0:
test_folders = [test_folder]
except Exception: # only a main folder
test_folders = [test_folder]
return test_folders
def main():
    """Evaluate each k-means tree in ``trees`` against every test folder.

    For every test image: preprocess, extract SIFT keypoints/descriptors,
    run the tree's initial scoring, then a final (geometric) scoring, and
    accumulate accuracy/certainty statistics plus per-phase timings.
    Results are printed per folder; timing percentages per tree.

    NOTE(review): original indentation was lost in extraction; nesting
    below is reconstructed from syntax — confirm against the upstream
    file, in particular that the certain/uncertain bookkeeping belongs
    inside the ``if first:`` branch (i.e. only the top-ranked match per
    image is counted).
    """
    # Check if there are sub folders
    test_folders = _look_for_sub_test_folders(test_folder)
    for tree_path in trees:
        tree = kmeans_tree.KMeansTree(tree_path)  # load tree
        # keep track of time performance
        start_total = time.time()
        t0, t1, t2, t3, t4, t5 = 0, 0, 0, 0, 0, 0
        # t0 = Preprocessing images
        # t1 = Calculating kp & des of test images
        # t2 = Initial Scoring
        # t3 = Accuracy calculations for debug information
        # t4 = Final Scoring
        # t5 = Certainty calculations
        processed_images = 0
        # NOTE(review): if test_folders is empty or all folders are empty,
        # processed_images stays 0 and the average-time division below
        # raises ZeroDivisionError (sum_times may be 0 too) — acceptable
        # for a debug script, but worth confirming.
        for folder in test_folders:
            start = time.time()
            # Get all paths in the current folder and grayscale all these images
            paths = [folder + file_name for file_name in os.listdir(folder)]
            parallelize.parallelize_resize(paths)
            t0 += time.time() - start
            processed_images += len(paths)
            # Keep track of results
            result = dict()  # keep track of at which index the correct results is after processing the index search results
            percentage_correct = dict()  # keep track of the certainty percentages of every test image that was correct
            percentage_incorrect = dict()  # keep track of the certainty percentages of every test image that was incorrect
            certain = [0, 0]  # [correct, incorrect] of all test images who got the boolean value certain=True
            uncertain = [0, 0]  # [correct, incorrect] of all test images who got the boolean value certain=False
            no_result = [0, 0]  # [No db match, Has db match] of all test images with no result after geometrical verification
            print("Start processing folder '{}' with tree '{}'".format(folder, tree_path))
            for path in paths:
                try:
                    # Read image and calculate kp and des
                    start = time.time()
                    img = cv.imread(path, cv.IMREAD_GRAYSCALE)
                    kp, des = utils.sift.detectAndCompute(img, None)
                    t1 += time.time() - start
                    # Initial scoring
                    start = time.time()
                    indices, scores = tree.initial_scoring(des)
                    t2 += time.time() - start
                    # Accuracy calculations
                    start = time.time()
                    # Ground-truth id is encoded in the filename (stem is an
                    # int); images under a "Junk" path have no db match.
                    if "Junk" not in path:
                        correct_id = int(path[path.rfind("/") + 1:-4])
                    else:
                        correct_id = -1
                    # Rank candidates by descending initial score and find
                    # the rank of the ground-truth image (-1 if absent).
                    sort = sorted(zip(indices, scores), key=lambda item: item[1], reverse=True)
                    correct_index = -1
                    if correct_id != -1:
                        for index, item in enumerate(sort):
                            if item[0] == correct_id:
                                correct_index = index
                                break
                    result[correct_index] = result.get(correct_index, 0) + 1
                    t3 += time.time() - start
                    # Only use the best NB_OF_IMAGES_CONSIDERED in final scoring
                    start = time.time()
                    considered = np.argpartition(scores, -NB_OF_IMAGES_CONSIDERED)[-NB_OF_IMAGES_CONSIDERED:]
                    final_result = online.final_scoring(kp, des, indices[considered])
                    t4 += time.time() - start
                    start = time.time()
                    if len(final_result) > 0:
                        # Calculate certainty percentages: keep candidates
                        # scoring at least 20% of the best final score.
                        minimal_value = max(final_result.values()) * 0.2
                        good = {k: v for k, v in final_result.items() if v >= minimal_value}
                        sum_values = sum(good.values())
                        # Keep track of accuracies
                        first = True
                        for key, value in sorted(good.items(), key=lambda item: item[1], reverse=True):
                            # Heuristic certainty score; constants 100/-5
                            # presumably tuned empirically — TODO confirm.
                            certainty_percentage = min(100, value - 5) * value / sum_values
                            if first:
                                # Only the top-ranked candidate contributes
                                # to the accuracy/certainty counters.
                                first = False
                                if key == correct_id:
                                    percentage_correct[certainty_percentage] = percentage_correct.get(certainty_percentage, 0) + 1
                                else:
                                    percentage_incorrect[certainty_percentage] = percentage_incorrect.get(certainty_percentage, 0) + 1
                                # Certain/Uncertain as boolean value
                                if sum_values < 105 or certainty_percentage < 50:
                                    if key == correct_id:
                                        uncertain[0] += 1
                                    else:
                                        uncertain[1] += 1
                                else:
                                    if key == correct_id:
                                        certain[0] += 1
                                    else:
                                        certain[1] += 1
                    else:
                        # Geometric verification rejected everything:
                        # split by whether a db match actually existed.
                        if correct_index == -1:
                            no_result[0] += 1
                        else:
                            no_result[1] += 1
                    t5 += time.time() - start
                except Exception as e:
                    # Best-effort debug harness: log the failing image and
                    # keep processing the rest of the folder.
                    print("Error at {} with error: {}".format(path, e))
            # Print accuracy results per folder
            print("position index", OrderedDict(sorted(result.items())))
            print("Certainty percentage correct", OrderedDict(sorted(percentage_correct.items())))
            print("Certainty percentage incorrect", OrderedDict(sorted(percentage_incorrect.items())))
            print("Certain", certain)
            print("Uncertain", uncertain)
            print("No Result", no_result)
        # Print timing results per tree
        print("Average time per image: {:.2f}s.".format((time.time() - start_total)/processed_images))
        # Convert each phase time into a percentage of the total timed work.
        sum_times = (t0 + t1 + t2 + t3 + t4 + t5)/100
        t0, t1, t2, t3, t4, t5 = t0/sum_times, t1/sum_times, t2/sum_times, t3/sum_times, t4/sum_times, t5/sum_times
        print("Percentages: Preprocessing images {:.2f}, Calculating kp & des {:.2f}, Initial Scoring {:.2f}, "
              "Accuracy Testing {:.2f}, Final Scoring {:.2f}, Certainty Calculation {:.2f}."
              .format(t0, t1, t2, t3, t4, t5))
# Script entry point: run the evaluation when executed directly.
if __name__ == "__main__":
    main()