This repository has been archived by the owner on Dec 3, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
recognize.py
83 lines (70 loc) · 2.9 KB
/
recognize.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from cv2 import aruco
import cv2
import numpy as np
import argparse
import json
# Command-line interface. The video source is either a movie file (-i/--input)
# or a live camera index (-c/--camera); the two are mutually exclusive.
cli = argparse.ArgumentParser(description='Recognize markers in the video')
source = cli.add_mutually_exclusive_group()
source.add_argument('-i', '--input', type=str, help='Input Movie')
source.add_argument('-c', '--camera', type=int, default=0, help='Input Camera')
cli.add_argument('--dict', type=str, default="4X4_50",
                 help='The ArUco marker dictionary used in the movie')
cli.add_argument('-m', '--camera-matrix', type=str, required=True,
                 help='The path to npy file contains the camera matrix')
cli.add_argument('-d', '--dist-coeff', type=str, required=True,
                 help='The path to npy file contains the distortion coefficients')
cli.add_argument('-s', '--size', type=float, required=True,
                 help='The length of the markers\' side')
cli.add_argument('-o', '--output', type=str, help='Output path')
cli.add_argument('--output-video', type=str,
                 help='Output the processes video to the specified path')
cli.add_argument('--output-codec', type=str, default='XVID',
                 help='The fourcc code to output the video')
args = cli.parse_args()
# Resolve the marker dictionary named on the command line,
# e.g. "4X4_50" -> aruco.DICT_4X4_50.
dictionary = aruco.getPredefinedDictionary(getattr(aruco, "DICT_" + args.dict))
# Camera calibration data produced beforehand and stored as .npy arrays.
cameraMatrix = np.load(args.camera_matrix)
distCoeff = np.load(args.dist_coeff)
# Per-frame detection results; one dict per frame, keyed by marker id.
result = []
# Fall back to the camera index when no input movie was given.
cap = cv2.VideoCapture(args.input if args.input else args.camera)
ret, frame = cap.read()
# Fix: the original used frame.shape unchecked, which raises a bare
# AttributeError on None when the source cannot be opened or is empty.
# Fail fast with a clear message instead.
if not ret or frame is None:
    raise SystemExit("error: could not read a frame from the input source")
height, width, _ = frame.shape
fps = cap.get(cv2.CAP_PROP_FPS)
# NOTE(review): frame count is only meaningful for file input; for a live
# camera OpenCV typically reports 0 or -1 here.
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
if args.output_video:
    fourcc = cv2.VideoWriter_fourcc(*args.output_codec)
    out = cv2.VideoWriter(args.output_video, fourcc, fps, (width, height))
# Main loop: detect markers in every frame, estimate their pose, and
# optionally write an annotated copy of the frame to the output video.
# NOTE(review): the original indentation was lost in extraction. This
# reconstruction keeps the drawing code inside the marker-found branch —
# it reads rvecs/tvecs, which would be undefined (NameError) if it ran on
# a frame where no markers were ever detected — and writes every frame to
# the output video so its length matches the input. Confirm against the
# original layout if available.
while ret:
    corners, ids, _ = aruco.detectMarkers(
        frame, dictionary, cameraMatrix=cameraMatrix, distCoeff=distCoeff)
    if ids is None:
        # No markers in this frame; keep the per-frame alignment of `result`.
        result.append({})
    else:
        rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(
            corners, args.size, cameraMatrix, distCoeff)
        result.append({int(idn): {"rotation": rvec[0].tolist(),
                                  "transform": tvec[0].tolist()}
                       for (rvec, tvec, idn) in zip(rvecs, tvecs, ids)})
        if args.output_video:
            # Annotate detected markers and draw a 0.1-unit pose axis each.
            aruco.drawDetectedMarkers(frame, corners, ids, (0,255,0))
            for rvec, tvec in zip(rvecs, tvecs):
                aruco.drawAxis(frame, cameraMatrix, distCoeff, rvec, tvec, 0.1)
    if args.output_video:
        out.write(frame)
    ret, frame = cap.read()
# Release the capture (and writer, if any) before serializing the results.
cap.release()
if args.output_video:
    out.release()
cv2.destroyAllWindows()
# Assemble the output document: capture/calibration metadata plus the
# per-frame detection results collected in the loop above.
metadata = {
    "frame": {
        "width": width,
        "height": height,
        "fps": fps,
        "count": frame_count,
    },
    "camera": {
        "matrix": cameraMatrix.tolist(),
        "distortion": distCoeff.tolist(),
    },
    "marker": {
        "dict": args.dict,
        "size": args.size,
    },
}
data = {"metadata": metadata, "data": result}
# Write JSON to the requested path, or dump it to stdout when -o was omitted.
if args.output:
    with open(args.output, 'w') as f:
        json.dump(data, f)
else:
    print(json.dumps(data))