-
Notifications
You must be signed in to change notification settings - Fork 2
/
area_data_gen.py
172 lines (137 loc) · 4.12 KB
/
area_data_gen.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
#
# Include Setting
#
import cv2
import pandas
import numpy as np
import glob
import os
import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as mpatches
from matplotlib.path import Path
from skimage.draw import line, bezier_curve
from tqdm import tqdm
#
# Data Path Setting
#
# BUG FIX: the original base path mixed backslashes into a normal (non-raw)
# string ("...\bdd100k\images\100k"), so "\b" was parsed as a backspace
# character and "\100" as an octal escape, silently corrupting the path.
# It also lacked the trailing "/" that the concatenations below rely on.
DATA_PATH_BASE = "D:/Datasets/Berkly/bdd100k_images/bdd100k/images/100k/"
INPUT_DATA_BASE = DATA_PATH_BASE + "input/"
INPUT_IMAGES = INPUT_DATA_BASE + "images/"
INPUT_LABELS = INPUT_DATA_BASE + "labels/"
TRAIN_IMAGES = INPUT_IMAGES + "train/"
VAL_IMAGES = INPUT_IMAGES + "val/"
TEST_IMAGES = INPUT_IMAGES + "test/"
TRAIN_LABELS = INPUT_LABELS + "bdd100k_labels_images_train.json"
VAL_LABELS = INPUT_LABELS + "bdd100k_labels_images_val.json"
#
# Data State
#
# NOTE(review): "OFFESET" is a typo for "OFFSET"; the name is kept unchanged
# for backward compatibility and appears unused in this file.
STATE_OFFESET = 0
STATE_INDEX = 0  # running index used to number the saved .npy output files
#
# Settings
#
VAL_LOAD = 4000      # number of validation label entries to convert
TRAIN_LOAD = 20000   # number of training label entries to convert
DOWNSCALE = 1        # divisor applied to the native 1280x720 label canvas
#
# Load Labels into Memory
#
def load_label_file(path):
    """Load a BDD100k JSON label file and return the parsed object.

    Parameters
    ----------
    path : str
        Path to a JSON labels file (a list of per-image entries).

    Returns
    -------
    The deserialized JSON content (typically a list of dicts).
    """
    # Explicit encoding: the platform default is not guaranteed to be UTF-8,
    # and the BDD100k label files are UTF-8 encoded JSON.
    with open(path, encoding="utf-8") as json_file:
        return json.load(json_file)
# Load the validation-split labels up front (typo "LABELES" fixed).
print("--- LOADING VALIDATION LABELS ---")
val_file = load_label_file(VAL_LABELS)
#
# Parse Labels
#
def _poly_to_verts_codes(polygon):
    """Convert one BDD poly2d record into a [vertices, Path codes] pair.

    Maps the BDD per-vertex type chars ('L' line, 'C' cubic Bezier) to
    matplotlib Path codes, forces the first code to MOVETO, and closes the
    polygon when flagged. Works on a copy so the caller's data is untouched.
    """
    verts = list(polygon['vertices'])
    moves = {'L': Path.LINETO, 'C': Path.CURVE4}
    codes = [moves[t] for t in polygon['types']]
    codes[0] = Path.MOVETO  # a path must start with a move, not a draw
    if polygon["closed"]:
        verts.append(verts[0])  # CLOSEPOLY needs a (ignored) terminal vertex
        codes.append(Path.CLOSEPOLY)
    return [verts, codes]


def parse_label(entry):
    """Extract drivable-area polygons from one BDD100k image entry.

    Parameters
    ----------
    entry : dict
        One element of the BDD labels JSON, with 'name' and 'labels' keys.

    Returns
    -------
    list
        A single-element list [[image_name, driveable, alt]] where
        'driveable' holds areaType == 'direct' polygons and 'alt' the rest,
        each as [vertices, codes] pairs.
    """
    image_name = entry['name']
    driveable = []
    alt = []
    for label in entry['labels']:
        # BUG FIX: the original used substring tests ("cat not in 'drivable
        # area'", "area_type in 'direct'"), which accept any substring such
        # as 'area' or 'direct'[:2]; exact equality is intended.
        if label['category'] != 'drivable area':
            continue
        target = driveable if label['attributes']['areaType'] == 'direct' else alt
        target.append(_poly_to_verts_codes(label['poly2d'][0]))
    return [[image_name, driveable, alt]]
#
# Image Generation
#
def _fill_areas(image, areas, color):
    """Rasterize each [verts, codes] polygon onto `image` in `color`.

    The Path/PathPatch round-trip flattens Bezier segments to straight
    vertices, which cv2.fillPoly then fills. Returns the updated image.
    """
    for verts, codes in areas:
        path = Path(verts, codes)
        patch = mpatches.PathPatch(path)
        points = np.array([patch.get_verts()], dtype=np.int32)
        image = cv2.fillPoly(image, points, color)
    return image


def label_to_image(label):
    """Render a parsed label entry into a 126x254x3 float mask image.

    Parameters
    ----------
    label : list
        [image_name, driveable, alt] as produced by parse_label().

    Returns
    -------
    numpy.ndarray
        126x254x3 float image; channel 1 (green) marks direct drivable
        areas, channel 0 marks alternative areas.
    """
    driveable = label[1]
    alt = label[2]
    # Draw on the native 1280x720 canvas (scaled by DOWNSCALE), then resize.
    canvas = np.zeros([int(720 / DOWNSCALE), int(1280 / DOWNSCALE), 3])
    canvas = _fill_areas(canvas, driveable, (0, 1.0, 0))
    canvas = _fill_areas(canvas, alt, (1.0, 0, 0))
    return cv2.resize(canvas, (254, 126))
def get_source(path):
    """Load a source image, min-max normalize to [0, 1], resize to 254x126.

    Parameters
    ----------
    path : str
        Filesystem path to the input image.

    Returns
    -------
    numpy.ndarray
        126x254x3 float image with values normalized to [0.0, 1.0].

    Raises
    ------
    FileNotFoundError
        If the image cannot be read.
    """
    image = cv2.imread(path)
    # BUG FIX: cv2.imread silently returns None for a missing/unreadable
    # file, which previously surfaced as a cryptic AttributeError below.
    if image is None:
        raise FileNotFoundError("could not read image: " + path)
    image = cv2.normalize(image.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
    return cv2.resize(image, (254, 126))
print("--- GENERATING VALIDATION DATA ---")
VAL_OUTPUT_DIR = "data/output/area/val/"
# np.save does not create parent directories; make sure the target exists.
os.makedirs(VAL_OUTPUT_DIR, exist_ok=True)
# Clamp to the file length so a short label file cannot raise IndexError.
for i in tqdm(range(min(VAL_LOAD, len(val_file)))):
    for label in parse_label(val_file[i]):
        # Each saved entry pairs the normalized source frame with its
        # rasterized drivable-area target mask.
        data_entry = [get_source(VAL_IMAGES + label[0]), label_to_image(label)]
        np.save(VAL_OUTPUT_DIR + "lane-" + str(STATE_INDEX) + ".npy", data_entry)
        STATE_INDEX += 1
# Typo "LABELES" fixed in both status messages below.
print("--- LOADING TRAINING LABELS ---")
train_file = load_label_file(TRAIN_LABELS)
STATE_INDEX = 0  # restart output numbering for the training split
print("--- GENERATING TRAINING DATA ---")
TRAIN_OUTPUT_DIR = "data/output/area/train/"
# np.save does not create parent directories; make sure the target exists.
os.makedirs(TRAIN_OUTPUT_DIR, exist_ok=True)
# Clamp to the file length so a short label file cannot raise IndexError.
for i in tqdm(range(min(TRAIN_LOAD, len(train_file)))):
    for label in parse_label(train_file[i]):
        # Each saved entry pairs the normalized source frame with its
        # rasterized drivable-area target mask.
        data_entry = [get_source(TRAIN_IMAGES + label[0]), label_to_image(label)]
        np.save(TRAIN_OUTPUT_DIR + "lane-" + str(STATE_INDEX) + ".npy", data_entry)
        STATE_INDEX += 1