-
Notifications
You must be signed in to change notification settings - Fork 0
/
trainer.py
145 lines (113 loc) · 4.42 KB
/
trainer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
# CODE THAT USES THE GIVEN DATASET TO TRAIN THE CNN MODEL
import numpy
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils
import glob
import cv2
from sklearn.utils import shuffle
import os
# fix random seed for reproducibility
# Seeding NumPy's global RNG makes the sklearn shuffle() calls below (which
# draw from this RNG) deterministic across runs. NOTE(review): Keras weight
# initialization may still vary unless the backend's own seed is set — confirm.
seed = 7
numpy.random.seed(seed)
# ---- Dataset loading -------------------------------------------------------
# Every gesture image is read as grayscale and resized to 50x50 so all
# samples share one shape before being stacked into numpy arrays.

def _load_gesture_images(pattern, size=(50, 50)):
    """Return a list of grayscale images resized to *size* for every file
    matching the glob *pattern*, sorted by filename for a stable order.

    Raises IOError with the offending path if a file cannot be decoded
    (cv2.imread returns None on failure, which previously crashed later
    inside cv2.resize with an unhelpful message).
    """
    images = []
    for path in sorted(glob.glob(pattern)):
        img = cv2.imread(path, 0)  # 0 -> load as single-channel grayscale
        if img is None:
            raise IOError("could not read image: " + path)
        images.append(cv2.resize(img, size))
    return images

# load training data for gesture 2 (five fingers -> label 5)
myFiveTrainImages = _load_gesture_images("D:/train/fiveFingerTrainDataset/*.jpg")
tn1 = numpy.asarray(myFiveTrainImages)
# load training data for gesture 1 (zero fingers -> label 0)
myZeroTrainImages = _load_gesture_images("D:/train/zeroFingerTrainDataset/*.jpg")
tn2 = numpy.asarray(myZeroTrainImages)
# Combined training set: five-finger samples first, then zero-finger ones
# (the label vectors below are concatenated in the same order).
finalTrainImages = myFiveTrainImages + myZeroTrainImages
# load testing data for gesture 2
myFiveTestImages = _load_gesture_images("D:/train/fiveFingerTestDataset/*.jpg")
ts1 = numpy.asarray(myFiveTestImages)
# load testing data for gesture 1
myZeroTestImages = _load_gesture_images("D:/train/zeroFingerTestDataset/*.jpg")
ts2 = numpy.asarray(myZeroTestImages)
finalTestImages = myFiveTestImages + myZeroTestImages
x_train = numpy.asarray(finalTrainImages)
x_test = numpy.asarray(finalTestImages)
# Now preparing the training and testing outputs.
# Labels reuse the finger count as the class id (five fingers -> 5, fist -> 0).
# NOTE(review): with labels {0, 5}, to_categorical below yields 6 output
# columns of which 4 are never used; remapping to {0, 1} would shrink the
# model, but is kept as-is to preserve the saved model's output shape.
# numpy.full replaces the original empty-then-fill loops (numpy.empty leaves
# memory uninitialized until the loop runs, which is fragile).
y_myFiveTrainImages = numpy.full(tn1.shape[0], 5.0)
y_myZeroTrainImages = numpy.full(tn2.shape[0], 0.0)
y_myFiveTestImages = numpy.full(ts1.shape[0], 5.0)
y_myZeroTestImages = numpy.full(ts2.shape[0], 0.0)
# Concatenate per-gesture label vectors in the same order the image lists
# were concatenated above (five-finger samples first, then zero-finger).
y_train = numpy.concatenate([y_myFiveTrainImages, y_myZeroTrainImages])
y_test = numpy.concatenate([y_myFiveTestImages, y_myZeroTestImages])
# Quick sanity check on the dataset shapes before any transformation.
print(x_train.shape)
print(y_train.shape)
# Shuffle images and labels together so the two gesture classes are mixed
# through the sample order (draws from NumPy's seeded global RNG).
x_train, y_train = shuffle(x_train, y_train)
x_test, y_test = shuffle(x_test, y_test)
# Flatten each 50x50 image into a 2500-element vector and scale the
# 0-255 pixel values into [0, 1] as float32.
num_pixels = x_train.shape[1] * x_train.shape[2]
x_train = x_train.reshape(len(x_train), num_pixels).astype('float32') / 255
x_test = x_test.reshape(len(x_test), num_pixels).astype('float32') / 255
# One-hot encode the labels; with label values 0 and 5 this produces
# 6 output columns.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
print("num_classes")
print(num_classes)
# define baseline model
def baseline_model():
    """Build and compile the baseline dense network.

    One hidden ReLU layer as wide as the flattened input (num_pixels units)
    feeding a softmax output over num_classes classes; compiled with
    categorical cross-entropy and the Adam optimizer.
    """
    net = Sequential()
    net.add(Dense(num_pixels, input_dim=num_pixels,
                  kernel_initializer='normal', activation='relu'))
    net.add(Dense(num_classes, kernel_initializer='normal',
                  activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
# Build, train, evaluate, and persist the classifier.
model = baseline_model()
# Train, with the held-out test images doubling as the validation set.
model.fit(x_train, y_train, validation_data=(x_test, y_test),
          epochs=2000, batch_size=20, verbose=2)
# Report the final classification error on the test set
# (scores[1] is the accuracy metric).
scores = model.evaluate(x_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))
# Persist the architecture as JSON and the learned weights as HDF5.
with open("trainedModel-5.json","w") as jsonFile:
    jsonFile.write(model.to_json())
model.save_weights("modelWeights-5.h5")
print("Saved model to disk")