alexnet_classify.py
#!/usr/bin/env python
######################################################################################
################################ Reusing Existing code ###############################
######## Reference: Caffe https://github.com/BVLC/caffe/blob/master/python/classify.py
'''
alexnet_classify.py is an out-of-the-box image classifier callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Modified for the "What's in a Question: Using Visual Questions as a Form of Supervision" project
https://github.com/sidgan/cvpr2017
Produces a pickle dump of the results.
'''
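# Example invocation (a sketch, not from the original repository: the image
# directory and output filename are placeholders, and the flags correspond to
# the argparse options defined in main() below):
#
#     python alexnet_classify.py /path/to/val2014/ predictions.npy --gpu
#
# The predictions are also saved to the given .npy file (overwritten per image),
# and an {image path: prediction array} dictionary is pickled to
# alexnet_predictions.p.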
#######################################################################################
import numpy as np
import os
import sys
import argparse
import glob
import time
import pickle
import caffe


def main(argv):
    pycaffe_dir = os.path.dirname(__file__)

    parser = argparse.ArgumentParser()
    # Required arguments: input and output files.
    parser.add_argument(
        "input_file",
        default="/scratch/sganju1/val2014/",
        help="Input image, directory, or npy."
    )
    parser.add_argument(
        "output_file",
        default="final_multilabel.npy",
        help="Output npy filename."
    )
    # Optional arguments.
    parser.add_argument(
        "--model_def",
        default=os.path.join(pycaffe_dir,
                             "../models/golden_multilabel/deploy.prototxt"),
        help="Model definition file."
    )
    parser.add_argument(
        "--pretrained_model",
        default=os.path.join(pycaffe_dir,
                             "../models/golden_multilabel/finetune_iter_100000.caffemodel"),
        help="Trained model weights file."
    )
    parser.add_argument(
        "--gpu",
        action='store_true',
        help="Switch for gpu computation."
    )
    parser.add_argument(
        "--center_only",
        action='store_true',
        help="Switch for prediction from center crop alone instead of " +
             "averaging predictions across crops (default)."
    )
    parser.add_argument(
        "--images_dim",
        default='227,227',
        help="Canonical 'height,width' dimensions of input images."
    )
    parser.add_argument(
        "--mean_file",
        default=os.path.join(pycaffe_dir,
                             'caffe/imagenet/ilsvrc_2012_mean.npy'),
        help="Data set image mean of [Channels x Height x Width] dimensions " +
             "(numpy array). Set to '' for no mean subtraction."
    )
    parser.add_argument(
        "--input_scale",
        type=float,
        help="Multiply input features by this scale to finish preprocessing."
    )
    parser.add_argument(
        "--raw_scale",
        type=float,
        default=255.0,
        help="Multiply raw input by this scale before preprocessing."
    )
    parser.add_argument(
        "--channel_swap",
        default='2,1,0',
        help="Order to permute input channels. The default converts " +
             "RGB -> BGR since BGR is the Caffe default by way of OpenCV."
    )
    parser.add_argument(
        "--ext",
        default='jpg',
        help="Image file extension to take as input when a directory " +
             "is given as the input file."
    )
    args = parser.parse_args()

    image_dims = [int(s) for s in args.images_dim.split(',')]

    mean, channel_swap = None, None
    if args.mean_file:
        # Collapse the mean image into per-channel means.
        mean = np.load(args.mean_file).mean(1).mean(1)
    if args.channel_swap:
        channel_swap = [int(s) for s in args.channel_swap.split(',')]

    if args.gpu:
        caffe.set_mode_gpu()
        print("GPU mode")
    else:
        caffe.set_mode_cpu()
        print("CPU mode")

    # Make classifier.
    classifier = caffe.Classifier(args.model_def, args.pretrained_model,
                                  image_dims=image_dims, mean=mean,
                                  input_scale=args.input_scale, raw_scale=args.raw_scale,
                                  channel_swap=channel_swap)
    # Load numpy array (.npy), directory glob (*.jpg), or image file.
    args.input_file = os.path.expanduser(args.input_file)
    img_urls = []
    if args.input_file.endswith('npy'):
        # .npy inputs are loaded but not covered by the per-image loop below.
        print("Loading file: %s" % args.input_file)
        inputs = np.load(args.input_file)
    elif os.path.isdir(args.input_file):
        print("Loading folder: %s" % args.input_file)
        img_urls = [im_f for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
        print(len(img_urls))
    else:
        print("Loading file: %s" % args.input_file)
        inputs = [caffe.io.load_image(args.input_file)]
        img_urls = [args.input_file]

    # Classify every image and collect the predictions for the pickle file.
    result_predictions = {}
    for each in img_urls:
        inputs = [caffe.io.load_image(each)]
        # Classify.
        start = time.time()
        predictions = classifier.predict(inputs, not args.center_only)
        print("Done in %.2f s." % (time.time() - start))
        # Overwritten on every iteration, so the .npy keeps only the most recent
        # image's scores; the pickle below keeps all of them.
        np.save(args.output_file, predictions)
        result_predictions[each] = predictions

    # Dump the pickle file.
    with open("alexnet_predictions.p", "wb") as f:
        pickle.dump(result_predictions, f)


if __name__ == '__main__':
    main(sys.argv)
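
# Reading the results back (a sketch; "alexnet_predictions.p" is the filename
# hard-coded in main() above):
#
#     import pickle
#     with open("alexnet_predictions.p", "rb") as f:
#         result_predictions = pickle.load(f)
#     # result_predictions maps each image path to the NumPy array of class
#     # scores returned by classifier.predict() for that image.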