Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Image classification #10738

Merged
merged 31 commits into from
May 18, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
9dff644
Inferencer take infer_func as parameter
jacquesqiao May 15, 2018
d94f673
update trainer and word2vector demo
jacquesqiao May 15, 2018
52ac039
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
jacquesqiao May 15, 2018
acc94dc
delete unused code
jacquesqiao May 15, 2018
214dc6e
update test_fit_a_line
jacquesqiao May 15, 2018
1a05ae0
update test_recognize_digits_conv.py
jacquesqiao May 15, 2018
e347198
update test_recognize_digits_mlp.py
jacquesqiao May 15, 2018
ac0836c
clean code
jacquesqiao May 15, 2018
1801557
fix test failure
May 15, 2018
76b7a43
fix style
May 15, 2018
683c48f
style
May 15, 2018
33031d9
Merge branch 'develop' into image_classification_word2vec
daming-lu May 16, 2018
98ef2c8
rm notest
daming-lu May 16, 2018
6bfa6ff
finish vgg
daming-lu May 17, 2018
ccd95e3
Merge remote-tracking branch 'upstream/develop' into image_classifica…
daming-lu May 17, 2018
04200d7
style
daming-lu May 17, 2018
2d99eb1
image classification done
daming-lu May 17, 2018
6c3ca56
Merge remote-tracking branch 'upstream/develop' into image_classifica…
daming-lu May 17, 2018
4108430
style
May 17, 2018
aa2b0bf
the train_network returned result has to be an array
daming-lu May 17, 2018
bc485f7
Merge branch 'image_classification_word2vec' of https://github.com/da…
daming-lu May 17, 2018
6b405c1
add cmake file
daming-lu May 17, 2018
f863f28
Merge remote-tracking branch 'upstream/develop' into image_classifica…
daming-lu May 17, 2018
939b2c7
move cifar10 dataset to local so that we can read a smaller dataset
daming-lu May 18, 2018
0afb31e
Merge remote-tracking branch 'upstream/develop' into image_classifica…
daming-lu May 18, 2018
8bd087f
switch to smaller dataset
daming-lu May 18, 2018
b23b6e8
style
May 18, 2018
f59c8c9
Merge remote-tracking branch 'upstream/develop' into image_classifica…
May 18, 2018
2ef6f2f
tune threshold to be small as the training sample is small
daming-lu May 18, 2018
47a8b25
Merge branch 'image_classification_word2vec' of https://github.com/da…
daming-lu May 18, 2018
a6ec94e
Merge remote-tracking branch 'upstream/develop' into image_classifica…
daming-lu May 18, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,4 @@ endforeach()

# Register each high-level-API example suite as its own test subdirectory.
add_subdirectory(fit_a_line)
add_subdirectory(recognize_digits)
add_subdirectory(image_classification)
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Discover every test_*.py in this directory and strip the extension so each
# basename can be used as a ctest target name.
file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")

# default test
# Register each discovered script via the project's py_test helper.
foreach(src ${TEST_OPS})
py_test(${src} SRCS ${src}.py)
endforeach()
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CIFAR dataset.

This module will download dataset from
https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into
paddle reader creators.

The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test
images.

The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes
containing 600 images each. There are 500 training images and 100 testing
images per class.

"""

import cPickle
import itertools
import numpy
import paddle.v2.dataset.common
import tarfile

__all__ = ['train10']

URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/'
CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz'
CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a'


def reader_creator(filename, sub_name, batch_size=None):
    """Create a reader over the CIFAR batches stored in *filename*.

    :param filename: path to a CIFAR ``.tar.gz`` archive.
    :param sub_name: substring selecting which archive members to read
        (e.g. ``'data_batch'`` for the CIFAR-10 training batches).
    :param batch_size: optional int cap on the total number of samples
        yielded; ``None`` yields every sample.  The cap exists so CI can
        train on a tiny subset instead of a full epoch.
    :return: a reader yielding ``(image, label)`` tuples, where ``image``
        is a float32 array scaled into [0, 1] and ``label`` is an int.
    :rtype: callable
    """

    def read_batch(batch):
        # Each pickled batch maps 'data' to raw pixel rows and either
        # 'labels' (CIFAR-10) or 'fine_labels' (CIFAR-100) to targets.
        data = batch['data']
        labels = batch.get('labels', batch.get('fine_labels', None))
        assert labels is not None
        for sample, label in itertools.izip(data, labels):
            yield (sample / 255.0).astype(numpy.float32), int(label)

    def reader():
        with tarfile.open(filename, mode='r') as f:
            names = (each_item.name for each_item in f
                     if sub_name in each_item.name)

            sample_count = 0
            for name in names:
                batch = cPickle.load(f.extractfile(name))
                for item in read_batch(batch):
                    # The original test (`count > batch_size`, checked
                    # before a post-increment) was off by one and yielded
                    # batch_size + 1 samples; >= yields exactly batch_size.
                    if isinstance(batch_size, int) and \
                            sample_count >= batch_size:
                        # `return` (not `break`) also stops unpickling the
                        # remaining batch files once the cap is reached.
                        return
                    sample_count += 1
                    yield item

    return reader


def train10(batch_size=None):
    """
    CIFAR-10 training set creator.

    It returns a reader creator; each sample in the reader is image pixels
    in [0, 1] and the label in [0, 9].

    :param batch_size: optional cap on the number of samples read;
        ``None`` means the full training set.
    :return: Training reader creator
    :rtype: callable
    """
    # Download (or reuse the cached copy of) the CIFAR-10 archive, then
    # build a reader over its training batches.
    archive_path = paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar',
                                                     CIFAR10_MD5)
    return reader_creator(archive_path, 'data_batch', batch_size=batch_size)
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import paddle
import paddle.fluid as fluid
import numpy
import cifar10_small_test_set


def resnet_cifar10(input, depth=32):
Expand Down Expand Up @@ -81,46 +82,50 @@ def train_network():
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=predict, label=label)
return avg_cost, accuracy
return [avg_cost, accuracy]


def train(use_cuda, save_path):
def train(use_cuda, train_program, save_dirname):
BATCH_SIZE = 128
EPOCH_NUM = 1

train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10),
batch_size=BATCH_SIZE)

test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

def event_handler(event):
if isinstance(event, fluid.EndIteration):
if (event.batch_id % 10) == 0:
avg_cost, accuracy = trainer.test(reader=test_reader)
if isinstance(event, fluid.EndStepEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])

print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format(
event.batch_id + 1, avg_cost, accuracy))
print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))

if accuracy > 0.01: # Low threshold for speeding up CI
trainer.params.save(save_path)
return
if accuracy > 0.01: # Low threshold for speeding up CI
if save_dirname is not None:
trainer.save_params(save_dirname)
return

place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_network,
train_func=train_program,
optimizer=fluid.optimizer.Adam(learning_rate=0.001),
place=place,
event_handler=event_handler)
trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
place=place)

trainer.train(
reader=train_reader,
num_epochs=EPOCH_NUM,
event_handler=event_handler,
feed_order=['pixel', 'label'])

def infer(use_cuda, save_path):
params = fluid.Params(save_path)

def infer(use_cuda, inference_program, save_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path=save_dirname, place=place)

# The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range
Expand All @@ -135,8 +140,14 @@ def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
save_path = "image_classification_resnet.inference.model"
train(use_cuda, save_path)
infer(use_cuda, save_path)

train(
use_cuda=use_cuda, train_program=train_network, save_dirname=save_path)

infer(
use_cuda=use_cuda,
inference_program=inference_network,
save_dirname=save_path)


if __name__ == '__main__':
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import paddle
import paddle.fluid as fluid
import numpy
import cifar10_small_test_set


def vgg16_bn_drop(input):
Expand Down Expand Up @@ -60,46 +61,48 @@ def train_network():
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
accuracy = fluid.layers.accuracy(input=predict, label=label)
return avg_cost, accuracy
return [avg_cost, accuracy]


def train(use_cuda, save_path):
def train(use_cuda, train_program, save_dirname):
BATCH_SIZE = 128
EPOCH_NUM = 1

train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10),
batch_size=BATCH_SIZE)

test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)

def event_handler(event):
if isinstance(event, fluid.EndIteration):
if (event.batch_id % 10) == 0:
avg_cost, accuracy = trainer.test(reader=test_reader)
if isinstance(event, fluid.EndStepEvent):
avg_cost, accuracy = trainer.test(
reader=test_reader, feed_order=['pixel', 'label'])

print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format(
event.batch_id + 1, avg_cost, accuracy))
print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))

if accuracy > 0.01: # Low threshold for speeding up CI
trainer.params.save(save_path)
return
if accuracy > 0.01: # Low threshold for speeding up CI
if save_dirname is not None:
trainer.save_params(save_dirname)
return

place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
train_network,
optimizer=fluid.optimizer.Adam(learning_rate=0.001),
train_func=train_program,
place=place,
event_handler=event_handler)
trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
optimizer=fluid.optimizer.Adam(learning_rate=0.001))

trainer.train(
reader=train_reader,
num_epochs=1,
event_handler=event_handler,
feed_order=['pixel', 'label'])


def infer(use_cuda, save_path):
params = fluid.Params(save_path)
def infer(use_cuda, inference_program, save_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
inferencer = fluid.Inferencer(inference_network, params, place=place)
inferencer = fluid.Inferencer(
infer_func=inference_program, param_path=save_dirname, place=place)

# The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range
Expand All @@ -114,8 +117,14 @@ def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
save_path = "image_classification_vgg.inference.model"
train(use_cuda, save_path)
infer(use_cuda, save_path)

train(
use_cuda=use_cuda, train_program=train_network, save_dirname=save_path)

infer(
use_cuda=use_cuda,
inference_program=inference_network,
save_dirname=save_path)


if __name__ == '__main__':
Expand Down