Skip to content
This repository has been archived by the owner on Sep 18, 2024. It is now read-only.

Commit

Permalink
Merge v0.3 to master (#339)
Browse files Browse the repository at this point in the history
* Fix pypi package missing python module

* Fix pypi package missing python module

* fix bug in smartparam example (#322)

* Fix nnictl update trialnum and document (#326)

1.Fix restful server of update
2.Update nnictl document of update
3.Add tensorboard in document

* Update the version numbers from 0.3.2 to 0.3.3

* Update examples (#331)

* update mnist-annotation

* fix mnist-annotation typo

* update mnist example

* update mnist-smartparam

* update mnist-annotation

* update mnist-smartparam

* change learning rate

* update mnist assessor maxTrialNum

* update examples

* update examples

* update maxTrialNum

* fix broken path in config_assessor.yml
  • Loading branch information
QuanluZhang authored Nov 7, 2018
1 parent a87517c commit 640e7bb
Show file tree
Hide file tree
Showing 5 changed files with 24 additions and 21 deletions.
19 changes: 10 additions & 9 deletions examples/trials/mnist-annotation/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,8 @@ def main(params):
'''
# Import data
mnist = input_data.read_data_sets(params['data_dir'], one_hot=True)
print('Mnist download data down.')
logger.debug('Mnist download data down.')
print('Mnist download data done.')
logger.debug('Mnist download data done.')

# Create the model
# Build the graph for the deep net
Expand All @@ -180,15 +180,15 @@ def main(params):
test_acc = 0.0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
"""@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
batch_num = params['batch_num']
for i in range(batch_num):
batch = mnist.train.next_batch(batch_num)
"""@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
"""@nni.variable(nni.choice(1, 4, 8, 16, 32), name=batch_size)"""
batch_size = params['batch_size']
for i in range(params['batch_num']):
batch = mnist.train.next_batch(batch_size)
"""@nni.variable(nni.choice(0.5, 0.9), name=dropout_rate)"""
dropout_rate = params['dropout_rate']
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: dropout_rate}
mnist_network.keep_prob: 1 - dropout_rate}
)

if i % 100 == 0:
Expand Down Expand Up @@ -224,7 +224,8 @@ def generate_defualt_params():
'pool_size': 2,
'hidden_size': 1024,
'learning_rate': 1e-4,
'batch_num': 200}
'batch_num': 2000,
'batch_size': 32}
return params


Expand Down
10 changes: 5 additions & 5 deletions examples/trials/mnist-smartparam/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,13 +180,13 @@ def main(params):
test_acc = 0.0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
batch_num = nni.choice(50, 250, 500, name='batch_num')
for i in range(batch_num):
batch = mnist.train.next_batch(batch_num)
dropout_rate = nni.choice(1, 5, name='dropout_rate')
batch_size = nni.choice(1, 4, 8, 16, 32, name='batch_size')
for i in range(2000):
batch = mnist.train.next_batch(batch_size)
dropout_rate = nni.choice(0.5, 0.9, name='dropout_rate')
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: dropout_rate}
mnist_network.keep_prob: 1 - dropout_rate}
)

if i % 100 == 0:
Expand Down
6 changes: 3 additions & 3 deletions examples/trials/mnist/config_assessor.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@ authorName: default
experimentName: example_mnist
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 1
maxTrialNum: 20
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: ~/nni/examples/trials/mnist/search_space.json
searchSpacePath: search_space.json
#choice: true, false
useAnnotation: false
tuner:
Expand All @@ -23,5 +23,5 @@ assessor:
optimize_mode: maximize
trial:
command: python3 mnist.py
codeDir: ~/nni/examples/trials/mnist
codeDir: .
gpuNum: 0
7 changes: 4 additions & 3 deletions examples/trials/mnist/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,10 +173,10 @@ def main(params):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(params['batch_num']):
batch = mnist.train.next_batch(params['batch_num'])
batch = mnist.train.next_batch(params['batch_size'])
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1],
mnist_network.keep_prob: params['dropout_rate']}
mnist_network.keep_prob: 1 - params['dropout_rate']}
)

if i % 100 == 0:
Expand Down Expand Up @@ -212,7 +212,8 @@ def generate_default_params():
'pool_size': 2,
'hidden_size': 1024,
'learning_rate': 1e-4,
'batch_num': 200}
'batch_num': 2000,
'batch_size': 32}
return params


Expand Down
3 changes: 2 additions & 1 deletion examples/trials/mnist/search_space.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
{
"dropout_rate":{"_type":"uniform","_value":[0.1,0.5]},
"dropout_rate":{"_type":"uniform","_value":[0.5, 0.9]},
"conv_size":{"_type":"choice","_value":[2,3,5,7]},
"hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
"batch_size": {"_type":"choice", "_value": [1, 4, 8, 16, 32]},
"learning_rate":{"_type":"choice","_value":[0.0001, 0.001, 0.01, 0.1]}
}

0 comments on commit 640e7bb

Please sign in to comment.