From 63aa63bd9fff33c2799f83710956bbcb72c78d6d Mon Sep 17 00:00:00 2001
From: xuehui
Date: Mon, 14 Jan 2019 11:00:07 +0800
Subject: [PATCH] Remove unused example (#600)

* update README in metis and update RuntimeError

* remove smart params
---
 examples/trials/mnist-smartparam/config.yml   |  20 --
 .../mnist-smartparam/config_kubeflow.yml      |  31 ---
 .../trials/mnist-smartparam/config_pai.yml    |  35 ---
 examples/trials/mnist-smartparam/mnist.py     | 230 ------------------
 4 files changed, 316 deletions(-)
 delete mode 100644 examples/trials/mnist-smartparam/config.yml
 delete mode 100644 examples/trials/mnist-smartparam/config_kubeflow.yml
 delete mode 100644 examples/trials/mnist-smartparam/config_pai.yml
 delete mode 100644 examples/trials/mnist-smartparam/mnist.py

diff --git a/examples/trials/mnist-smartparam/config.yml b/examples/trials/mnist-smartparam/config.yml
deleted file mode 100644
index 756dff1c1b..0000000000
--- a/examples/trials/mnist-smartparam/config.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-authorName: default
-experimentName: example_mnist-smartparam
-trialConcurrency: 1
-maxExecDuration: 1h
-maxTrialNum: 10
-#choice: local, remote, pai
-trainingServicePlatform: local
-#choice: true, false
-useAnnotation: true
-tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner
-  #SMAC (SMAC should be installed through nnictl)
-  builtinTunerName: TPE
-  classArgs:
-    #choice: maximize, minimize
-    optimize_mode: maximize
-trial:
-  command: python3 mnist.py
-  codeDir: .
-  gpuNum: 0
diff --git a/examples/trials/mnist-smartparam/config_kubeflow.yml b/examples/trials/mnist-smartparam/config_kubeflow.yml
deleted file mode 100644
index 0eadc66db4..0000000000
--- a/examples/trials/mnist-smartparam/config_kubeflow.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-authorName: default
-experimentName: example_dist
-trialConcurrency: 1
-maxExecDuration: 1h
-maxTrialNum: 10
-#choice: local, remote, pai, kubeflow
-trainingServicePlatform: kubeflow
-#choice: true, false
-useAnnotation: true
-tuner:
-  #choice: TPE, Random, Anneal, Evolution
-  builtinTunerName: TPE
-  classArgs:
-    #choice: maximize, minimize
-    optimize_mode: maximize
-trial:
-  codeDir: .
-  worker:
-    replicas: 1
-    command: python3 mnist.py
-    gpuNum: 0
-    cpuNum: 1
-    memoryMB: 8192
-    image: msranni/nni:latest
-kubeflowConfig:
-  operator: tf-operator
-  apiVersion: v1alpha2
-  storage: nfs
-  nfs:
-    server: 10.10.10.10
-    path: /var/nfs/general
\ No newline at end of file
diff --git a/examples/trials/mnist-smartparam/config_pai.yml b/examples/trials/mnist-smartparam/config_pai.yml
deleted file mode 100644
index 6c448391dc..0000000000
--- a/examples/trials/mnist-smartparam/config_pai.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-authorName: default
-experimentName: example_mnist-smartparam
-trialConcurrency: 1
-maxExecDuration: 1h
-maxTrialNum: 10
-#choice: local, remote, pai
-trainingServicePlatform: pai
-#choice: true, false
-useAnnotation: true
-tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner
-  #SMAC (SMAC should be installed through nnictl)
-  builtinTunerName: TPE
-  classArgs:
-    #choice: maximize, minimize
-    optimize_mode: maximize
-trial:
-  command: python3 mnist.py
-  codeDir: .
-  gpuNum: 0
-  cpuNum: 1
-  memoryMB: 8192
-  #The docker image to run nni job on pai
-  image: msranni/nni:latest
-  #The hdfs directory to store data on pai, format 'hdfs://host:port/directory'
-  dataDir: hdfs://10.10.10.10:9000/username/nni
-  #The hdfs directory to store output data generated by nni, format 'hdfs://host:port/directory'
-  outputDir: hdfs://10.10.10.10:9000/username/nni
-paiConfig:
-  #The username to login pai
-  userName: username
-  #The password to login pai
-  passWord: password
-  #The host of restful server of pai
-  host: 10.10.10.10
\ No newline at end of file
diff --git a/examples/trials/mnist-smartparam/mnist.py b/examples/trials/mnist-smartparam/mnist.py
deleted file mode 100644
index 28b9fb8b31..0000000000
--- a/examples/trials/mnist-smartparam/mnist.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""A deep MNIST classifier using convolutional layers."""
-
-import logging
-import math
-import tempfile
-import tensorflow as tf
-
-from tensorflow.examples.tutorials.mnist import input_data
-
-import nni
-
-FLAGS = None
-
-logger = logging.getLogger('mnist_AutoML')
-
-
-class MnistNetwork(object):
-    '''
-    MnistNetwork is for initializing and building a basic network for mnist.
-    '''
-    def __init__(self,
-                 channel_1_num,
-                 channel_2_num,
-                 pool_size,
-                 x_dim=784,
-                 y_dim=10):
-        self.channel_1_num = channel_1_num
-        self.channel_2_num = channel_2_num
-        self.conv_size = nni.choice(2, 3, 5, 7, name='conv-size')
-        self.hidden_size = nni.choice(124, 512, 1024)  # example: without name
-        self.pool_size = pool_size
-        self.learning_rate = nni.uniform(0.0001, 0.1, name='learning_rate')
-        self.x_dim = x_dim
-        self.y_dim = y_dim
-
-        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
-        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
-        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
-
-        self.train_step = None
-        self.accuracy = None
-
-    def build_network(self):
-        '''
-        Building network for mnist
-        '''
-
-        # Reshape to use within a convolutional neural net.
-        # Last dimension is for "features" - there is only one here, since images are
-        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
-        with tf.name_scope('reshape'):
-            try:
-                input_dim = int(math.sqrt(self.x_dim))
-            except:
-                print(
-                    'input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
-                logger.debug(
-                    'input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
-                raise
-            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
-
-        # First convolutional layer - maps one grayscale image to 32 feature maps.
-        with tf.name_scope('conv1'):
-            w_conv1 = weight_variable(
-                [self.conv_size, self.conv_size, 1, self.channel_1_num])
-            b_conv1 = bias_variable([self.channel_1_num])
-            h_conv1 = nni.function_choice(
-                lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
-                lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
-                lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1)
-            )  # example: without name
-
-        # Pooling layer - downsamples by 2X.
-        with tf.name_scope('pool1'):
-            h_pool1 = max_pool(h_conv1, self.pool_size)
-            h_pool1 = nni.function_choice(
-                lambda: max_pool(h_conv1, self.pool_size),
-                lambda: avg_pool(h_conv1, self.pool_size),
-                name='h_pool1')
-
-
-        # Second convolutional layer -- maps 32 feature maps to 64.
-        with tf.name_scope('conv2'):
-            w_conv2 = weight_variable([self.conv_size, self.conv_size,
-                                       self.channel_1_num, self.channel_2_num])
-            b_conv2 = bias_variable([self.channel_2_num])
-            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
-
-        # Second pooling layer.
-        with tf.name_scope('pool2'):  # example: another style
-            h_pool2 = max_pool(h_conv2, self.pool_size)
-
-        # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
-        # is down to 7x7x64 feature maps -- maps this to 1024 features.
-        last_dim = int(input_dim / (self.pool_size * self.pool_size))
-        with tf.name_scope('fc1'):
-            w_fc1 = weight_variable(
-                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
-            b_fc1 = bias_variable([self.hidden_size])
-
-            h_pool2_flat = tf.reshape(
-                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
-            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
-
-        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
-        with tf.name_scope('dropout'):
-            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
-
-        # Map the 1024 features to 10 classes, one for each digit
-        with tf.name_scope('fc2'):
-            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
-            b_fc2 = bias_variable([self.y_dim])
-            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
-
-        with tf.name_scope('loss'):
-            cross_entropy = tf.reduce_mean(
-                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
-        with tf.name_scope('adam_optimizer'):
-            self.train_step = tf.train.AdamOptimizer(
-                self.learning_rate).minimize(cross_entropy)
-
-        with tf.name_scope('accuracy'):
-            correct_prediction = tf.equal(
-                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
-            self.accuracy = tf.reduce_mean(
-                tf.cast(correct_prediction, tf.float32))
-
-
-def conv2d(x_input, w_matrix):
-    """conv2d returns a 2d convolution layer with full stride."""
-    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
-
-
-def max_pool(x_input, pool_size):
-    """max_pool downsamples a feature map by 2X."""
-    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
-                          strides=[1, pool_size, pool_size, 1], padding='SAME')
-
-
-def avg_pool(x_input, pool_size):
-    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
-                          strides=[1, pool_size, pool_size, 1], padding='SAME')
-
-
-def weight_variable(shape):
-    """weight_variable generates a weight variable of a given shape."""
-    initial = tf.truncated_normal(shape, stddev=0.1)
-    return tf.Variable(initial)
-
-
-def bias_variable(shape):
-    """bias_variable generates a bias variable of a given shape."""
-    initial = tf.constant(0.1, shape=shape)
-    return tf.Variable(initial)
-
-
-def main(params):
-    '''
-    Main function: build the mnist network, run it, and send the result to NNI.
-    '''
-    # Import data
-    mnist = input_data.read_data_sets(params['data_dir'], one_hot=True)
-    print('Mnist download data done.')
-    logger.debug('Mnist download data done.')
-
-    # Create the model
-    # Build the graph for the deep net
-    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
-                                 channel_2_num=params['channel_2_num'],
-                                 pool_size=params['pool_size'])
-    mnist_network.build_network()
-    logger.debug('Mnist build network done.')
-
-    # Write log
-    graph_location = tempfile.mkdtemp()
-    logger.debug('Saving graph to: %s', graph_location)
-    train_writer = tf.summary.FileWriter(graph_location)
-    train_writer.add_graph(tf.get_default_graph())
-
-    test_acc = 0.0
-    with tf.Session() as sess:
-        sess.run(tf.global_variables_initializer())
-        batch_size = nni.choice(1, 4, 8, 16, 32, name='batch_size')
-        for i in range(2000):
-            batch = mnist.train.next_batch(batch_size)
-            dropout_rate = nni.choice(0.5, 0.9, name='dropout_rate')
-            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
-                                                    mnist_network.labels: batch[1],
-                                                    mnist_network.keep_prob: 1 - dropout_rate}
-                                         )
-
-            if i % 100 == 0:
-                test_acc = mnist_network.accuracy.eval(
-                    feed_dict={mnist_network.images: mnist.test.images,
-                               mnist_network.labels: mnist.test.labels,
-                               mnist_network.keep_prob: 1.0})
-
-                nni.report_intermediate_result(test_acc)
-                logger.debug('test accuracy %g', test_acc)
-                logger.debug('Pipe send intermediate result done.')
-
-        test_acc = mnist_network.accuracy.eval(
-            feed_dict={mnist_network.images: mnist.test.images,
-                       mnist_network.labels: mnist.test.labels,
-                       mnist_network.keep_prob: 1.0})
-
-        nni.report_final_result(test_acc)
-        logger.debug('Final result is %g', test_acc)
-        logger.debug('Send final result done.')
-
-
-def generate_default_params():
-    '''
-    Generate default parameters for mnist network.
-    '''
-    params = {
-        'data_dir': '/tmp/tensorflow/mnist/input_data',
-        'channel_1_num': 32,
-        'channel_2_num': 64,
-        'pool_size': 2}
-    return params
-
-
-if __name__ == '__main__':
-    try:
-        nni.get_next_parameter()
-        main(generate_defualt_params())
-    except Exception as exception:
-        logger.exception(exception)
-        raise
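
The removed trial relies on NNI's annotation ("smart parameter") syntax: with useAnnotation: true in the config, calls such as nni.choice() and nni.uniform() are rewritten by the toolkit into tuner-sampled values. For reference, here is a minimal sketch of the same hyper-parameters expressed through NNI's standard, non-annotation workflow (useAnnotation: false plus a searchSpacePath entry). The file name search_space.json and the run_trial helper are illustrative assumptions, not part of this patch; only nni.get_next_parameter() and nni.report_final_result() are real NNI APIs used by the deleted trial itself.

# search_space.json (hypothetical), mirroring the removed annotations:
# {
#     "conv-size":     {"_type": "choice",  "_value": [2, 3, 5, 7]},
#     "hidden_size":   {"_type": "choice",  "_value": [124, 512, 1024]},
#     "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]},
#     "batch_size":    {"_type": "choice",  "_value": [1, 4, 8, 16, 32]},
#     "dropout_rate":  {"_type": "choice",  "_value": [0.5, 0.9]}
# }

import nni

def run_trial(params):
    """Train with the sampled hyper-parameters and report the result (sketch)."""
    # ... build and train the model using params['conv-size'], params['batch_size'], etc. ...
    accuracy = 0.0  # placeholder for the real evaluation metric
    nni.report_final_result(accuracy)

if __name__ == '__main__':
    # With useAnnotation: false, the tuner samples a concrete dict
    # from search_space.json and returns it here.
    tuned_params = nni.get_next_parameter()
    run_trial(tuned_params)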