diff --git a/.gitignore b/.gitignore
index 2712fab7e2..c7081c4e2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -61,4 +61,9 @@ typings/
 .next
 
 # Pycharm Project files
-.idea
\ No newline at end of file
+.idea
+
+# Python cache files
+__pycache__
+build
+*.egg-info
\ No newline at end of file
diff --git a/docs/howto_2_CustomizedTuner.md b/docs/howto_2_CustomizedTuner.md
index 862df6885d..f086f37eed 100644
--- a/docs/howto_2_CustomizedTuner.md
+++ b/docs/howto_2_CustomizedTuner.md
@@ -27,12 +27,12 @@ class CustomizedTuner(Tuner):
     def __init__(self, ...):
         ...
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
     '''
     Record an observation of the objective function and Train
     parameter_id: int
     parameters: object created by 'generate_parameters()'
-    reward: object reported by trial
+    value: final metrics of the trial, including reward
     '''
     # your code implements here.
     ...
@@ -46,7 +46,7 @@ class CustomizedTuner(Tuner):
     return your_parameters
     ...
 ```
-```receive_trial_result``` will receive ```the parameter_id, parameters, reward``` as parameters input. Also, Tuner will receive the ```reward``` object are exactly same reward that Trial send.
+```receive_trial_result``` will receive ```parameter_id, parameters, value``` as input. The ```value``` object that the Tuner receives is exactly the same value that the Trial sends.
 
 The ```your_parameters``` return from ```generate_parameters``` function, will be package as json object by NNI SDK. NNI SDK will unpack json object so the Trial will receive the exact same ```your_parameters``` from Tuner.
@@ -65,7 +65,7 @@ If the you implement the ```generate_parameters``` like this:
 ```
 parameter_id = 82347
 parameters = {"dropout": 0.3, "learning_rate": 0.4}
-reward = 0.93
+value = 0.93
 ```
 **Note that** if you want to access a file (e.g., ```data.txt```) in the directory of your own tuner, you cannot use ```open('data.txt', 'r')```.
 Instead, you should use the following:
diff --git a/examples/tuners/ga_customer_tuner/customer_tuner.py b/examples/tuners/ga_customer_tuner/customer_tuner.py
index 16203a2e08..2cfae001e5 100644
--- a/examples/tuners/ga_customer_tuner/customer_tuner.py
+++ b/examples/tuners/ga_customer_tuner/customer_tuner.py
@@ -108,13 +108,14 @@ def generate_parameters(self, parameter_id):
         return temp
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         '''
         Record an observation of the objective function
         parameter_id : int
         parameters : dict of parameters
-        reward : reward of one trial
+        value: final metrics of the trial, including reward
         '''
+        reward = self.extract_scalar_reward(value)
         if self.optimize_mode is OptimizeMode.Minimize:
             reward = -reward
 
@@ -131,7 +132,7 @@ def update_search_space(self, data):
 
 if __name__ =='__main__':
     tuner = CustomerTuner(OptimizeMode.Maximize)
-    config = tuner.generate_parameter(0)
+    config = tuner.generate_parameters(0)
     with open('./data.json', 'w') as outfile:
         json.dump(config, outfile)
     tuner.receive_trial_result(0, config, 0.99)
diff --git a/pylintrc b/pylintrc
index 304e2bce6e..673d8e058c 100644
--- a/pylintrc
+++ b/pylintrc
@@ -22,8 +22,9 @@ enable=F,
        duplicate-key,
        unnecessary-semicolon,
        global-variable-not-assigned,
-       unused-variable,
        binary-op-exception,
        bad-format-string,
        anomalous-backslash-in-string,
        bad-open-mode
+
+extension-pkg-whitelist=numpy
\ No newline at end of file
diff --git a/src/nni_manager/core/test/dummy_tuner.py b/src/nni_manager/core/test/dummy_tuner.py
index d525b3f812..c13fd41e0d 100644
--- a/src/nni_manager/core/test/dummy_tuner.py
+++ b/src/nni_manager/core/test/dummy_tuner.py
@@ -25,10 +25,10 @@ def generate_parameters(self, parameter_id):
     def generate_multiple_parameters(self, parameter_id_list):
         return ['unit-test-param1', 'unit-test-param2']
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         pass
 
-    def receive_customized_trial_result(self, parameter_id, parameters, reward):
+    def receive_customized_trial_result(self, parameter_id, parameters, value):
         pass
 
     def update_search_space(self, search_space):
diff --git a/src/sdk/pynni/nni/batch_tuner/batch_tuner.py b/src/sdk/pynni/nni/batch_tuner/batch_tuner.py
index e9967ea8ea..7085c38982 100644
--- a/src/sdk/pynni/nni/batch_tuner/batch_tuner.py
+++ b/src/sdk/pynni/nni/batch_tuner/batch_tuner.py
@@ -77,5 +77,5 @@ def generate_parameters(self, parameter_id):
             raise nni.NoMoreTrialError('no more parameters now.')
         return self.values[self.count]
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         pass
\ No newline at end of file
diff --git a/src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py b/src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py
index 4c564e7cef..fb51c9fee5 100644
--- a/src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py
+++ b/src/sdk/pynni/nni/evolution_tuner/evolution_tuner.py
@@ -234,12 +234,13 @@ def generate_parameters(self, parameter_id):
         config = _split_index(total_config)
         return config
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         '''
         Record an observation of the objective function
         parameters: dict of parameters
-        reward: reward of one trial
+        value: final metrics of the trial, including reward
         '''
+        reward = self.extract_scalar_reward(value)
         if parameter_id not in self.total_data:
             raise RuntimeError('Received parameter_id not in total_data.')
         # restore the paramsters contains "_index"
diff --git a/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py b/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py
index 0f3ef7eb19..2706a45885 100644
--- a/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py
+++ b/src/sdk/pynni/nni/hyperopt_tuner/hyperopt_tuner.py
@@ -206,13 +206,14 @@ def generate_parameters(self, parameter_id):
         params = _split_index(total_params)
         return params
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         '''
         Record an observation of the objective function
         parameter_id : int
         parameters : dict of parameters
-        reward : reward of one trial
+        value: final metrics of the trial, including reward
         '''
+        reward = self.extract_scalar_reward(value)
         # restore the paramsters contains '_index'
         if parameter_id not in self.total_data:
             raise RuntimeError('Received parameter_id not in total_data.')
diff --git a/src/sdk/pynni/nni/msg_dispatcher.py b/src/sdk/pynni/nni/msg_dispatcher.py
index 4f94f50f34..1667d53562 100644
--- a/src/sdk/pynni/nni/msg_dispatcher.py
+++ b/src/sdk/pynni/nni/msg_dispatcher.py
@@ -110,21 +110,15 @@ def handle_add_customized_trial(self, data):
         return True
 
     def handle_report_metric_data(self, data):
+        """
+        :param data: a dict received from nni_manager, which contains:
+                     - 'parameter_id': id of the trial
+                     - 'value': metric value reported by nni.report_final_result()
+                     - 'type': report type, support {'FINAL', 'PERIODICAL'}
+        """
         if data['type'] == 'FINAL':
-            value = None
             id_ = data['parameter_id']
-
-            if isinstance(data['value'], float) or isinstance(data['value'], int):
-                value = data['value']
-            elif isinstance(data['value'], dict) and 'default' in data['value']:
-                value = data['value']['default']
-                if isinstance(value, float) or isinstance(value, int):
-                    pass
-                else:
-                    raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
-            else:
-                raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
-
+            value = data['value']
             if id_ in _customized_parameter_ids:
                 self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value)
             else:
diff --git a/src/sdk/pynni/nni/multi_phase/multi_phase_tuner.py b/src/sdk/pynni/nni/multi_phase/multi_phase_tuner.py
index 1fb10ab676..22c096f184 100644
--- a/src/sdk/pynni/nni/multi_phase/multi_phase_tuner.py
+++ b/src/sdk/pynni/nni/multi_phase/multi_phase_tuner.py
@@ -44,19 +44,19 @@ def generate_multiple_parameters(self, parameter_id_list):
         """
         return [self.generate_parameters(parameter_id) for parameter_id in parameter_id_list]
 
-    def receive_trial_result(self, parameter_id, parameters, reward, trial_job_id):
+    def receive_trial_result(self, parameter_id, parameters, value, trial_job_id):
         """Invoked when a trial reports its final result. Must override.
         parameter_id: int
         parameters: object created by 'generate_parameters()'
-        reward: object reported by trial
+        value: object reported by trial
         """
         raise NotImplementedError('Tuner: receive_trial_result not implemented')
 
-    def receive_customized_trial_result(self, parameter_id, parameters, reward, trial_job_id):
+    def receive_customized_trial_result(self, parameter_id, parameters, value, trial_job_id):
         """Invoked when a trial added by WebUI reports its final result.
         Do nothing by default.
         parameter_id: int
         parameters: object created by user
-        reward: object reported by trial
+        value: object reported by trial
         """
         _logger.info('Customized trial job %s ignored by tuner', parameter_id)
diff --git a/src/sdk/pynni/nni/smac_tuner/smac_tuner.py b/src/sdk/pynni/nni/smac_tuner/smac_tuner.py
index 36c14b330a..9d1d738b58 100644
--- a/src/sdk/pynni/nni/smac_tuner/smac_tuner.py
+++ b/src/sdk/pynni/nni/smac_tuner/smac_tuner.py
@@ -134,10 +134,11 @@ def update_search_space(self, search_space):
         else:
             self.logger.warning('update search space is not supported.')
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         '''
         receive_trial_result
         '''
+        reward = self.extract_scalar_reward(value)
         if self.optimize_mode is OptimizeMode.Maximize:
             reward = -reward
 
diff --git a/src/sdk/pynni/nni/tuner.py b/src/sdk/pynni/nni/tuner.py
index 5437f8ed7c..c5d443c330 100644
--- a/src/sdk/pynni/nni/tuner.py
+++ b/src/sdk/pynni/nni/tuner.py
@@ -52,7 +52,7 @@ def generate_multiple_parameters(self, parameter_id_list):
             result.append(res)
         return result
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
         """Invoked when a trial reports its final result. Must override.
         parameter_id: int
         parameters: object created by 'generate_parameters()'
@@ -60,11 +60,11 @@ def receive_trial_result(self, parameter_id, parameters, reward):
         """
         raise NotImplementedError('Tuner: receive_trial_result not implemented')
 
-    def receive_customized_trial_result(self, parameter_id, parameters, reward):
+    def receive_customized_trial_result(self, parameter_id, parameters, value):
         """Invoked when a trial added by WebUI reports its final result.
         Do nothing by default.
         parameter_id: int
         parameters: object created by user
-        reward: object reported by trial
+        value: object reported by trial
         """
         _logger.info('Customized trial job %s ignored by tuner', parameter_id)
 
@@ -93,3 +93,12 @@ def _on_exit(self):
 
     def _on_error(self):
         pass
+
+    def extract_scalar_reward(self, value, scalar_key='default'):
+        if isinstance(value, float) or isinstance(value, int):
+            reward = value
+        elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
+            reward = value[scalar_key]
+        else:
+            raise RuntimeError('Incorrect final result: the final result for %s should be float/int, or a dict which has a key named "default" whose value is float/int.' % str(self.__class__))
+        return reward
\ No newline at end of file
diff --git a/src/sdk/pynni/tests/test_multi_phase_tuner.py b/src/sdk/pynni/tests/test_multi_phase_tuner.py
index 72b477999e..cf4737fd04 100644
--- a/src/sdk/pynni/tests/test_multi_phase_tuner.py
+++ b/src/sdk/pynni/tests/test_multi_phase_tuner.py
@@ -35,10 +35,10 @@ def generate_parameters(self, parameter_id, trial_job_id=None):
 
         return generated_parameters
 
-    def receive_trial_result(self, parameter_id, parameters, reward, trial_job_id):
-        logging.getLogger(__name__).debug('receive_trial_result: {},{},{},{}'.format(parameter_id, parameters, reward, trial_job_id))
+    def receive_trial_result(self, parameter_id, parameters, value, trial_job_id):
+        logging.getLogger(__name__).debug('receive_trial_result: {},{},{},{}'.format(parameter_id, parameters, value, trial_job_id))
 
-    def receive_customized_trial_result(self, parameter_id, parameters, reward, trial_job_id):
+    def receive_customized_trial_result(self, parameter_id, parameters, value, trial_job_id):
         pass
 
     def update_search_space(self, search_space):
diff --git a/src/sdk/pynni/tests/test_tuner.py b/src/sdk/pynni/tests/test_tuner.py
index faf341c829..fb2014ab2c 100644
--- a/src/sdk/pynni/tests/test_tuner.py
+++ b/src/sdk/pynni/tests/test_tuner.py
@@ -44,10 +44,12 @@ def generate_parameters(self, parameter_id):
             'search_space': self.search_space
         }
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
+        reward = self.extract_scalar_reward(value)
         self.trial_results.append((parameter_id, parameters['param'], reward, False))
 
-    def receive_customized_trial_result(self, parameter_id, parameters, reward):
+    def receive_customized_trial_result(self, parameter_id, parameters, value):
+        reward = self.extract_scalar_reward(value)
         self.trial_results.append((parameter_id, parameters['param'], reward, True))
 
     def update_search_space(self, search_space):
diff --git a/test/naive_test/naive_tuner.py b/test/naive_test/naive_tuner.py
index 9ff98d6961..37099170cf 100644
--- a/test/naive_test/naive_tuner.py
+++ b/test/naive_test/naive_tuner.py
@@ -20,7 +20,8 @@ def generate_parameters(self, parameter_id):
         _logger.info('generate parameters: %s' % self.cur)
         return { 'x': self.cur }
 
-    def receive_trial_result(self, parameter_id, parameters, reward):
+    def receive_trial_result(self, parameter_id, parameters, value):
+        reward = self.extract_scalar_reward(value)
        _logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward))
         _result.write('%d %d\n' % (parameters['x'], reward))
         _result.flush()
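
With this change the dispatcher forwards whatever the trial reported straight to the tuner, and ```Tuner.extract_scalar_reward``` is what reduces it to a number. The sketch below is not part of the patch; it only illustrates the two report shapes that now reach ```receive_trial_result``` as ```value```. The ```'accuracy'``` key is a made-up extra metric used purely for illustration.

```python
import nni

# Inside a trial, report either a plain number ...
nni.report_final_result(0.93)  # the tuner's receive_trial_result() gets value == 0.93

# ... or a dict whose numeric 'default' entry is the scalar the tuner optimizes.
# extract_scalar_reward({'default': 0.93, 'accuracy': 0.91}) returns 0.93;
# 'accuracy' is a hypothetical extra metric the tuner may inspect if it wants.
nni.report_final_result({'default': 0.93, 'accuracy': 0.91})
```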
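A minimal sketch of a customized tuner written against the updated signature, along the lines of the how-to above. It is illustrative only: the class name and the ```results``` list are hypothetical, and it assumes the base class is imported from ```nni.tuner``` as in the SDK sources touched by this diff.

```python
from nni.tuner import Tuner


class MyCustomizedTuner(Tuner):
    '''Hypothetical tuner: records every (parameters, reward) pair it receives.'''

    def __init__(self):
        self.results = []

    def generate_parameters(self, parameter_id):
        # Return a serializable object; the trial gets it as its hyper-parameters.
        return {'dropout': 0.3, 'learning_rate': 0.4}

    def receive_trial_result(self, parameter_id, parameters, value):
        # `value` is whatever the trial passed to nni.report_final_result();
        # the extract_scalar_reward() helper added in this patch turns it into a scalar.
        reward = self.extract_scalar_reward(value)
        self.results.append((parameter_id, parameters, reward))

    def update_search_space(self, search_space):
        pass
```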