diff --git a/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py b/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py
index 1c5d2dfa98..5fb17880e9 100644
--- a/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py
+++ b/src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py
@@ -106,7 +106,7 @@ def __init__(self, s, s_max, eta, max_budget, optimize_mode):
         self.s_max = s_max
         self.eta = eta
         self.max_budget = max_budget
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)
 
         self.n = math.ceil((s_max + 1) * eta**s / (s + 1) - _epsilon)
         self.r = max_budget / eta**s
diff --git a/src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py b/src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py
index 7faf30e926..3b8750bcba 100644
--- a/src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py
+++ b/src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py
@@ -144,7 +144,7 @@ def __init__(self, s, s_max, eta, R, optimize_mode):
         self.configs_perf = []         # [ {id: [seq, acc]}, {}, ... ]
         self.num_configs_to_run = []   # [ n, n, n, ... ]
         self.num_finished_configs = [] # [ n, n, n, ... ]
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)
         self.no_more_trial = False
 
     def is_completed(self):
diff --git a/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py
index 4507e30886..9341e49e2b 100644
--- a/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py
+++ b/src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py
@@ -49,15 +49,16 @@ def selection_r(x_bounds,
                 num_starting_points=100,
                 minimize_constraints_fun=None):
     '''
-    Call selection
+    Select using different types.
     '''
-    minimize_starting_points = [lib_data.rand(x_bounds, x_types)\
-                                for i in range(0, num_starting_points)]
+    minimize_starting_points = clusteringmodel_gmm_good.sample(n_samples=num_starting_points)
+
     outputs = selection(x_bounds, x_types,
                         clusteringmodel_gmm_good,
                         clusteringmodel_gmm_bad,
-                        minimize_starting_points,
+                        minimize_starting_points[0],
                         minimize_constraints_fun)
+
     return outputs
 
 def selection(x_bounds,
diff --git a/src/sdk/pynni/nni/metis_tuner/metis_tuner.py b/src/sdk/pynni/nni/metis_tuner/metis_tuner.py
index 95f32a7c92..a796ab6163 100644
--- a/src/sdk/pynni/nni/metis_tuner/metis_tuner.py
+++ b/src/sdk/pynni/nni/metis_tuner/metis_tuner.py
@@ -20,15 +20,15 @@
 import copy
 import logging
+import numpy as np
 import os
 import random
 import statistics
 import sys
+import warnings
 from enum import Enum, unique
 from multiprocessing.dummy import Pool as ThreadPool
 
-import numpy as np
-
 import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
 import nni.metis_tuner.lib_data as lib_data
 import nni.metis_tuner.Regression_GMM.CreateModel as gmm_create_model
@@ -42,8 +42,6 @@
 
 logger = logging.getLogger("Metis_Tuner_AutoML")
 
-
-
 NONE_TYPE = ''
 CONSTRAINT_LOWERBOUND = None
 CONSTRAINT_UPPERBOUND = None
@@ -93,7 +91,7 @@ def __init__(self, optimize_mode="maximize", no_resampling=True, no_candidates=F
         self.space = None
         self.no_resampling = no_resampling
         self.no_candidates = no_candidates
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)
         self.key_order = []
         self.cold_start_num = cold_start_num
         self.selection_num_starting_points = selection_num_starting_points
@@ -254,6 +252,9 @@ def _selection(self, samples_x, samples_y_aggregation, samples_y,
                    threshold_samplessize_resampling=50, no_candidates=False,
                    minimize_starting_points=None, minimize_constraints_fun=None):
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+
         next_candidate = None
         candidates = []
         samples_size_all = sum([len(i) for i in samples_y])
@@ -271,13 +272,12 @@ def _selection(self, samples_x, samples_y_aggregation, samples_y,
                                          minimize_constraints_fun=minimize_constraints_fun)
             if not lm_current:
                 return None
-
-        if no_candidates is False:
-            candidates.append({'hyperparameter': lm_current['hyperparameter'],
+        logger.info({'hyperparameter': lm_current['hyperparameter'],
                      'expected_mu': lm_current['expected_mu'],
                      'expected_sigma': lm_current['expected_sigma'],
                      'reason': "exploitation_gp"})
+
+        if no_candidates is False:
             # ===== STEP 2: Get recommended configurations for exploration =====
             results_exploration = gp_selection.selection(
                 "lc",
@@ -290,34 +290,48 @@ def _selection(self, samples_x, samples_y_aggregation, samples_y,
             if results_exploration is not None:
                 if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
-                    candidates.append({'hyperparameter': results_exploration['hyperparameter'],
+                    temp_candidate = {'hyperparameter': results_exploration['hyperparameter'],
                                        'expected_mu': results_exploration['expected_mu'],
                                        'expected_sigma': results_exploration['expected_sigma'],
-                                       'reason': "exploration"})
+                                       'reason': "exploration"}
+                    candidates.append(temp_candidate)
+                    logger.info("DEBUG: 1 exploration candidate selected\n")
+                    logger.info(temp_candidate)
                 else:
                     logger.info("DEBUG: No suitable exploration candidates were")
 
             # ===== STEP 3: Get recommended configurations for exploitation =====
             if samples_size_all >= threshold_samplessize_exploitation:
-                print("Getting candidates for exploitation...\n")
+                logger.info("Getting candidates for exploitation...\n")
                 try:
                     gmm = gmm_create_model.create_model(samples_x, samples_y_aggregation)
-                    results_exploitation = gmm_selection.selection(
-                        x_bounds,
-                        x_types,
-                        gmm['clusteringmodel_good'],
-                        gmm['clusteringmodel_bad'],
-                        minimize_starting_points,
-                        minimize_constraints_fun=minimize_constraints_fun)
+
+                    if ("discrete_int" in x_types) or ("range_int" in x_types):
+                        results_exploitation = gmm_selection.selection(x_bounds, x_types,
+                                                                       gmm['clusteringmodel_good'],
+                                                                       gmm['clusteringmodel_bad'],
+                                                                       minimize_starting_points,
+                                                                       minimize_constraints_fun=minimize_constraints_fun)
+                    else:
+                        # If all parameters are of "range_continuous", let's use GMM to generate random starting points
+                        results_exploitation = gmm_selection.selection_r(x_bounds, x_types,
+                                                                         gmm['clusteringmodel_good'],
+                                                                         gmm['clusteringmodel_bad'],
+                                                                         num_starting_points=self.selection_num_starting_points,
+                                                                         minimize_constraints_fun=minimize_constraints_fun)
 
                     if results_exploitation is not None:
                         if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
-                            candidates.append({'hyperparameter': results_exploitation['hyperparameter'],\
-                                               'expected_mu': results_exploitation['expected_mu'],\
-                                               'expected_sigma': results_exploitation['expected_sigma'],\
-                                               'reason': "exploitation_gmm"})
+                            temp_expected_mu, temp_expected_sigma = gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model'])
+                            temp_candidate = {'hyperparameter': results_exploitation['hyperparameter'],
+                                              'expected_mu': temp_expected_mu,
+                                              'expected_sigma': temp_expected_sigma,
+                                              'reason': "exploitation_gmm"}
+                            candidates.append(temp_candidate)
+                            logger.info("DEBUG: 1 exploitation_gmm candidate selected\n")
+                            logger.info(temp_candidate)
                         else:
                             logger.info("DEBUG: No suitable exploitation_gmm candidates were found\n")
@@ -338,11 +352,13 @@ def _selection(self, samples_x, samples_y_aggregation, samples_y,
             if results_outliers is not None:
                 for results_outlier in results_outliers:
                     if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
-                        candidates.append({'hyperparameter': samples_x[results_outlier['samples_idx']],\
+                        temp_candidate = {'hyperparameter': samples_x[results_outlier['samples_idx']],\
                                            'expected_mu': results_outlier['expected_mu'],\
                                            'expected_sigma': results_outlier['expected_sigma'],\
-                                           'reason': "resampling"})
+                                           'reason': "resampling"}
+                        candidates.append(temp_candidate)
                         logger.info("DEBUG: %d re-sampling candidates selected\n")
+                        logger.info(temp_candidate)
                     else:
                         logger.info("DEBUG: No suitable resampling candidates were found\n")
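
Note on the OptimizeMode(optimize_mode) changes in all three tuners: calling a string-valued Enum on the raw config value validates it once, at construction time, instead of comparing strings everywhere. A minimal sketch of the idiom with a stand-in enum; the real OptimizeMode these modules import may spell its members differently.

    # Illustrative stand-in for the OptimizeMode enum used by the advisors.
    from enum import Enum

    class OptimizeMode(Enum):
        Minimize = 'minimize'
        Maximize = 'maximize'

    mode = OptimizeMode('maximize')      # valid string -> enum member
    assert mode is OptimizeMode.Maximize

    try:
        OptimizeMode('max')              # unknown strings now fail fast
    except ValueError as err:
        print(err)                       # "'max' is not a valid OptimizeMode"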
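Note on selection_r in Selection.py: scikit-learn's GaussianMixture.sample returns a (samples, component_labels) tuple, which is why the new code passes minimize_starting_points[0] down to selection. A minimal sketch, assuming clusteringmodel_gmm_good is a fitted sklearn GaussianMixture:

    import numpy as np
    from sklearn.mixture import GaussianMixture

    rng = np.random.RandomState(0)
    gmm = GaussianMixture(n_components=2).fit(rng.rand(50, 3))  # toy 3-D data

    samples, labels = gmm.sample(n_samples=5)
    print(samples.shape)   # (5, 3): one candidate starting point per row
    print(labels.shape)    # (5,):   mixture component that generated each row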
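Note on the warnings import in metis_tuner.py: warnings.catch_warnings() is a context manager that snapshots the active warning filters and restores them on exit, so simplefilter("ignore") only affects code that runs while the with block is active. A minimal sketch of the idiom (noisy is a hypothetical stand-in for a warning-prone call such as a GMM fit):

    import warnings

    def noisy():
        warnings.warn("convergence warning")  # e.g. what sklearn fits may emit
        return 42

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        value = noisy()    # warning suppressed inside the block

    print(value)           # the original warning filters are back in effect here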